Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 36 additions & 0 deletions benchkit/benches/small/dd.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# Copyright (C) 2026 Vrije Universiteit Brussel. All rights reserved.
# SPDX-License-Identifier: MIT

from benchkit.core.bktypes.callresults import RunResult
from benchkit.core.bktypes.contexts import RunContext


class DDBench:
    """
    DD benchmark following the benchkit core protocol.

    Generates real, measurable CPU load by piping /dev/urandom into
    /dev/null with dd.  This makes it suitable for exercising perf-based
    profiling paths (flamegraph generation in particular), where an idle
    workload such as sleep would yield no samples.

    - run: invoke ``dd if=/dev/urandom of=/dev/null bs=1M count=<block_count>``
    """

    def run(
        self,
        ctx: RunContext,
        block_count: int,
    ) -> RunResult:
        """Execute one dd run copying ``block_count`` 1-MiB blocks.

        Args:
            ctx: run context providing command execution and build info.
            block_count: number of 1-MiB blocks dd copies (``count=`` arg).

        Returns:
            RunResult wrapping the captured dd output.
        """
        dd_argv = [
            "dd",
            "if=/dev/urandom",
            "of=/dev/null",
            "bs=1M",
            f"count={block_count}",
        ]

        # Run from the build directory when a build step produced one.
        workdir = None
        if ctx.build_result is not None:
            workdir = ctx.build_result.build_dir

        # dd reports its throughput summary on stderr; exit code 1 is
        # tolerated here (same as the original ignore_ret_codes setting).
        output = ctx.exec(
            argv=dd_argv,
            cwd=workdir,
            print_output=False,
            ignore_ret_codes=(1,),
        )
        return RunResult(outputs=[output])
8 changes: 8 additions & 0 deletions benchkit/commandwrappers/perf.py
Original file line number Diff line number Diff line change
Expand Up @@ -876,6 +876,14 @@ def post_run_hook_flamegraph(
)
perf_folded_pathname.write_text(out_folded.strip())

if not out_folded.strip():
print(
"[WARNING] No perf stack counts found. "
"Skipping flamegraph generation for this run.",
file=sys.stderr,
)
return

flamegraph_command = self._flamegraph_command(
title=flamegraph_title,
subtitle=flamegraph_subtitle,
Expand Down
115 changes: 68 additions & 47 deletions tests/campaigns/campaign_flame.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,35 +2,31 @@
# Copyright (C) 2024 Vrije Universiteit Brussel. All rights reserved.
# SPDX-License-Identifier: MIT

from benchmarks.sleep import SleepBench
"""
Flamegraph test campaign.

from benchkit.campaign import CampaignIterateVariables
from benchkit.commandwrappers.perf import PerfReportWrap, enable_non_sudo_perf
from benchkit.platforms import get_current_platform
from benchkit.utils.dir import caller_dir
from benchkit.utils.git import clone_repo
Two sub-campaigns exercise perf-based flame graph generation:

1. **sleep** — a workload that produces zero CPU samples.
Validates the graceful no-samples path (warning instead of crash).

def main() -> None:
platform = get_current_platform()
enable_non_sudo_perf(comm_layer=platform.comm)
2. **dd** — a CPU-intensive workload (dd if=/dev/urandom of=/dev/null).
Validates actual flamegraph SVG generation with real perf data.
"""

flamegraph_path = caller_dir() / "deps/FlameGraph"
clone_repo(
repo_url="https://github.com/brendangregg/FlameGraph.git",
repo_src_dir=flamegraph_path,
commit="cd9ee4c4449775a2f867acf31c84b7fe4b132ad5",
)
from benchkit import CampaignCartesianProduct
from benchkit.benches.small.dd import DDBench
from benchkit.benches.small.sleep import SleepBench
from benchkit.campaign import CampaignSuite
from benchkit.commandwrappers.perf import PerfReportWrap, enable_non_sudo_perf
from benchkit.platforms import get_current_platform
from benchkit.utils.dir import get_tools_dir

perf_wrapper = PerfReportWrap(
freq=99,
# freq=10,
report_interactive=False,
report_file=True,
flamegraph_path=flamegraph_path,
)

def flame_post_hook(
def _make_flame_post_hook(perf_wrapper):
"""Return a post-run hook that generates a flame graph for each run."""

def hook(
experiment_results_lines,
record_data_dir,
write_record_file_fun,
Expand All @@ -43,35 +39,60 @@ def flame_post_hook(
flamegraph_fontsize=14,
)

campaign = CampaignIterateVariables(
name="flame",
benchmark=SleepBench(
command_wrappers=[perf_wrapper],
post_run_hooks=[
perf_wrapper.post_run_hook_report,
flame_post_hook,
],
),
return hook


def main() -> None:
platform = get_current_platform()
enable_non_sudo_perf(comm_layer=platform.comm)

flamegraph_dir = get_tools_dir(None) / "FlameGraph"

# --- shared perf wrapper (one instance is fine for sequential campaigns) ---
perf_wrapper = PerfReportWrap(
freq=99,
report_interactive=False,
report_file=True,
flamegraph_path=flamegraph_dir,
)
perf_wrapper.fetch_flamegraph()

flame_hook = _make_flame_post_hook(perf_wrapper)

# --- Campaign 1: sleep (no CPU samples → graceful skip) ---
campaign_sleep = CampaignCartesianProduct(
name="flame_sleep",
benchmark=SleepBench(),
variables={
"duration_seconds": [1],
},
nb_runs=1,
variables=[
{
"duration_seconds": 1,
},
{
"duration_seconds": 2,
},
command_wrappers=[perf_wrapper],
post_run_hooks=[
perf_wrapper.post_run_hook_report,
flame_hook,
],
constants=None,
debug=False,
gdb=False,
enable_data_dir=True,
platform=platform,
)

campaign.run()
# --- Campaign 2: dd (CPU-intensive → real flamegraph) ---
campaign_dd = CampaignCartesianProduct(
name="flame_dd",
benchmark=DDBench(),
variables={
"block_count": [50],
},
nb_runs=1,
command_wrappers=[perf_wrapper],
post_run_hooks=[
perf_wrapper.post_run_hook_report,
flame_hook,
],
platform=platform,
)

results_path = campaign.base_data_dir()
perf_wrapper.fzf_report(search_dir=results_path)
perf_wrapper.fzf_flamegraph(search_dir=results_path)
suite = CampaignSuite(campaigns=[campaign_sleep, campaign_dd])
suite.run_suite()


if __name__ == "__main__":
Expand Down
58 changes: 58 additions & 0 deletions tests/campaigns/campaign_flame_fzf.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
# Copyright (C) 2024 Vrije Universiteit Brussel. All rights reserved.
# SPDX-License-Identifier: MIT
from benchkit import CampaignCartesianProduct
from benchkit.benches.small.dd import DDBench
from benchkit.commandwrappers.perf import PerfReportWrap, enable_non_sudo_perf
from benchkit.platforms import get_current_platform
from benchkit.utils.dir import get_tools_dir


def main() -> None:
    """Run a dd-based flamegraph campaign, then browse results with fzf.

    Sets up non-sudo perf access, fetches the FlameGraph tooling, runs a
    small cartesian-product campaign over two dd workload sizes, and
    finally opens fzf-based pickers over the generated reports and SVGs.
    """
    platform = get_current_platform()
    enable_non_sudo_perf(comm_layer=platform.comm)

    # FlameGraph scripts live under the shared tools directory.
    fg_repo_dir = get_tools_dir(None) / "FlameGraph"
    wrapper = PerfReportWrap(
        freq=99,
        report_interactive=False,
        report_file=True,
        flamegraph_path=fg_repo_dir,
    )
    wrapper.fetch_flamegraph()

    def post_flame(
        experiment_results_lines,
        record_data_dir,
        write_record_file_fun,
    ):
        # Per-run hook: render a small flame graph SVG next to the run data.
        return wrapper.post_run_hook_flamegraph(
            experiment_results_lines=experiment_results_lines,
            record_data_dir=record_data_dir,
            write_record_file_fun=write_record_file_fun,
            flamegraph_width=400,
            flamegraph_fontsize=14,
        )

    campaign = CampaignCartesianProduct(
        name="flame_dd_fzf",
        benchmark=DDBench(),
        variables={"block_count": [50, 500]},
        nb_runs=1,
        command_wrappers=[wrapper],
        post_run_hooks=[
            wrapper.post_run_hook_report,
            post_flame,
        ],
        platform=platform,
    )

    campaign.run()

    # Interactive browsing of the produced reports and flame graphs.
    data_dir = campaign.base_data_dir()
    wrapper.fzf_report(search_dir=data_dir)
    wrapper.fzf_flamegraph(search_dir=data_dir)


if __name__ == "__main__":
main()