Skip to content

Commit 8fd19bb

Browse files
authored
Merge pull request #27 from miciav/feature/runner-controller-ui
Feature/runner controller UI
2 parents 021a842 + 4571fd3 commit 8fd19bb

32 files changed

+1640
-289
lines changed

docs/configuration.md

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
## Configuration
2+
3+
All knobs are defined in `BenchmarkConfig`:
4+
5+
```python
6+
from pathlib import Path
7+
from lb_runner.benchmark_config import BenchmarkConfig
8+
from lb_runner.plugins.stress_ng.plugin import StressNGConfig
9+
10+
config = BenchmarkConfig(
11+
repetitions=5,
12+
test_duration_seconds=120,
13+
metrics_interval_seconds=0.5,
14+
plugin_settings={
15+
"stress_ng": StressNGConfig(
16+
cpu_workers=4,
17+
vm_workers=2,
18+
vm_bytes="2G",
19+
)
20+
},
21+
)
22+
23+
config.save(Path("my_config.json"))
24+
config = BenchmarkConfig.load(Path("my_config.json"))
25+
```

docs/contributing.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
## Contributing
2+
3+
1. Fork the project
4+
2. Create a feature branch (`git checkout -b feature/AmazingFeature`)
5+
3. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
6+
4. Push the branch (`git push origin feature/AmazingFeature`)
7+
5. Open a Pull Request

docs/css/extra.css

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
.md-header__button.md-logo img {
2+
height: 3.5rem; /* Adjust as needed */
3+
}
4+
5+
.md-header__button.md-logo {
6+
padding-top: 0.5rem; /* Adjust vertical alignment if necessary */
7+
padding-bottom: 0.5rem;
8+
}

docs/index.md

Lines changed: 6 additions & 105 deletions
Original file line numberDiff line numberDiff line change
@@ -34,45 +34,7 @@ It supports two operation modes:
3434
- **perf**: Linux profiling
3535
- **bcc/eBPF tools**: optional kernel-level metrics
3636

37-
## Installation
3837

39-
1. Clone the repository:
40-
```bash
41-
git clone <repository-url>
42-
cd linux-benchmark-lib
43-
```
44-
45-
### Mode 1: Agent (Lightweight)
46-
Installs only the core dependencies for running benchmarks on a target node.
47-
48-
```bash
49-
uv sync
50-
# Or install as a tool
51-
uv tool install .
52-
```
53-
54-
### Mode 2: Controller (Full)
55-
Installs the core plus orchestration tools (Ansible) and reporting libraries (Matplotlib, Seaborn).
56-
57-
```bash
58-
uv sync --extra controller
59-
# Or install as a tool
60-
uv tool install ".[controller]"
61-
```
62-
63-
### Development
64-
Installs all dependencies including test and linting tools.
65-
66-
```bash
67-
uv sync --all-extras --dev
68-
```
69-
70-
Switch between modes quickly with the helper script:
71-
72-
```bash
73-
bash tools/switch_mode.sh base # core only
74-
bash tools/switch_mode.sh controller # adds controller extra
75-
bash tools/switch_mode.sh dev # dev + all extras
7638
```
7739
7840
## CLI (lb)
@@ -111,40 +73,9 @@ See `CLI.md` for the full command reference. Highlights:
11173
This updates the generated apt/pip install block in `Dockerfile` and rewrites `lb_controller/ansible/roles/workload_runner/tasks/plugins.generated.yml`.
11274
- Commit both the manifest and generated files so remote setup and the container stay in sync with available plugins.
11375
- See `docs/PLUGIN_DEVELOPMENT.md` for a full plugin authoring guide (WorkloadPlugin interface, manifests, packaging, git installs).
114-
- HPL plugin: vedi `lb_runner/plugins/hpl/README.md` per note su packaging `.deb`, build VM/Docker e test `xhpl`.
115-
116-
## Quick Start
117-
118-
```python
119-
from lb_runner.benchmark_config import BenchmarkConfig, RemoteHostConfig, RemoteExecutionConfig
120-
from lb_controller.controller import BenchmarkController
121-
from lb_runner.local_runner import LocalRunner
122-
from lb_runner.plugin_system.builtin import builtin_plugins
123-
from lb_runner.plugin_system.registry import PluginRegistry
124-
125-
# Create a configuration
126-
config = BenchmarkConfig(
127-
repetitions=3,
128-
test_duration_seconds=3600,
129-
metrics_interval_seconds=1.0
130-
)
131-
132-
# Local execution (Agent Mode)
133-
registry = PluginRegistry(builtin_plugins())
134-
runner = LocalRunner(config, registry=registry)
135-
runner.run_benchmark("stress_ng")
136-
137-
# Remote execution (Controller Mode)
138-
# Requires 'controller' extra installed
139-
remote_config = BenchmarkConfig(
140-
remote_hosts=[RemoteHostConfig(name="node1", address="192.168.1.10", user="ubuntu")],
141-
remote_execution=RemoteExecutionConfig(enabled=True),
142-
)
143-
# Use distinct, non-empty `name` values per host; they become per-host output dirs.
144-
controller = BenchmarkController(remote_config)
145-
summary = controller.run(["stress_ng"], run_id="demo-run")
146-
print(summary.per_host_output)
147-
```
76+
- HPL plugin: see `lb_runner/plugins/hpl/README.md` for notes on `.deb` packaging, VM/Docker builds, and `xhpl` testing.
77+
78+
14879

14980
## Project Layout
15081

@@ -158,31 +89,7 @@ linux-benchmark-lib/
15889
└── pyproject.toml # Project configuration (Core + Extras)
15990
```
16091

161-
## Configuration
162-
163-
All knobs are defined in `BenchmarkConfig`:
164-
165-
```python
166-
from pathlib import Path
167-
from lb_runner.benchmark_config import BenchmarkConfig
168-
from lb_runner.plugins.stress_ng.plugin import StressNGConfig
169-
170-
config = BenchmarkConfig(
171-
repetitions=5,
172-
test_duration_seconds=120,
173-
metrics_interval_seconds=0.5,
174-
plugin_settings={
175-
"stress_ng": StressNGConfig(
176-
cpu_workers=4,
177-
vm_workers=2,
178-
vm_bytes="2G",
179-
)
180-
},
181-
)
182-
183-
config.save(Path("my_config.json"))
184-
config = BenchmarkConfig.load(Path("my_config.json"))
185-
```
92+
18693

18794
## Output
18895

@@ -209,14 +116,8 @@ mv classes*.puml docs/diagrams/classes.puml
209116
mv packages*.puml docs/diagrams/packages.puml
210117
```
211118

212-
## Contributing
213119

214-
1. Fork the project
215-
2. Create a feature branch (`git checkout -b feature/AmazingFeature`)
216-
3. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
217-
4. Push the branch (`git push origin feature/AmazingFeature`)
218-
5. Open a Pull Request
219120

220-
## Licenza
121+
## License
221122

222-
Distribuito sotto licenza MIT. Vedi `LICENSE` per maggiori informazioni.
123+
Distributed under the MIT License. See `LICENSE` for more information.

docs/installation.md

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
## Installation
2+
3+
1. Clone the repository:
4+
```bash
5+
git clone <repository-url>
6+
cd linux-benchmark-lib
7+
```
8+
9+
### Mode 1: Agent (Lightweight)
10+
Installs only the core dependencies for running benchmarks on a target node.
11+
12+
```bash
13+
uv sync
14+
# Or install as a tool
15+
uv tool install .
16+
```
17+
18+
### Mode 2: Controller (Full)
19+
Installs the core plus orchestration tools (Ansible) and reporting libraries (Matplotlib, Seaborn).
20+
21+
```bash
22+
uv sync --extra controller
23+
# Or install as a tool
24+
uv tool install ".[controller]"
25+
```
26+
27+
### Development
28+
Installs all dependencies including test and linting tools.
29+
30+
```bash
31+
uv sync --all-extras --dev
32+
```
33+
34+
Switch between modes quickly with the helper script:
35+
36+
```bash
37+
bash tools/switch_mode.sh base # core only
38+
bash tools/switch_mode.sh controller # adds controller extra
39+
bash tools/switch_mode.sh dev # dev + all extras

docs/quickstart.md

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
## Quick Start
2+
3+
```python
4+
from lb_runner.benchmark_config import BenchmarkConfig, RemoteHostConfig, RemoteExecutionConfig
5+
from lb_controller.controller import BenchmarkController
6+
from lb_runner.local_runner import LocalRunner
7+
from lb_runner.plugin_system.builtin import builtin_plugins
8+
from lb_runner.plugin_system.registry import PluginRegistry
9+
10+
# Create a configuration
11+
config = BenchmarkConfig(
12+
repetitions=3,
13+
test_duration_seconds=3600,
14+
metrics_interval_seconds=1.0
15+
)
16+
17+
# Local execution (Agent Mode)
18+
registry = PluginRegistry(builtin_plugins())
19+
runner = LocalRunner(config, registry=registry)
20+
runner.run_benchmark("stress_ng")
21+
22+
# Remote execution (Controller Mode)
23+
# Requires 'controller' extra installed
24+
remote_config = BenchmarkConfig(
25+
remote_hosts=[RemoteHostConfig(name="node1", address="192.168.1.10", user="ubuntu")],
26+
remote_execution=RemoteExecutionConfig(enabled=True),
27+
)
28+
# Use distinct, non-empty `name` values per host; they become per-host output dirs.
29+
controller = BenchmarkController(remote_config)
30+
summary = controller.run(["stress_ng"], run_id="demo-run")
31+
print(summary.per_host_output)
32+
```

lb_controller/ansible/roles/workload_runner/tasks/main.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,8 @@
99
- name: Define workload runner workdir (use globally installed lb_workdir)
1010
set_fact:
1111
workload_runner_workdir: "{{ lb_workdir | default('/opt/lb') }}"
12-
setup_prefix: "[setup:{{ inventory_hostname }}]"
13-
run_prefix: "[run:{{ inventory_hostname }}]"
12+
setup_prefix: "[setup]"
13+
run_prefix: "[run]"
1414

1515
- name: "{{ setup_prefix }} Ensure workload output directory exists"
1616
ansible.builtin.file:

lb_controller/ansible/roles/workload_runner/tasks/run_single_rep.yml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
if workload_runner_reps is defined and (workload_runner_reps | length) > 0
99
else (workload_runner_repetitions_total | default(1) | int)
1010
}}
11-
run_prefix: "[run:{{ inventory_hostname }}:{{ workload_item[0] }}]"
11+
run_prefix: "[run:{{ workload_item[0] }}]"
1212

1313
- name: "{{ run_prefix }} Emit start event for repetition"
1414
ansible.builtin.debug:
@@ -75,6 +75,7 @@
7575
chdir: "{{ workload_runner_workdir }}"
7676
environment:
7777
LB_RUN_HOST: "{{ inventory_hostname }}"
78+
LB_ENABLE_EVENT_LOGGING: "1"
7879
register: workload_runner_rep_result
7980
changed_when: true
8081

lb_controller/ansible_executor.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
from typing import Any, Callable, Dict, List, Optional
1212

1313
from lb_controller.types import ExecutionResult, InventorySpec, RemoteExecutor
14+
from lb_runner.stop_token import StopToken
1415

1516
logger = logging.getLogger(__name__)
1617
ANSIBLE_ROOT = Path(__file__).resolve().parent / "ansible"
@@ -25,6 +26,7 @@ def __init__(
2526
runner_fn: Optional[Callable[..., Any]] = None,
2627
stream_output: bool = False,
2728
output_callback: Optional[Callable[[str, str], None]] = None,
29+
stop_token: StopToken | None = None,
2830
):
2931
"""
3032
Initialize the executor.
@@ -43,6 +45,7 @@ def __init__(
4345
self.event_log_path = self.private_data_dir / "lb_events.jsonl"
4446
self._runner_fn = runner_fn
4547
self.stream_output = stream_output
48+
self.stop_token = stop_token
4649
# Force Ansible temp into a writable location inside the runner dir to avoid host-level permission issues
4750
self.local_tmp = self.private_data_dir / "tmp"
4851
self.local_tmp.mkdir(parents=True, exist_ok=True)
@@ -66,6 +69,8 @@ def run_playbook(
6669
limit_hosts: Optional[List[str]] = None,
6770
) -> ExecutionResult:
6871
"""Execute a playbook using ansible-runner."""
72+
if self.stop_token and self.stop_token.should_stop():
73+
return ExecutionResult(rc=1, status="stopped", stats={})
6974
if not playbook_path.exists():
7075
raise FileNotFoundError(f"Playbook not found: {playbook_path}")
7176

@@ -204,13 +209,20 @@ def _run_subprocess_playbook(
204209
text=True,
205210
)
206211
assert proc.stdout is not None
212+
stop_requested = False
207213
for line in proc.stdout:
214+
if self.stop_token and self.stop_token.should_stop():
215+
stop_requested = True
216+
proc.terminate()
217+
break
208218
if self.output_callback:
209219
self.output_callback(line.rstrip("\n"), "\n")
210220
else:
211221
sys.stdout.write(line)
212222
proc.wait()
213223
rc = proc.returncode
224+
if stop_requested or (self.stop_token and self.stop_token.should_stop()):
225+
return ExecutionResult(rc=rc or 1, status="stopped", stats={})
214226
else:
215227
completed = subprocess.run(
216228
cmd,

0 commit comments

Comments
 (0)