from __future__ import annotations
import json
import re
import shutil
import subprocess
import sys
import threading
import time
import uuid
from datetime import datetime
from pathlib import Path
from typing import TextIO
from .terminal_ui import TerminalUI
from .utils import (
DEFAULT_REFINEMENT_SUGGESTIONS,
FIXED_STAGE_OPTIONS,
OperatorResult,
RunPaths,
StageSpec,
append_jsonl,
approved_stage_summaries,
extract_stream_text_fragments,
read_text,
relative_to_run,
write_text,
)
class ClaudeOperator:
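    """Operator backend that runs pipeline stages through the Claude CLI.

    Streams the CLI's stream-JSON output to the terminal UI, persists per-stage
    session and attempt state under the run directory, and can simulate stages
    deterministically in fake mode.
    """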
backend_name = "claude"
def __init__(
self,
command: str = "claude",
model: str = "sonnet",
fake_mode: bool = False,
output_stream: TextIO = sys.stdout,
ui: TerminalUI | None = None,
stage_timeout: int = 14400,
) -> None:
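        """Configure the operator.

        `stage_timeout` is the per-stage wall-clock limit in seconds
        (default 14400, i.e. 4 hours). `fake_mode` routes `run_stage`
        to the deterministic simulator instead of the CLI.
        """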
self.command = command
self.model = model
self.fake_mode = fake_mode
self.output_stream = output_stream
self.ui = ui or TerminalUI(output_stream=output_stream)
self.stage_timeout = stage_timeout
def run_stage(
self,
stage: StageSpec,
prompt: str,
paths: RunPaths,
attempt_no: int,
continue_session: bool = False,
) -> OperatorResult:
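        """Execute one stage attempt, dispatching to the fake or real backend."""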
if self.fake_mode:
return self._run_fake(stage, prompt, paths, attempt_no, continue_session=continue_session)
return self._run_real(stage, prompt, paths, attempt_no, continue_session=continue_session)
def _run_real(
self,
stage: StageSpec,
prompt: str,
paths: RunPaths,
attempt_no: int,
continue_session: bool = False,
) -> OperatorResult:
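        """Run a stage against the real CLI.

        Caches the rendered prompt, resolves the stage's session id, streams the
        invocation, and falls back to a fresh session when a resume attempt
        fails. Attempt and session state are recorded before and after the run.
        """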
if shutil.which(self.command) is None:
raise FileNotFoundError(
f"{self._agent_label()} CLI not found: {self.command}. Install it or use --fake-operator."
)
prompt_path = paths.prompt_cache_dir / f"{stage.slug}_attempt_{attempt_no:02d}.prompt.md"
write_text(prompt_path, prompt)
session_id = self._resolve_stage_session_id(paths, stage, continue_session)
command, invocation_cwd, stdin_text = self._prepare_invocation(
prompt_path,
session_id,
paths=paths,
resume=continue_session,
)
active_command = command
self._write_attempt_state(
paths,
stage,
attempt_no,
{
"status": "starting",
"mode": "resume" if continue_session else "start",
"session_id": session_id,
"prompt_path": str(prompt_path),
"command": command,
"started_at": self._now(),
},
)
append_jsonl(
paths.logs_raw,
{
"_meta": {
"stage": stage.slug,
"attempt": attempt_no,
"mode": "real_continue" if continue_session else "real_start",
"command": command,
"prompt_path": str(prompt_path),
"session_id": session_id,
}
},
)
exit_code, stdout_text, stderr_text, observed_session_id, stream_meta = self._run_streaming_command(
command=command,
cwd=invocation_cwd,
stage=stage,
attempt_no=attempt_no,
paths=paths,
mode="real_continue" if continue_session else "real_start",
stdin_text=stdin_text,
)
stage_file = paths.stage_tmp_file(stage)
if (
continue_session
and exit_code != 0
and self._looks_like_resume_failure(stdout_text, stderr_text)
):
fallback_session_id = str(uuid.uuid4())
fallback_command, fallback_cwd, fallback_stdin_text = self._prepare_invocation(
prompt_path,
fallback_session_id,
paths=paths,
resume=False,
)
append_jsonl(
paths.logs_raw,
{
"_meta": {
"stage": stage.slug,
"attempt": attempt_no,
"mode": "real_continue_fallback_start",
"previous_session_id": session_id,
"fallback_session_id": fallback_session_id,
"command": fallback_command,
"prompt_path": str(prompt_path),
}
},
)
self._mark_session_broken(paths, stage, session_id, reason="resume_failure")
exit_code, stdout_text, stderr_text, observed_session_id, stream_meta = self._run_streaming_command(
command=fallback_command,
cwd=fallback_cwd,
stage=stage,
attempt_no=attempt_no,
paths=paths,
mode="real_continue_fallback_start",
stdin_text=fallback_stdin_text,
)
session_id = fallback_session_id
active_command = fallback_command
success = exit_code == 0 and stage_file.exists()
effective_session_id = self._select_effective_session_id(
requested_session_id=session_id,
observed_session_id=observed_session_id,
success=success,
)
self._persist_stage_session_id(paths, stage, effective_session_id)
self._update_session_state(
paths,
stage,
effective_session_id,
{
"broken": not success and continue_session,
"last_exit_code": exit_code,
"last_mode": "resume" if continue_session else "start",
"updated_at": self._now(),
},
)
self._write_attempt_state(
paths,
stage,
attempt_no,
{
"status": "completed" if success else "failed",
"mode": "resume" if continue_session else "start",
"session_id": effective_session_id,
"prompt_path": str(prompt_path),
"command": active_command,
"exit_code": exit_code,
"stdout_excerpt": stdout_text[-2000:] if stdout_text else "",
"stderr_excerpt": stderr_text[-1000:] if stderr_text else "",
"stream_meta": stream_meta,
"finished_at": self._now(),
},
)
return OperatorResult(
success=success,
exit_code=exit_code,
stdout=stdout_text,
stderr=stderr_text,
stage_file_path=stage_file,
session_id=effective_session_id,
)
def repair_stage_summary(
self,
stage: StageSpec,
original_prompt: str,
original_result: OperatorResult,
paths: RunPaths,
attempt_no: int,
) -> OperatorResult:
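        """Ask the agent to rewrite an invalid or missing stage summary file.

        Builds a constrained recovery prompt (no web access, Write/Read/Glob/Grep
        tools only) and prefers resuming the stage's existing session, starting a
        new one only when no usable session exists or the resume fails.
        """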
if self.fake_mode:
return self._run_fake(stage, original_prompt, paths, attempt_no, continue_session=False)
stage_file = paths.stage_tmp_file(stage)
current_draft_text = read_text(stage_file) if stage_file.exists() else "(missing)"
current_final_path = paths.stage_file(stage)
current_final_text = read_text(current_final_path) if current_final_path.exists() else "(missing)"
recovery_prompt = f"""
You are performing failure recovery for {stage.stage_title}.
The previous attempt either failed before producing a valid stage summary file or produced a file with missing required sections.
Your only task now is to overwrite the stage summary file at:
{stage_file}
Rules:
- Do not browse the web.
- Do not use WebSearch or WebFetch.
- Do not try to continue the full research workflow.
- Use only the information already available in the prompt below and the run directory if needed.
- If the earlier attempt failed or produced incomplete evidence, state that clearly in the summary.
- You must still produce a valid markdown file in the required format.
- Treat `{stage_file}` as the final deliverable, not as a scratchpad.
- Do not write half-finished, placeholder, outline-only, pending, or in-progress content to `{stage_file}`.
- If you need scratch notes while repairing, write them somewhere else in the run directory, not to `{stage_file}`.
- Do not describe, summarize, or comment on the repair prompt itself.
- Do not ask the user what to do next.
- Do not say that the stage "already completed successfully" unless the written stage file itself contains the full required structure.
- You must directly write the repaired markdown file, then stop.
Required markdown structure:
# Stage X: <name>
## Objective
## Previously Approved Stage Summaries
## What I Did
## Key Results
## Files Produced
## Decision Ledger
## Suggestions for Refinement
1. {DEFAULT_REFINEMENT_SUGGESTIONS[0]}
2. {DEFAULT_REFINEMENT_SUGGESTIONS[1]}
3. {DEFAULT_REFINEMENT_SUGGESTIONS[2]}
## Your Options
{chr(10).join(FIXED_STAGE_OPTIONS)}
Required completion behavior:
1. Read the current stage file if it exists.
2. Overwrite it with a complete markdown document in the exact structure above.
3. Ensure both `## Previously Approved Stage Summaries` and `## Your Options` are present.
4. Ensure there is no `[In progress]`, `[Pending]`, `[TODO]`, `[TBD]`, or similar unfinished marker anywhere in the file.
5. After writing, respond only with a short confirmation that you rewrote the file.
Current draft stage file contents:
{current_draft_text}
Current promoted stage file contents:
{current_final_text}
Original prompt:
{original_prompt}
Original stdout:
{original_result.stdout or "(empty)"}
Original stderr:
{original_result.stderr or "(empty)"}
""".strip()
recovery_prompt_path = paths.prompt_cache_dir / f"{stage.slug}_attempt_{attempt_no:02d}_repair.prompt.md"
write_text(recovery_prompt_path, recovery_prompt)
session_id = self._resolve_stage_session_id(paths, stage, continue_session=True, allow_create=False)
if session_id:
command, invocation_cwd, stdin_text = self._prepare_invocation(
recovery_prompt_path,
session_id,
paths=paths,
resume=True,
tools="Write,Read,Glob,Grep",
)
else:
session_id = self._resolve_stage_session_id(paths, stage, continue_session=False)
command, invocation_cwd, stdin_text = self._prepare_invocation(
recovery_prompt_path,
session_id,
paths=paths,
resume=False,
tools="Write,Read,Glob,Grep",
)
append_jsonl(
paths.logs_raw,
{
"_meta": {
"stage": stage.slug,
"attempt": attempt_no,
"mode": "repair",
"command": command,
"prompt_path": str(recovery_prompt_path),
"session_id": session_id,
}
},
)
self._write_attempt_state(
paths,
stage,
attempt_no,
{
"status": "repair_starting",
"mode": "repair",
"session_id": session_id,
"prompt_path": str(recovery_prompt_path),
"command": command,
"started_at": self._now(),
},
)
exit_code, stdout_text, stderr_text, observed_session_id, stream_meta = self._run_streaming_command(
command=command,
cwd=invocation_cwd,
stage=stage,
attempt_no=attempt_no,
paths=paths,
mode="repair",
stdin_text=stdin_text,
)
if (
session_id
and exit_code != 0
and self._looks_like_resume_failure(stdout_text, stderr_text)
):
fallback_session_id = str(uuid.uuid4())
fallback_command, fallback_cwd, fallback_stdin_text = self._prepare_invocation(
recovery_prompt_path,
fallback_session_id,
paths=paths,
resume=False,
tools="Write,Read,Glob,Grep",
)
append_jsonl(
paths.logs_raw,
{
"_meta": {
"stage": stage.slug,
"attempt": attempt_no,
"mode": "repair_fallback_start",
"previous_session_id": session_id,
"fallback_session_id": fallback_session_id,
"command": fallback_command,
"prompt_path": str(recovery_prompt_path),
}
},
)
self._mark_session_broken(paths, stage, session_id, reason="repair_resume_failure")
exit_code, stdout_text, stderr_text, observed_session_id, stream_meta = self._run_streaming_command(
command=fallback_command,
cwd=fallback_cwd,
stage=stage,
attempt_no=attempt_no,
paths=paths,
mode="repair_fallback_start",
stdin_text=fallback_stdin_text,
)
session_id = fallback_session_id
command = fallback_command
success = exit_code == 0 and stage_file.exists()
effective_session_id = self._select_effective_session_id(
requested_session_id=session_id,
observed_session_id=observed_session_id,
success=success,
)
self._persist_stage_session_id(paths, stage, effective_session_id)
self._update_session_state(
paths,
stage,
effective_session_id,
{
"broken": not success,
"last_exit_code": exit_code,
"last_mode": "repair",
"updated_at": self._now(),
},
)
self._write_attempt_state(
paths,
stage,
attempt_no,
{
"status": "repair_completed" if exit_code == 0 and stage_file.exists() else "repair_failed",
"mode": "repair",
"session_id": effective_session_id,
"prompt_path": str(recovery_prompt_path),
"command": command,
"exit_code": exit_code,
"stdout_excerpt": stdout_text[-2000:] if stdout_text else "",
"stderr_excerpt": stderr_text[-1000:] if stderr_text else "",
"stream_meta": stream_meta,
"finished_at": self._now(),
},
)
return OperatorResult(
success=success,
exit_code=exit_code,
stdout=stdout_text,
stderr=stderr_text,
stage_file_path=stage_file,
session_id=effective_session_id,
)
def _run_streaming_command(
self,
command: list[str],
cwd: Path,
stage: StageSpec,
attempt_no: int,
paths: RunPaths,
mode: str,
stdin_text: str | None = None,
) -> tuple[int, str, str, str | None, dict[str, object]]:
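        """Launch the CLI and consume its stdout line by line.

        Each line is parsed as stream-JSON, appended to the raw log, and
        mirrored to the terminal UI; non-JSON lines are logged and shown
        verbatim. A watchdog enforces `self.stage_timeout`. Returns
        `(exit_code, stdout_text, stderr_text, observed_session_id,
        stream_meta)`, with an exit code of -1 on timeout.
        """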
process = subprocess.Popen(
command,
cwd=str(cwd),
stdin=subprocess.PIPE if stdin_text is not None else None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
bufsize=1,
)
if process.stdout is None:
raise RuntimeError(f"Failed to capture {self._agent_label()} output stream.")
stdin_thread: threading.Thread | None = None
if stdin_text is not None and process.stdin is not None:
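            # Feed the prompt via stdin on a helper thread so a full pipe
            # cannot block against the stdout reader loop below.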
def _feed_stdin() -> None:
try:
process.stdin.write(stdin_text)
except BrokenPipeError:
pass
finally:
try:
process.stdin.close()
except BrokenPipeError:
pass
stdin_thread = threading.Thread(target=_feed_stdin, daemon=True)
stdin_thread.start()
extracted_fragments: list[str] = []
raw_lines: list[str] = []
non_json_lines: list[str] = []
ended_with_newline = True
observed_session_id: str | None = None
tool_names: dict[str, str] = {}
malformed_json_count = 0
timed_out = threading.Event()
start_time = time.monotonic()
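        # Watchdog: terminate (then kill) the CLI once the stage exceeds stage_timeout.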
def _on_timeout() -> None:
timed_out.set()
process.terminate()
try:
process.wait(timeout=5)
except subprocess.TimeoutExpired:
process.kill()
timer = threading.Timer(self.stage_timeout, _on_timeout)
timer.daemon = True
timer.start()
try:
for raw_line in process.stdout:
if timed_out.is_set():
break
ended_with_newline = raw_line.endswith("\n")
line = raw_line.rstrip("\n")
raw_lines.append(line)
stripped = line.strip()
if not stripped:
continue
try:
payload = json.loads(stripped)
except json.JSONDecodeError:
malformed_json_count += 1
append_jsonl(
paths.logs_raw,
{
"_meta": {
"stage": stage.slug,
"attempt": attempt_no,
"mode": mode,
"non_json_output": stripped,
}
},
)
non_json_lines.append(stripped)
self.ui.show_raw_stream_line(stripped)
continue
append_jsonl(paths.logs_raw, payload)
if observed_session_id is None:
observed_session_id = self._extract_session_id(payload)
extracted_fragments.extend(extract_stream_text_fragments(payload))
self.ui.show_stream_event(payload, tool_names)
except KeyboardInterrupt:
elapsed = time.monotonic() - start_time
process.terminate()
try:
process.wait(timeout=5)
except subprocess.TimeoutExpired:
process.kill()
process.wait()
append_jsonl(
paths.logs_raw,
{
"_meta": {
"stage": stage.slug,
"attempt": attempt_no,
"mode": mode,
"event": "keyboard_interrupt",
"elapsed_seconds": round(elapsed, 1),
}
},
)
raise
finally:
timer.cancel()
if stdin_thread is not None:
stdin_thread.join(timeout=1)
if process.stdin is not None and not process.stdin.closed:
process.stdin.close()
process.stdout.close()
if timed_out.is_set():
elapsed = time.monotonic() - start_time
append_jsonl(
paths.logs_raw,
{
"_meta": {
"stage": stage.slug,
"attempt": attempt_no,
"mode": mode,
"event": "stage_timeout",
"timeout_seconds": self.stage_timeout,
"elapsed_seconds": round(elapsed, 1),
}
},
)
stdout_text = self._compose_stdout_text(
extracted_fragments=extracted_fragments,
non_json_lines=non_json_lines,
raw_lines=raw_lines,
)
return -1, stdout_text, "Stage timed out", observed_session_id, {
"raw_line_count": len(raw_lines),
"non_json_line_count": len(non_json_lines),
"malformed_json_count": malformed_json_count,
"observed_session_id": observed_session_id,
"timed_out": True,
}
exit_code = process.wait()
if raw_lines and not ended_with_newline:
self.output_stream.write("\n")
self.output_stream.flush()
stdout_text = self._compose_stdout_text(
extracted_fragments=extracted_fragments,
non_json_lines=non_json_lines,
raw_lines=raw_lines,
)
return exit_code, stdout_text, "", observed_session_id, {
"raw_line_count": len(raw_lines),
"non_json_line_count": len(non_json_lines),
"malformed_json_count": malformed_json_count,
"observed_session_id": observed_session_id,
}
def _compose_stdout_text(
self,
extracted_fragments: list[str],
non_json_lines: list[str],
raw_lines: list[str],
) -> str:
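        """Assemble the attempt's stdout transcript.

        Prefers text fragments extracted from stream-JSON events, then any
        non-JSON lines; falls back to the raw lines only when both are empty.
        """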
fragment_text = "\n".join(fragment for fragment in extracted_fragments if fragment).strip()
non_json_text = "\n".join(line for line in non_json_lines if line).strip()
raw_text = "\n".join(line for line in raw_lines if line).strip()
parts: list[str] = []
if fragment_text:
parts.append(fragment_text)
if non_json_text:
parts.append(non_json_text)
if not parts and raw_text:
parts.append(raw_text)
return "\n\n".join(parts).strip()
def _run_fake(
self,
stage: StageSpec,
prompt: str,
paths: RunPaths,
attempt_no: int,
continue_session: bool = False,
) -> OperatorResult:
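        """Simulate a stage without invoking the CLI.

        Writes deterministic placeholder artifacts plus a stage summary in the
        required markdown structure so the workflow, run layout, and approval
        loop can be exercised end to end.
        """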
session_id = self._resolve_stage_session_id(paths, stage, continue_session=continue_session)
self._persist_stage_session_id(paths, stage, session_id)
approved_memory = self._extract_approved_memory_from_prompt(prompt) or read_text(paths.memory)
previous_summaries = approved_stage_summaries(approved_memory)
agent_label = self._agent_label()
note_path = paths.notes_dir / f"{stage.slug}_fake_operator_note.md"
stage_tmp_path = paths.stage_tmp_file(stage)
user_goal = read_text(paths.user_input).strip()
write_text(
note_path,
(
f"# Fake Operator Note: {stage.stage_title}\n\n"
"This file was produced by fake-operator mode to validate the workflow, "
"directory layout, stage summary handling, and approval loop without "
f"calling {agent_label}."
),
)
if stage.number == 1 and "smoke test" in user_goal.lower():
intro_path = paths.notes_dir / "autor_intro.md"
sources_path = paths.literature_dir / "sources.json"
claims_path = paths.literature_dir / "claims.json"
write_text(
intro_path,
(
"# AutoR Overview\n\n"
"AutoR is a terminal-first, file-based, human-in-the-loop research workflow runner.\n\n"
"It executes a fixed 8-stage pipeline:\n"
"1. Literature survey\n"
"2. Hypothesis generation\n"
"3. Study design\n"
"4. Implementation\n"
"5. Experimentation\n"
"6. Analysis\n"
"7. Writing\n"
"8. Dissemination\n\n"
"Every stage writes artifacts into an isolated run directory and must be explicitly approved by the user.\n"
),
)
write_text(
sources_path,
json.dumps(
{
"sources": [
{
"source_id": "S1",
"title": "AutoR product overview",
"path": relative_to_run(intro_path, paths.run_root),
}
]
},
indent=2,
),
)
write_text(
claims_path,
json.dumps(
{
"claims": [
{
"claim_id": "CL1",
"statement": "AutoR is a terminal-first, file-based, human-in-the-loop research workflow runner.",
"source_ids": ["S1"],
}
]
},
indent=2,
),
)
stage_markdown = (
f"# Stage {stage.number:02d}: {stage.display_name}\n\n"
"## Objective\n"
"Introduce AutoR during a fake-mode smoke test while demonstrating the terminal UI, "
"stage summary contract, and approval loop.\n\n"
"## Previously Approved Stage Summaries\n"
f"{previous_summaries}\n\n"
"## What I Did\n"
f"- Entered fake-operator mode so the full terminal workflow could be demonstrated without calling {agent_label}.\n"
"- Generated a short markdown introduction to AutoR for recording and smoke-test purposes.\n"
f"- Wrote overview material to `{relative_to_run(intro_path, paths.run_root)}` and preserved the fake operator note at `{relative_to_run(note_path, paths.run_root)}`.\n"
f"- Produced a valid stage summary draft at `{relative_to_run(stage_tmp_path, paths.run_root)}`.\n\n"
"## Key Results\n"
"- AutoR is a terminal-first, file-based, human-in-the-loop research workflow runner.\n"
"- The workflow is fixed into 8 stages: literature, hypothesis, design, implementation, experimentation, analysis, writing, and dissemination.\n"
"- Every run is isolated under `runs/<run_id>/`, with prompts, logs, stage summaries, and workspace artifacts written to disk.\n"
"- The UI smoke test confirms the current terminal interface, menu interaction, and stage-summary rendering path are working.\n"
"- This output is a product demo and workflow intro, not a real research result.\n\n"
"## Files Produced\n"
f"- `{relative_to_run(intro_path, paths.run_root)}`\n"
f"- `{relative_to_run(sources_path, paths.run_root)}`\n"
f"- `{relative_to_run(claims_path, paths.run_root)}`\n"
f"- `{relative_to_run(note_path, paths.run_root)}`\n"
f"- `{relative_to_run(stage_tmp_path, paths.run_root)}`\n\n"
"## Decision Ledger\n"
"- **Open Questions**: Which real research goal should be used for the first live run?\n"
"- **Locked Decisions**: Keep the smoke test in fake mode so the demo stays deterministic.\n"
"- **Assumptions**: The current terminal UI and approval loop are the main things being demonstrated.\n"
"- **Rejected Alternatives**: Treating the smoke test as a real research result.\n\n"
"## Suggestions for Refinement\n"
f"1. Switch from fake mode to the real {agent_label} operator and record a live stage execution.\n"
"2. Tune the terminal theme, colors, and screen layout for recording aesthetics.\n"
"3. Expand the intro note with a concrete example run and artifact tour before moving on.\n\n"
"## Your Options\n"
"1. Use suggestion 1\n"
"2. Use suggestion 2\n"
"3. Use suggestion 3\n"
"4. Refine with your own feedback\n"
"5. Approve and continue\n"
"6. Abort\n"
)
elif stage.slug == "01_literature_survey":
sources_path = paths.literature_dir / "sources.json"
claims_path = paths.literature_dir / "claims.json"
write_text(
sources_path,
json.dumps(
{
"sources": [
{
"source_id": "S1",
"title": "Foundational long-context prompting study",
"path": relative_to_run(note_path, paths.run_root),
},
{
"source_id": "S2",
"title": "Retrieval-augmented reasoning baseline",
"path": relative_to_run(note_path, paths.run_root),
},
]
},
indent=2,
),
)
write_text(
claims_path,
json.dumps(
{
"claims": [
{
"claim_id": "CL1",
"statement": "Long-context prompting degrades when relevant evidence is diffuse.",
"source_ids": ["S1"],
},
{
"claim_id": "CL2",
"statement": "Retrieval is a common mitigation strategy in recent reasoning systems.",
"source_ids": ["S1", "S2"],
},
]
},
indent=2,
),
)
stage_markdown = (
f"# Stage {stage.number:02d}: {stage.display_name}\n\n"
"## Objective\n"
"Validate the literature-survey workflow using a minimal claim-to-source ledger.\n\n"
"## Previously Approved Stage Summaries\n"
f"{previous_summaries}\n\n"
"## What I Did\n"
f"- Executed fake-operator mode instead of invoking {agent_label}.\n"
f"- Wrote supporting source and claim ledgers to `{relative_to_run(sources_path, paths.run_root)}` and `{relative_to_run(claims_path, paths.run_root)}`.\n"
f"- Preserved the fake operator note at `{relative_to_run(note_path, paths.run_root)}`.\n"
"- Produced a valid Stage 01 summary with traceable survey artifacts.\n\n"
"## Key Results\n"
"- The fake literature run now produces a structured source catalog and claim ledger.\n"
"- Downstream stages can inherit grounded survey claims instead of only prose.\n"
"- This remains workflow scaffolding, not a real literature review.\n\n"
"## Files Produced\n"
f"- `{relative_to_run(sources_path, paths.run_root)}`\n"
f"- `{relative_to_run(claims_path, paths.run_root)}`\n"
f"- `{relative_to_run(note_path, paths.run_root)}`\n"
f"- `{relative_to_run(stage_tmp_path, paths.run_root)}`\n\n"
"## Decision Ledger\n"
"- **Open Questions**: Which real papers should replace the fake source catalog?\n"
"- **Locked Decisions**: Stage 01 should emit traceable survey evidence, not only prose.\n"
"- **Assumptions**: The fake ledgers are placeholders for workflow validation only.\n"
"- **Rejected Alternatives**: Approving a literature stage with no claim-to-source trace.\n\n"
"## Suggestions for Refinement\n"
"1. Replace the fake source ledger with real paper metadata before continuing.\n"
"2. Expand the claim ledger so it captures conflicting evidence, not only supporting evidence.\n"
"3. Add dataset and benchmark notes to the literature directory alongside the ledgers.\n\n"
"## Your Options\n"
"1. Use suggestion 1\n"
"2. Use suggestion 2\n"
"3. Use suggestion 3\n"
"4. Refine with your own feedback\n"
"5. Approve and continue\n"
"6. Abort\n"
)
elif stage.slug == "02_hypothesis_generation":
hypotheses_path = paths.notes_dir / "hypotheses.md"
write_text(
hypotheses_path,
(
"# Typed Hypotheses\n\n"
"## Theoretical Propositions\n"
"- T1: Retrieval addresses context fragmentation.\n\n"
"## Empirical Hypotheses\n"
"- H1: Retrieval will improve long-context accuracy.\n\n"
"## Paper Claims\n"
"- C1: Retrieval is a practical long-context fix.\n"
),
)
stage_markdown = (
f"# Stage {stage.number:02d}: {stage.display_name}\n\n"
"## Objective\n"
"Validate the Stage 02 workflow using typed propositions, empirical hypotheses, and provisional paper claims.\n\n"
"## Previously Approved Stage Summaries\n"
f"{previous_summaries}\n\n"
"## What I Did\n"
f"- Executed fake-operator mode instead of invoking {agent_label}.\n"
f"- Wrote supporting hypothesis notes to `{relative_to_run(hypotheses_path, paths.run_root)}`.\n"
f"- Preserved the fake operator note at `{relative_to_run(note_path, paths.run_root)}`.\n"
"- Produced a typed Stage 02 summary so downstream stages can consume structured hypothesis context.\n\n"
"## Key Results\n\n"
"### Theoretical Propositions\n"
"- **T1**: Retrieval reduces context fragmentation in long-context prompting.\n"
" - Derived from: Prior long-context failure patterns summarized in Stage 01.\n\n"
"### Empirical Hypotheses\n"
"- **H1**: Adding retrieval will improve long-context task accuracy by at least 8 points.\n"
" - Depends on: T1\n"
" - Verification: Compare retrieval-on vs retrieval-off on the benchmark suite.\n\n"
"### Paper Claims (Provisional)\n"
"- **C1**: Retrieval is a practical way to stabilize long-context reasoning.\n"
" - Status: proposed\n\n"
"## Files Produced\n"
f"- `{relative_to_run(hypotheses_path, paths.run_root)}`\n"
f"- `{relative_to_run(paths.hypothesis_manifest, paths.run_root)}`\n"
f"- `{relative_to_run(note_path, paths.run_root)}`\n"
f"- `{relative_to_run(stage_tmp_path, paths.run_root)}`\n\n"
"## Decision Ledger\n"
"- **Open Questions**: How large should the retrieval gain threshold be?\n"
"- **Locked Decisions**: Keep typed claims separated for downstream stages.\n"
"- **Assumptions**: Stage 03 onward will treat empirical hypotheses as the main test targets.\n"
"- **Rejected Alternatives**: Mixing theory, hypotheses, and paper narrative into one prose block.\n\n"
"## Suggestions for Refinement\n"
"1. Add a second empirical hypothesis about latency trade-offs.\n"
"2. Tighten the effect-size threshold with more prior evidence.\n"
"3. Add a weaker fallback paper claim in case the main hypothesis is only partially supported.\n\n"
"## Your Options\n"
"1. Use suggestion 1\n"
"2. Use suggestion 2\n"
"3. Use suggestion 3\n"
"4. Refine with your own feedback\n"
"5. Approve and continue\n"
"6. Abort\n"
)
else:
stage_markdown = (
f"# Stage {stage.number:02d}: {stage.display_name}\n\n"
"## Objective\n"
f"Validate the workflow path for {stage.display_name} and confirm that the "
"manager, operator, and filesystem contracts are functioning.\n\n"
"## Previously Approved Stage Summaries\n"
f"{previous_summaries}\n\n"
"## What I Did\n"
f"- Executed fake-operator mode instead of invoking {agent_label}.\n"
f"- Created a placeholder artifact at `{relative_to_run(note_path, paths.run_root)}`.\n"
f"- Simulated a complete stage attempt for `{stage.slug}`.\n\n"
"## Key Results\n"
"- The orchestration loop, run layout, and stage-summary validation path are active.\n"
f"- Prompt length for this attempt was {len(prompt.split())} words.\n"
"- No research claim from this stage should be treated as real output.\n\n"
"## Files Produced\n"
f"- `{relative_to_run(note_path, paths.run_root)}`\n"
f"- `{relative_to_run(stage_tmp_path, paths.run_root)}`\n\n"
"## Decision Ledger\n"
f"- **Open Questions**: What real evidence should replace the fake output for {stage.display_name}?\n"
f"- **Locked Decisions**: Keep `{stage.slug}` inside the current run layout and approval contract.\n"
"- **Assumptions**: This smoke run is only validating workflow mechanics.\n"
"- **Rejected Alternatives**: Treating placeholder artifacts as real research deliverables.\n\n"
"## Suggestions for Refinement\n"
f"1. Replace fake mode with the real {agent_label} operator and inspect the resulting artifacts.\n"
"2. Tighten the stage prompt to better reflect the target of actual publication-grade work.\n"
"3. Add stronger expectations for the concrete artifacts and files outputs from this stage.\n\n"
"## Your Options\n"
"1. Use suggestion 1\n"
"2. Use suggestion 2\n"
"3. Use suggestion 3\n"
"4. Refine with your own feedback\n"
"5. Approve and continue\n"
"6. Abort\n"
)
write_text(stage_tmp_path, stage_markdown)
append_jsonl(
paths.logs_raw,
{
"_meta": {
"stage": stage.slug,
"attempt": attempt_no,
"mode": "fake_continue" if continue_session else "fake_start",
"session_id": session_id,
}
},
)
return OperatorResult(
success=True,
exit_code=0,
stdout="Fake operator completed successfully.",
stderr="",
stage_file_path=stage_tmp_path,
session_id=session_id,
)
def _extract_approved_memory_from_prompt(self, prompt: str) -> str | None:
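        """Return the body of the prompt's `# Approved Memory` section, if any."""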
match = re.search(
r"^# Approved Memory\s*$\n?(.*?)(?=^# [^\n]+\s*$|\Z)",
prompt,
flags=re.MULTILINE | re.DOTALL,
)
if not match:
return None
extracted = match.group(1).strip()
return extracted or None
def _resolve_stage_session_id(
self,
paths: RunPaths,
stage: StageSpec,
continue_session: bool,
allow_create: bool = True,
) -> str | None:
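        """Pick the session id to use for this stage.

        Prefers a non-broken id from the session-state JSON, then the plain
        session file (unless it matches the known-broken id). Otherwise mints a
        fresh UUID, or returns None when resuming with `allow_create=False`.
        """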
broken_session_id: str | None = None
session_state_path = paths.stage_session_state_file(stage)
if session_state_path.exists():
payload = json.loads(read_text(session_state_path))
session_id = str(payload.get("session_id") or "").strip()
broken = bool(payload.get("broken", False))
if session_id and not broken:
return session_id
if session_id and broken:
broken_session_id = session_id
session_file = paths.stage_session_file(stage)
if session_file.exists():
session_id = read_text(session_file).strip()
if session_id and session_id != broken_session_id:
return session_id
if continue_session and not allow_create:
return None
return str(uuid.uuid4())
def _select_effective_session_id(
self,
*,
requested_session_id: str | None,
observed_session_id: str | None,
success: bool,
) -> str | None:
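        """Choose which session id to persist after an attempt.

        The base implementation keeps the requested id and ignores the id
        observed in the stream; this reads as an override point for backends
        that trust the stream-reported id instead.
        """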
del observed_session_id, success
return requested_session_id
def _persist_stage_session_id(self, paths: RunPaths, stage: StageSpec, session_id: str | None) -> None:
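        """Record `session_id` for the stage and mark it as not broken (no-op if falsy)."""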
if not session_id:
return
write_text(paths.stage_session_file(stage), session_id)
self._update_session_state(
paths,
stage,
session_id,
{
"broken": False,
"updated_at": self._now(),
},
)
def _extract_session_id(self, payload: dict[str, object]) -> str | None:
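        """Return the session identifier reported by a stream event.

        Checks `session_id` first and falls back to `thread_id`.
        """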
value = payload.get("session_id")
if isinstance(value, str) and value.strip():
return value.strip()
value = payload.get("thread_id")
if isinstance(value, str) and value.strip():
return value.strip()