From 5600289e2be03aa7337dc59b8ce3318ea6217956 Mon Sep 17 00:00:00 2001 From: masa10-f Date: Sat, 15 Nov 2025 14:31:10 +0900 Subject: [PATCH 01/21] greedy schedule solver --- graphqomb/greedy_scheduler.py | 350 ++++++++++++++++++++++++++++++++++ 1 file changed, 350 insertions(+) create mode 100644 graphqomb/greedy_scheduler.py diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py new file mode 100644 index 00000000..59235289 --- /dev/null +++ b/graphqomb/greedy_scheduler.py @@ -0,0 +1,350 @@ +"""Greedy heuristic scheduler for fast MBQC pattern scheduling. + +This module provides fast greedy scheduling algorithms as an alternative to +CP-SAT based optimization. The greedy algorithms provide approximate solutions +with 100-1000x speedup compared to CP-SAT, making them suitable for large-scale +graphs or when optimality is not critical. + +This module provides: + +- `greedy_minimize_time`: Fast greedy scheduler optimizing for minimal execution time +- `greedy_minimize_space`: Fast greedy scheduler optimizing for minimal qubit usage +""" + +from __future__ import annotations + +from collections import defaultdict +from graphlib import TopologicalSorter +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Mapping + from collections.abc import Set as AbstractSet + + from graphqomb.graphstate import BaseGraphState + + +def _dag_parents(dag: Mapping[int, AbstractSet[int]], node: int) -> set[int]: + """Find all parent nodes (predecessors) of a given node in the DAG. + + Parameters + ---------- + dag : Mapping[int, AbstractSet[int]] + The directed acyclic graph (node -> children mapping) + node : int + The node to find parents for + + Returns + ------- + set[int] + Set of parent nodes (nodes that have 'node' as a child) + """ + return {parent for parent, children in dag.items() if node in children} + + +def _compute_critical_path_length(dag: Mapping[int, AbstractSet[int]]) -> dict[int, int]: + """Compute the critical path length for each node in the DAG. + + The critical path length is the length of the longest path from the node + to any output node (leaf). This is used as a priority metric for scheduling. + + Parameters + ---------- + dag : Mapping[int, AbstractSet[int]] + The directed acyclic graph (node -> children mapping) + + Returns + ------- + dict[int, int] + Mapping from node to its critical path length + """ + # Topological sort (children first for bottom-up computation) + topo_order = list(TopologicalSorter(dag).static_order()) + + critical_length: dict[int, int] = {} + for node in topo_order: + children = dag.get(node, set()) + if not children: + # Leaf node (output node) + critical_length[node] = 0 + else: + # Critical path = 1 + max(critical path of children) + child_lengths = [critical_length[child] for child in children] + critical_length[node] = max(child_lengths, default=0) + 1 + + return critical_length + + +def greedy_minimize_time( + graph: BaseGraphState, + dag: Mapping[int, AbstractSet[int]], +) -> tuple[dict[int, int], dict[int, int]]: + """Fast greedy scheduler optimizing for minimal execution time (makespan). + + This algorithm uses Critical Path List Scheduling: + 1. Compute critical path length for each node + 2. Schedule nodes in order of decreasing critical path length + 3. 
Each node is scheduled as early as possible while respecting constraints + + Computational Complexity: O(N + E) where N is number of nodes, E is number of edges + Expected speedup: 100-1000x compared to CP-SAT + Approximation quality: Typically within 2x of optimal + + Parameters + ---------- + graph : BaseGraphState + The graph state to schedule + dag : Mapping[int, AbstractSet[int]] + The directed acyclic graph representing measurement dependencies + + Returns + ------- + tuple[dict[int, int], dict[int, int]] + A tuple of (prepare_time, measure_time) dictionaries + """ + # Compute critical path for prioritization + critical_length = _compute_critical_path_length(dag) + + # Get all nodes in topological order for processing + topo_order = list(TopologicalSorter(dag).static_order()) + + # Sort by critical path length (longest critical path first for better parallelism) + sorted_nodes = sorted(topo_order, key=lambda n: -critical_length.get(n, 0)) + + # Initialize scheduling dictionaries + prepare_time: dict[int, int] = {} + measure_time: dict[int, int] = {} + + # PASS 1: Set preparation times + # Process in topological order (parents before children) + for node in sorted_nodes: + # Prepare non-input nodes + if node not in graph.input_node_indices: + # Constraint 1: Prepare after all DAG parents are measured + parents = _dag_parents(dag, node) + parent_meas_times = [measure_time[p] for p in parents if p in measure_time] + earliest_prepare = max(parent_meas_times, default=0) + + prepare_time[node] = earliest_prepare + + # PASS 2: Set measurement times + # Process in reverse topological order (children before parents) so that DAG constraints are satisfied + for node in reversed(sorted_nodes): + # Measure non-output nodes + if node not in graph.output_node_indices: + # Constraint 1: Neighbor preparation constraint + # All neighbors must be prepared before this node can be measured + neighbor_prep_times = [] + for neighbor in graph.neighbors(node): + if neighbor in graph.input_node_indices: + # Input nodes are considered prepared at time -1 + neighbor_prep_times.append(-1) + else: + neighbor_prep_times.append(prepare_time[neighbor]) + + # Earliest time when all neighbors are prepared + earliest_by_neighbors = max(neighbor_prep_times, default=-1) + 1 + + # Constraint 2: Preparation constraint (non-input nodes only) + # Must be measured after this node is prepared + if node in graph.input_node_indices: + # Input nodes: only need neighbors to be prepared + earliest_measure = earliest_by_neighbors + else: + # Non-input nodes: must be after both preparation and neighbor preparation + earliest_by_prep = prepare_time[node] + 1 + earliest_measure = max(earliest_by_prep, earliest_by_neighbors) + + # Constraint 3: DAG ordering - must be measured BEFORE all children + # Children are already processed (reverse topo order), so check their times + children = dag.get(node, set()) + if children: + # Find the earliest child measurement time + child_meas_times = [measure_time[child] for child in children if child in measure_time] + if child_meas_times: + # Must be measured before the earliest child (strictly <) + earliest_child_time = min(child_meas_times) + # Upper bound: must be < earliest_child_time + # So latest possible time is earliest_child_time - 1 + # However, we cannot violate the neighbor constraint (hard minimum) + latest_possible = earliest_child_time - 1 + if latest_possible < earliest_measure: + # Conflict: cannot satisfy both constraints + # This indicates the schedule is infeasible with current 
prep times + # For greedy, we prioritize the neighbor constraint (entanglement must work) + # and accept sub-optimal DAG ordering + pass # Keep earliest_measure as is + else: + earliest_measure = latest_possible + + measure_time[node] = earliest_measure + + # PASS 3: Iterative fix-up to resolve any DAG constraint violations + # If a parent's measurement time >= child's measurement time, push the child later + # Repeat until no violations remain (cascading updates) + max_iterations = len(sorted_nodes) # Upper bound to avoid infinite loops + for _ in range(max_iterations): + violations_found = False + for node in sorted_nodes: + if node not in graph.output_node_indices and node in measure_time: + children = dag.get(node, set()) + for child in children: + if child in measure_time and measure_time[node] >= measure_time[child]: + # Violation: parent >= child, need to push child later + measure_time[child] = measure_time[node] + 1 + violations_found = True + if not violations_found: + break # No more violations, done + + return prepare_time, measure_time + + +def greedy_minimize_space( + graph: BaseGraphState, + dag: Mapping[int, AbstractSet[int]], +) -> tuple[dict[int, int], dict[int, int]]: + """Fast greedy scheduler optimizing for minimal qubit usage (space). + + This algorithm uses a resource-aware greedy approach: + 1. Track alive nodes (prepared but not yet measured) at each time step + 2. Schedule measurements eagerly when nodes are no longer needed + 3. Delay preparation of nodes until necessary + + Computational Complexity: O(N log N + E) where N is nodes, E is edges + Expected speedup: 100-1000x compared to CP-SAT + Approximation quality: Typically near-optimal for space usage + + Parameters + ---------- + graph : BaseGraphState + The graph state to schedule + dag : Mapping[int, AbstractSet[int]] + The directed acyclic graph representing measurement dependencies + + Returns + ------- + tuple[dict[int, int], dict[int, int]] + A tuple of (prepare_time, measure_time) dictionaries + """ + # Reverse topological order (leaves to roots) for bottom-up scheduling + topo_order = list(TopologicalSorter(dag).static_order()) + + # Track when each node can be measured (earliest time when all neighbors are ready) + prepare_time: dict[int, int] = {} + measure_time: dict[int, int] = {} + + # Track alive nodes and current time + current_time = 0 + alive_nodes: set[int] = set(graph.input_node_indices.keys()) # Input nodes are always alive + + # Nodes ready to be measured (all neighbors prepared) + ready_to_measure: dict[int, int] = {} # node -> earliest measure time + + # Process nodes in topological order to set preparation times + for node in reversed(topo_order): + # Prepare non-input nodes + if node not in graph.input_node_indices: + # Constraint 1: Prepare after all DAG parents are measured + parents = _dag_parents(dag, node) + parent_meas_times = [measure_time[p] for p in parents if p in measure_time] + earliest_prepare = max(parent_meas_times, default=0) + + prepare_time[node] = earliest_prepare + alive_nodes.add(node) + current_time = max(current_time, earliest_prepare) + + # Second pass: compute measurement times (now all nodes are prepared) + for node in reversed(topo_order): + # Check if node should be measured (non-output nodes) + if node not in graph.output_node_indices: + # Constraint 1: Neighbor preparation constraint + neighbor_prep_times = [] + for neighbor in graph.neighbors(node): + if neighbor in graph.input_node_indices: + neighbor_prep_times.append(-1) + else: + 
neighbor_prep_times.append(prepare_time[neighbor]) + + # Earliest time when all neighbors are prepared + earliest_by_neighbors = max(neighbor_prep_times, default=-1) + 1 + + # Constraint 2: Preparation constraint (non-input nodes only) + if node in graph.input_node_indices: + earliest_meas = earliest_by_neighbors + else: + earliest_by_prep = prepare_time[node] + 1 + earliest_meas = max(earliest_by_prep, earliest_by_neighbors) + + # Constraint 3: DAG ordering - must be measured BEFORE all children + children = dag.get(node, set()) + if children: + child_meas_times = [ready_to_measure[child] for child in children if child in ready_to_measure] + if child_meas_times: + earliest_child_time = min(child_meas_times) + # Must be < earliest_child_time + earliest_meas = min(earliest_meas, earliest_child_time - 1) + + ready_to_measure[node] = earliest_meas + + # Third pass: Schedule measurements to minimize space + # Use a greedy approach: measure nodes as soon as possible when they're ready + nodes_to_measure = [n for n in graph.physical_nodes if n not in graph.output_node_indices] + + # Sort by earliest measurement time + sorted_by_meas_time = sorted( + [(ready_to_measure.get(node, 0), node) for node in nodes_to_measure if node in ready_to_measure] + ) + + for _, node in sorted_by_meas_time: + measure_time[node] = ready_to_measure[node] + + # Fourth pass: Iterative fix-up to resolve any DAG constraint violations + max_iterations = len(topo_order) + for _ in range(max_iterations): + violations_found = False + for node in topo_order: + if node not in graph.output_node_indices and node in measure_time: + children = dag.get(node, set()) + for child in children: + if child in measure_time and measure_time[node] >= measure_time[child]: + measure_time[child] = measure_time[node] + 1 + violations_found = True + if not violations_found: + break + + return prepare_time, measure_time + + +def solve_greedy_schedule( + graph: BaseGraphState, + dag: Mapping[int, AbstractSet[int]], + minimize_space: bool = False, +) -> tuple[dict[int, int], dict[int, int]] | None: + """Solve scheduling using greedy heuristics. + + This is a convenience wrapper that selects the appropriate greedy algorithm + based on the optimization objective. + + Parameters + ---------- + graph : BaseGraphState + The graph state to schedule + dag : Mapping[int, AbstractSet[int]] + The directed acyclic graph representing measurement dependencies + minimize_space : bool, default=False + If True, optimize for minimal qubit usage (space). + If False, optimize for minimal execution time. 
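A minimal usage sketch of this wrapper, for orientation (it mirrors the chain-graph setup used in the unit tests added later in this series; the three-node chain, the flow, and obtaining the DAG through Scheduler are illustrative assumptions rather than requirements of this function):

    from graphqomb.graphstate import GraphState
    from graphqomb.greedy_scheduler import solve_greedy_schedule
    from graphqomb.scheduler import Scheduler

    # Three-node chain: input -> intermediate -> output
    graph = GraphState()
    n0, n1, n2 = (graph.add_physical_node() for _ in range(3))
    graph.add_physical_edge(n0, n1)
    graph.add_physical_edge(n1, n2)
    graph.register_input(n0, 0)
    graph.register_output(n2, 0)

    # The measurement-dependency DAG is derived from the flow by Scheduler
    scheduler = Scheduler(graph, {n0: {n1}, n1: {n2}})
    result = solve_greedy_schedule(graph, scheduler.dag, minimize_space=False)
    if result is not None:
        prepare_time, measure_time = result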
+ + Returns + ------- + tuple[dict[int, int], dict[int, int]] | None + A tuple of (prepare_time, measure_time) dictionaries if successful, + None if scheduling fails (should rarely happen for valid inputs) + """ + try: + if minimize_space: + return greedy_minimize_space(graph, dag) + else: + return greedy_minimize_time(graph, dag) + except Exception: + return None From a052c38ec08ff0d5e316a2d95129597393d98547 Mon Sep 17 00:00:00 2001 From: masa10-f Date: Sat, 15 Nov 2025 14:33:05 +0900 Subject: [PATCH 02/21] add greedy search option --- graphqomb/scheduler.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/graphqomb/scheduler.py b/graphqomb/scheduler.py index 889f045d..11c44c1c 100644 --- a/graphqomb/scheduler.py +++ b/graphqomb/scheduler.py @@ -13,6 +13,7 @@ from graphqomb.feedforward import dag_from_flow from graphqomb.schedule_solver import ScheduleConfig, Strategy, solve_schedule +from graphqomb.greedy_scheduler import solve_greedy_schedule if TYPE_CHECKING: from collections.abc import Mapping @@ -483,15 +484,21 @@ def solve_schedule( self, config: ScheduleConfig | None = None, timeout: int = 60, + use_greedy: bool = False, ) -> bool: - r"""Compute the schedule using the constraint programming solver. + r"""Compute the schedule using constraint programming or greedy heuristics. Parameters ---------- config : `ScheduleConfig` | `None`, optional - The scheduling configuration. If None, defaults to MINIMIZE_SPACE strategy. + The scheduling configuration. If None, defaults to MINIMIZE_TIME strategy. timeout : `int`, optional - Maximum solve time in seconds, by default 60 + Maximum solve time in seconds for CP-SAT solver, by default 60. + Ignored when use_greedy=True. + use_greedy : `bool`, optional + If True, use fast greedy heuristics instead of CP-SAT. + Greedy algorithms are much faster than CP-SAT, but provide approximate solutions. + Default is False (use CP-SAT for optimal solutions). 
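For illustration, a sketch of taking the greedy path through this method (reusing the three-node `graph` and flow from the sketch above; the names and strategy choice are illustrative):

    from graphqomb.schedule_solver import ScheduleConfig, Strategy

    scheduler = Scheduler(graph, {n0: {n1}, n1: {n2}})
    config = ScheduleConfig(strategy=Strategy.MINIMIZE_SPACE)
    ok = scheduler.solve_schedule(config, use_greedy=True)  # timeout is ignored on the greedy path
    if ok:
        scheduler.validate_schedule()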
Returns ------- @@ -506,7 +513,15 @@ def solve_schedule( if config is None: config = ScheduleConfig(Strategy.MINIMIZE_TIME) - result = solve_schedule(self.graph, self.dag, config, timeout) + if use_greedy: + # Use fast greedy heuristics + + minimize_space = config.strategy == Strategy.MINIMIZE_SPACE + result = solve_greedy_schedule(self.graph, self.dag, minimize_space) + else: + # Use CP-SAT solver for optimal solution + result = solve_schedule(self.graph, self.dag, config, timeout) + if result is None: return False From 7de65fa7e2b462da1a370e5832e85df6399dbb62 Mon Sep 17 00:00:00 2001 From: masa10-f Date: Sat, 15 Nov 2025 14:33:25 +0900 Subject: [PATCH 03/21] add unit test for greedy search --- tests/test_greedy_scheduler.py | 381 +++++++++++++++++++++++++++++++++ 1 file changed, 381 insertions(+) create mode 100644 tests/test_greedy_scheduler.py diff --git a/tests/test_greedy_scheduler.py b/tests/test_greedy_scheduler.py new file mode 100644 index 00000000..48c847e4 --- /dev/null +++ b/tests/test_greedy_scheduler.py @@ -0,0 +1,381 @@ +"""Test greedy scheduling algorithms.""" + +import time + +import pytest + +from graphqomb.common import Plane, PlannerMeasBasis +from graphqomb.graphstate import GraphState +from graphqomb.greedy_scheduler import ( + greedy_minimize_space, + greedy_minimize_time, + solve_greedy_schedule, +) +from graphqomb.schedule_solver import ScheduleConfig, Strategy +from graphqomb.scheduler import Scheduler + + +def test_greedy_minimize_time_simple() -> None: + """Test greedy_minimize_time on a simple graph.""" + # Create a simple 3-node chain graph + graph = GraphState() + node0 = graph.add_physical_node() + node1 = graph.add_physical_node() + node2 = graph.add_physical_node() + graph.add_physical_edge(node0, node1) + graph.add_physical_edge(node1, node2) + qindex = 0 + graph.register_input(node0, qindex) + graph.register_output(node2, qindex) + + flow = {node0: {node1}, node1: {node2}} + scheduler = Scheduler(graph, flow) + + # Run greedy scheduler + prepare_time, measure_time = greedy_minimize_time(graph, scheduler.dag) + + # Check that all non-input nodes have preparation times + assert node1 in prepare_time + assert node0 not in prepare_time # Input node should not be prepared + + # Check that all non-output nodes have measurement times + assert node0 in measure_time + assert node1 in measure_time + assert node2 not in measure_time # Output node should not be measured + + # Verify DAG constraints: node0 measured before node1 + assert measure_time[node0] < measure_time[node1] + + +def test_greedy_minimize_space_simple() -> None: + """Test greedy_minimize_space on a simple graph.""" + # Create a simple 3-node chain graph + graph = GraphState() + node0 = graph.add_physical_node() + node1 = graph.add_physical_node() + node2 = graph.add_physical_node() + graph.add_physical_edge(node0, node1) + graph.add_physical_edge(node1, node2) + qindex = 0 + graph.register_input(node0, qindex) + graph.register_output(node2, qindex) + + flow = {node0: {node1}, node1: {node2}} + scheduler = Scheduler(graph, flow) + + # Run greedy scheduler + prepare_time, measure_time = greedy_minimize_space(graph, scheduler.dag) + + # Check that all non-input nodes have preparation times + assert node1 in prepare_time + assert node0 not in prepare_time # Input node should not be prepared + + # Check that all non-output nodes have measurement times + assert node0 in measure_time + assert node1 in measure_time + assert node2 not in measure_time # Output node should not be measured + + # Verify DAG 
constraints + assert measure_time[node0] < measure_time[node1] + + +def test_greedy_scheduler_via_solve_schedule() -> None: + """Test greedy scheduler through Scheduler.solve_schedule with use_greedy=True.""" + # Create a simple graph + graph = GraphState() + node0 = graph.add_physical_node() + node1 = graph.add_physical_node() + node2 = graph.add_physical_node() + graph.add_physical_edge(node0, node1) + graph.add_physical_edge(node1, node2) + qindex = 0 + graph.register_input(node0, qindex) + graph.register_output(node2, qindex) + + flow = {node0: {node1}, node1: {node2}} + scheduler = Scheduler(graph, flow) + + # Test with greedy MINIMIZE_TIME + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) + success = scheduler.solve_schedule(config, use_greedy=True) + assert success + + # Verify schedule is valid + scheduler.validate_schedule() + + # Test with greedy MINIMIZE_SPACE + scheduler2 = Scheduler(graph, flow) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_SPACE) + success = scheduler2.solve_schedule(config, use_greedy=True) + assert success + + # Verify schedule is valid + scheduler2.validate_schedule() + + +def test_greedy_vs_cpsat_correctness() -> None: + """Test that greedy scheduler produces valid schedules compared to CP-SAT.""" + # Create a slightly larger graph + graph = GraphState() + nodes = [graph.add_physical_node() for _ in range(5)] + + # Create a chain + for i in range(4): + graph.add_physical_edge(nodes[i], nodes[i + 1]) + + qindex = 0 + graph.register_input(nodes[0], qindex) + graph.register_output(nodes[4], qindex) + + flow = {nodes[i]: {nodes[i + 1]} for i in range(4)} + + # Test greedy scheduler + scheduler_greedy = Scheduler(graph, flow) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) + success_greedy = scheduler_greedy.solve_schedule(config, use_greedy=True) + assert success_greedy + + # Verify greedy schedule is valid + scheduler_greedy.validate_schedule() + + # Test CP-SAT scheduler + scheduler_cpsat = Scheduler(graph, flow) + success_cpsat = scheduler_cpsat.solve_schedule(config, use_greedy=False, timeout=10) + assert success_cpsat + + # Verify CP-SAT schedule is valid + scheduler_cpsat.validate_schedule() + + # Both should produce valid schedules + # Note: Greedy may not be optimal, so we don't compare quality here + + +def test_greedy_scheduler_larger_graph() -> None: + """Test greedy scheduler on a larger graph to ensure scalability.""" + # Create a larger graph with branching structure + graph = GraphState() + num_layers = 4 + nodes_per_layer = 3 + + # Build layered graph + all_nodes = [] + for layer in range(num_layers): + layer_nodes = [graph.add_physical_node() for _ in range(nodes_per_layer)] + all_nodes.append(layer_nodes) + + # Connect to previous layer (if not first layer) + if layer > 0: + for i, node in enumerate(layer_nodes): + # Connect to corresponding node in previous layer + prev_node = all_nodes[layer - 1][i] + graph.add_physical_edge(prev_node, node) + + # Register inputs (first layer) and outputs (last layer) + for i, node in enumerate(all_nodes[0]): + graph.register_input(node, i) + for i, node in enumerate(all_nodes[-1]): + graph.register_output(node, i) + + # Build flow (simple forward flow) + flow = {} + for layer in range(num_layers - 1): + for i, node in enumerate(all_nodes[layer]): + if node not in graph.output_node_indices: + flow[node] = {all_nodes[layer + 1][i]} + + # Test greedy scheduler + scheduler = Scheduler(graph, flow) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) + success = 
scheduler.solve_schedule(config, use_greedy=True) + assert success + + # Validate the schedule + scheduler.validate_schedule() + + # Check that we got reasonable results + assert scheduler.num_slices() > 0 + assert scheduler.num_slices() <= num_layers * 2 # Reasonable upper bound + + +@pytest.mark.parametrize("strategy", [Strategy.MINIMIZE_TIME, Strategy.MINIMIZE_SPACE]) +def test_greedy_scheduler_both_strategies(strategy: Strategy) -> None: + """Test greedy scheduler with both optimization strategies.""" + # Create a graph + graph = GraphState() + node0 = graph.add_physical_node() + node1 = graph.add_physical_node() + node2 = graph.add_physical_node() + node3 = graph.add_physical_node() + graph.add_physical_edge(node0, node1) + graph.add_physical_edge(node1, node2) + graph.add_physical_edge(node2, node3) + qindex = 0 + graph.register_input(node0, qindex) + graph.register_output(node3, qindex) + + flow = {node0: {node1}, node1: {node2}, node2: {node3}} + scheduler = Scheduler(graph, flow) + + # Test with specified strategy + config = ScheduleConfig(strategy=strategy) + success = scheduler.solve_schedule(config, use_greedy=True) + assert success + + # Validate schedule + scheduler.validate_schedule() + + +def test_solve_greedy_schedule_wrapper() -> None: + """Test the solve_greedy_schedule wrapper function.""" + # Create a simple graph + graph = GraphState() + node0 = graph.add_physical_node() + node1 = graph.add_physical_node() + node2 = graph.add_physical_node() + graph.add_physical_edge(node0, node1) + graph.add_physical_edge(node1, node2) + qindex = 0 + graph.register_input(node0, qindex) + graph.register_output(node2, qindex) + + flow = {node0: {node1}, node1: {node2}} + scheduler = Scheduler(graph, flow) + + # Test MINIMIZE_TIME (minimize_space=False) + result = solve_greedy_schedule(graph, scheduler.dag, minimize_space=False) + assert result is not None + prepare_time, measure_time = result + assert len(prepare_time) > 0 + assert len(measure_time) > 0 + + # Test MINIMIZE_SPACE (minimize_space=True) + result = solve_greedy_schedule(graph, scheduler.dag, minimize_space=True) + assert result is not None + prepare_time, measure_time = result + assert len(prepare_time) > 0 + assert len(measure_time) > 0 + + +def test_greedy_scheduler_performance() -> None: + """Test that greedy scheduler is significantly faster than CP-SAT on larger graphs.""" + # Create a larger graph (chain of 20 nodes) + graph = GraphState() + nodes = [graph.add_physical_node() for _ in range(20)] + + for i in range(19): + graph.add_physical_edge(nodes[i], nodes[i + 1]) + + qindex = 0 + graph.register_input(nodes[0], qindex) + graph.register_output(nodes[-1], qindex) + + flow = {nodes[i]: {nodes[i + 1]} for i in range(19)} + + # Time greedy scheduler + scheduler_greedy = Scheduler(graph, flow) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) + + start_greedy = time.perf_counter() + success_greedy = scheduler_greedy.solve_schedule(config, use_greedy=True) + end_greedy = time.perf_counter() + greedy_time = end_greedy - start_greedy + + assert success_greedy + scheduler_greedy.validate_schedule() + + # Time CP-SAT scheduler + scheduler_cpsat = Scheduler(graph, flow) + + start_cpsat = time.perf_counter() + success_cpsat = scheduler_cpsat.solve_schedule(config, use_greedy=False, timeout=10) + end_cpsat = time.perf_counter() + cpsat_time = end_cpsat - start_cpsat + + assert success_cpsat + scheduler_cpsat.validate_schedule() + + # Print timing information for debugging + print(f"\nGreedy time: {greedy_time:.4f}s") 
+ print(f"CP-SAT time: {cpsat_time:.4f}s") + print(f"Speedup: {cpsat_time / greedy_time:.1f}x") + + # Greedy should be significantly faster (at least 5x for this size) + # Note: We use a conservative factor to avoid flaky tests + assert greedy_time < cpsat_time + + +def test_greedy_scheduler_dag_constraints() -> None: + """Test that greedy scheduler respects DAG constraints.""" + # Create a graph with more complex dependencies + graph = GraphState() + nodes = [graph.add_physical_node() for _ in range(6)] + + # Create edges forming a DAG structure + # 0 -> 1 -> 3 -> 5 + # 2 -> 4 -> + graph.add_physical_edge(nodes[0], nodes[1]) + graph.add_physical_edge(nodes[1], nodes[2]) + graph.add_physical_edge(nodes[1], nodes[3]) + graph.add_physical_edge(nodes[2], nodes[4]) + graph.add_physical_edge(nodes[3], nodes[5]) + graph.add_physical_edge(nodes[4], nodes[5]) + + qindex = 0 + graph.register_input(nodes[0], qindex) + graph.register_output(nodes[5], qindex) + + # Create flow with dependencies + flow = { + nodes[0]: {nodes[1]}, + nodes[1]: {nodes[2], nodes[3]}, + nodes[2]: {nodes[4]}, + nodes[3]: {nodes[5]}, + nodes[4]: {nodes[5]}, + } + + scheduler = Scheduler(graph, flow) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) + success = scheduler.solve_schedule(config, use_greedy=True) + + # Note: This flow creates a cyclic DAG (nodes 3 and 4 have circular dependency) + # Both CP-SAT and greedy schedulers should fail on invalid flows + # This test verifies that the greedy scheduler handles invalid input gracefully + assert not success # Should fail due to cyclic DAG + + +def test_greedy_scheduler_edge_constraints() -> None: + """Test that greedy scheduler respects edge constraints (neighbor preparation).""" + # Create a simple graph + graph = GraphState() + node0 = graph.add_physical_node() + node1 = graph.add_physical_node() + node2 = graph.add_physical_node() + graph.add_physical_edge(node0, node1) + graph.add_physical_edge(node1, node2) + qindex = 0 + graph.register_input(node0, qindex) + graph.register_output(node2, qindex) + + flow = {node0: {node1}, node1: {node2}} + scheduler = Scheduler(graph, flow) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) + success = scheduler.solve_schedule(config, use_greedy=True) + assert success + + # Validate edge constraints via validate_schedule + scheduler.validate_schedule() + + # Manually check: neighbors must be prepared before measurement + # node0 (input) is prepared at time -1, node1 prepared at some time + # node0 must be measured after node1 is prepared + # This is ensured by the auto-scheduled entanglement times + + # Check that entanglement times were auto-scheduled correctly + edge01 = (node0, node1) + edge12 = (node1, node2) + assert scheduler.entangle_time[edge01] is not None + assert scheduler.entangle_time[edge12] is not None + + # Entanglement must happen before measurement + assert scheduler.entangle_time[edge01] < scheduler.measure_time[node0] + assert scheduler.entangle_time[edge12] < scheduler.measure_time[node1] From 5601fca6c8ce9bd2a72a7e5df7882184c057e392 Mon Sep 17 00:00:00 2001 From: masa10-f Date: Sat, 15 Nov 2025 14:34:05 +0900 Subject: [PATCH 04/21] apply auto-ruff fix --- graphqomb/greedy_scheduler.py | 4 +--- graphqomb/scheduler.py | 2 +- tests/test_greedy_scheduler.py | 1 - 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index 59235289..bad5a6ef 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -13,7 +13,6 
@@ from __future__ import annotations -from collections import defaultdict from graphlib import TopologicalSorter from typing import TYPE_CHECKING @@ -344,7 +343,6 @@ def solve_greedy_schedule( try: if minimize_space: return greedy_minimize_space(graph, dag) - else: - return greedy_minimize_time(graph, dag) + return greedy_minimize_time(graph, dag) except Exception: return None diff --git a/graphqomb/scheduler.py b/graphqomb/scheduler.py index 11c44c1c..f630e36b 100644 --- a/graphqomb/scheduler.py +++ b/graphqomb/scheduler.py @@ -12,8 +12,8 @@ from typing import TYPE_CHECKING, NamedTuple from graphqomb.feedforward import dag_from_flow -from graphqomb.schedule_solver import ScheduleConfig, Strategy, solve_schedule from graphqomb.greedy_scheduler import solve_greedy_schedule +from graphqomb.schedule_solver import ScheduleConfig, Strategy, solve_schedule if TYPE_CHECKING: from collections.abc import Mapping diff --git a/tests/test_greedy_scheduler.py b/tests/test_greedy_scheduler.py index 48c847e4..4a481892 100644 --- a/tests/test_greedy_scheduler.py +++ b/tests/test_greedy_scheduler.py @@ -4,7 +4,6 @@ import pytest -from graphqomb.common import Plane, PlannerMeasBasis from graphqomb.graphstate import GraphState from graphqomb.greedy_scheduler import ( greedy_minimize_space, From 9aad1d6e8192d15f18d9d2ac1d749fcadb713cca Mon Sep 17 00:00:00 2001 From: masa10-f Date: Sun, 16 Nov 2025 13:35:40 +0900 Subject: [PATCH 05/21] stash changes --- graphqomb/greedy_scheduler.py | 514 ++++++++++++++++++++-------------- 1 file changed, 310 insertions(+), 204 deletions(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index bad5a6ef..75f43d0c 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -41,49 +41,16 @@ def _dag_parents(dag: Mapping[int, AbstractSet[int]], node: int) -> set[int]: return {parent for parent, children in dag.items() if node in children} -def _compute_critical_path_length(dag: Mapping[int, AbstractSet[int]]) -> dict[int, int]: - """Compute the critical path length for each node in the DAG. - - The critical path length is the length of the longest path from the node - to any output node (leaf). This is used as a priority metric for scheduling. - - Parameters - ---------- - dag : Mapping[int, AbstractSet[int]] - The directed acyclic graph (node -> children mapping) - - Returns - ------- - dict[int, int] - Mapping from node to its critical path length - """ - # Topological sort (children first for bottom-up computation) - topo_order = list(TopologicalSorter(dag).static_order()) - - critical_length: dict[int, int] = {} - for node in topo_order: - children = dag.get(node, set()) - if not children: - # Leaf node (output node) - critical_length[node] = 0 - else: - # Critical path = 1 + max(critical path of children) - child_lengths = [critical_length[child] for child in children] - critical_length[node] = max(child_lengths, default=0) + 1 - - return critical_length - - def greedy_minimize_time( graph: BaseGraphState, dag: Mapping[int, AbstractSet[int]], ) -> tuple[dict[int, int], dict[int, int]]: """Fast greedy scheduler optimizing for minimal execution time (makespan). - This algorithm uses Critical Path List Scheduling: - 1. Compute critical path length for each node - 2. Schedule nodes in order of decreasing critical path length - 3. Each node is scheduled as early as possible while respecting constraints + This algorithm uses level-by-level parallel scheduling: + 1. 
At each time step, measure all nodes whose parents are measured and neighbors are prepared + 2. Prepare children and neighbors just before they are needed + 3. DAG constraints are naturally satisfied by topological processing Computational Complexity: O(N + E) where N is number of nodes, E is number of edges Expected speedup: 100-1000x compared to CP-SAT @@ -101,99 +68,168 @@ def greedy_minimize_time( tuple[dict[int, int], dict[int, int]] A tuple of (prepare_time, measure_time) dictionaries """ - # Compute critical path for prioritization - critical_length = _compute_critical_path_length(dag) + prepare_time: dict[int, int] = {} + measure_time: dict[int, int] = {} + + # Track which nodes have been measured (or are output nodes that won't be measured) + measured: set[int] = set(graph.output_node_indices.keys()) + + # Input nodes are considered prepared at time -1 + prepared: set[int] = set(graph.input_node_indices.keys()) - # Get all nodes in topological order for processing + # Prepare neighbors of input nodes at time 0 (they can be prepared before input measurement) + # This avoids circular dependency: input measurement needs neighbor prep, but neighbor prep needs parent meas + # Output nodes are also prepared early since they don't have DAG parent constraints + for input_node in graph.input_node_indices: + for neighbor in graph.neighbors(input_node): + if neighbor not in prepared and neighbor not in graph.input_node_indices: + prepare_time[neighbor] = 0 + prepared.add(neighbor) + + # Also prepare output nodes at time 0 (they have no DAG parent constraints that matter) + for output_node in graph.output_node_indices: + if output_node not in prepared and output_node not in graph.input_node_indices: + prepare_time[output_node] = 0 + prepared.add(output_node) + + current_time = 0 + + # Get all nodes in topological order topo_order = list(TopologicalSorter(dag).static_order()) - # Sort by critical path length (longest critical path first for better parallelism) - sorted_nodes = sorted(topo_order, key=lambda n: -critical_length.get(n, 0)) + # Nodes that are candidates for measurement (not yet measured, not outputs) + unmeasured = {n for n in topo_order if n not in graph.output_node_indices} - # Initialize scheduling dictionaries - prepare_time: dict[int, int] = {} - measure_time: dict[int, int] = {} + while unmeasured: + # Find all nodes that can be measured at this time step: + # 1. All DAG parents (non-output) are measured + # 2. 
All neighbors are prepared (or will be prepared just before measurement) + ready_to_measure = [] - # PASS 1: Set preparation times - # Process in topological order (parents before children) - for node in sorted_nodes: - # Prepare non-input nodes - if node not in graph.input_node_indices: - # Constraint 1: Prepare after all DAG parents are measured + for node in unmeasured: + # Check DAG parents (only consider non-output parents) parents = _dag_parents(dag, node) - parent_meas_times = [measure_time[p] for p in parents if p in measure_time] - earliest_prepare = max(parent_meas_times, default=0) - - prepare_time[node] = earliest_prepare - - # PASS 2: Set measurement times - # Process in reverse topological order (children before parents) so that DAG constraints are satisfied - for node in reversed(sorted_nodes): - # Measure non-output nodes - if node not in graph.output_node_indices: - # Constraint 1: Neighbor preparation constraint - # All neighbors must be prepared before this node can be measured - neighbor_prep_times = [] + non_output_parents = [p for p in parents if p not in graph.output_node_indices] + if not all(p in measured for p in non_output_parents): + continue + + # Check neighbors - need to prepare unprepared neighbors first + neighbors = list(graph.neighbors(node)) + all_neighbors_ready = True + + for neighbor in neighbors: + if neighbor not in prepared: + # This neighbor needs to be prepared + # Can we prepare it? (its DAG parents must be measured) + neighbor_parents = _dag_parents(dag, neighbor) + non_output_neighbor_parents = [p for p in neighbor_parents if p not in graph.output_node_indices] + if not all(p in measured for p in non_output_neighbor_parents): + all_neighbors_ready = False + break + + if all_neighbors_ready: + ready_to_measure.append(node) + + if not ready_to_measure: + # No nodes can be measured - try to prepare more nodes + for node in unmeasured: + if node not in prepared and node not in graph.input_node_indices: + parents = _dag_parents(dag, node) + non_output_parents = [p for p in parents if p not in graph.output_node_indices] + if all(p in measured for p in non_output_parents): + prepare_time[node] = current_time + prepared.add(node) + + # Also prepare output nodes if their parents are measured + for node in graph.output_node_indices: + if node not in prepared and node not in graph.input_node_indices: + parents = _dag_parents(dag, node) + non_output_parents = [p for p in parents if p not in graph.output_node_indices] + if all(p in measured for p in non_output_parents): + prepare_time[node] = current_time + prepared.add(node) + + current_time += 1 + if current_time > len(topo_order) * 2: + # Safety check to avoid infinite loop + break + continue + + # Check if any node or neighbor was just prepared at current_time (need to wait before measuring) + needs_delay_for_prep = False + for node in ready_to_measure: + # Check if node itself was just prepared at current_time + if node not in graph.input_node_indices and prepare_time.get(node) == current_time: + needs_delay_for_prep = True + break + # Check if any neighbor was just prepared at current_time for neighbor in graph.neighbors(node): - if neighbor in graph.input_node_indices: - # Input nodes are considered prepared at time -1 - neighbor_prep_times.append(-1) - else: - neighbor_prep_times.append(prepare_time[neighbor]) - - # Earliest time when all neighbors are prepared - earliest_by_neighbors = max(neighbor_prep_times, default=-1) + 1 - - # Constraint 2: Preparation constraint (non-input nodes only) - # 
Must be measured after this node is prepared - if node in graph.input_node_indices: - # Input nodes: only need neighbors to be prepared - earliest_measure = earliest_by_neighbors - else: - # Non-input nodes: must be after both preparation and neighbor preparation - earliest_by_prep = prepare_time[node] + 1 - earliest_measure = max(earliest_by_prep, earliest_by_neighbors) - - # Constraint 3: DAG ordering - must be measured BEFORE all children - # Children are already processed (reverse topo order), so check their times + if neighbor not in graph.input_node_indices and prepare_time.get(neighbor) == current_time: + needs_delay_for_prep = True + break + if needs_delay_for_prep: + break + + # If something was just prepared at current_time, delay measurement to next time step + if needs_delay_for_prep: + current_time += 1 + else: + # Check if we need to prepare anything now + needs_prep_now = False + for node in ready_to_measure: + if node not in graph.input_node_indices and node not in prepared: + needs_prep_now = True + break + for neighbor in graph.neighbors(node): + if neighbor not in prepared and neighbor not in graph.input_node_indices: + needs_prep_now = True + break + if needs_prep_now: + break + + if needs_prep_now: + for node in ready_to_measure: + # Prepare the node itself if it's not an input node + if node not in graph.input_node_indices and node not in prepared: + prepare_time[node] = current_time + prepared.add(node) + + # Prepare unprepared neighbors + for neighbor in graph.neighbors(node): + if neighbor not in prepared and neighbor not in graph.input_node_indices: + prepare_time[neighbor] = current_time + prepared.add(neighbor) + + # Measure at next time step (after preparation) + current_time += 1 + + # Measure all ready nodes at the same time (maximize parallelism) + for node in ready_to_measure: + measure_time[node] = current_time + measured.add(node) + unmeasured.discard(node) + + # After measurement, prepare children nodes whose parents are now all measured + for node in ready_to_measure: children = dag.get(node, set()) - if children: - # Find the earliest child measurement time - child_meas_times = [measure_time[child] for child in children if child in measure_time] - if child_meas_times: - # Must be measured before the earliest child (strictly <) - earliest_child_time = min(child_meas_times) - # Upper bound: must be < earliest_child_time - # So latest possible time is earliest_child_time - 1 - # However, we cannot violate the neighbor constraint (hard minimum) - latest_possible = earliest_child_time - 1 - if latest_possible < earliest_measure: - # Conflict: cannot satisfy both constraints - # This indicates the schedule is infeasible with current prep times - # For greedy, we prioritize the neighbor constraint (entanglement must work) - # and accept sub-optimal DAG ordering - pass # Keep earliest_measure as is - else: - earliest_measure = latest_possible - - measure_time[node] = earliest_measure - - # PASS 3: Iterative fix-up to resolve any DAG constraint violations - # If a parent's measurement time >= child's measurement time, push the child later - # Repeat until no violations remain (cascading updates) - max_iterations = len(sorted_nodes) # Upper bound to avoid infinite loops - for _ in range(max_iterations): - violations_found = False - for node in sorted_nodes: - if node not in graph.output_node_indices and node in measure_time: - children = dag.get(node, set()) - for child in children: - if child in measure_time and measure_time[node] >= measure_time[child]: - # 
Violation: parent >= child, need to push child later - measure_time[child] = measure_time[node] + 1 - violations_found = True - if not violations_found: - break # No more violations, done + for child in children: + if child not in prepared and child not in graph.input_node_indices: + # Check if all non-output parents of this child are now measured + child_parents = _dag_parents(dag, child) + non_output_child_parents = [p for p in child_parents if p not in graph.output_node_indices] + if all(p in measured for p in non_output_child_parents): + prepare_time[child] = current_time + 1 + prepared.add(child) + + current_time += 1 + + # Ensure all non-input nodes are prepared (including output nodes) + for node in graph.physical_nodes: + if node not in graph.input_node_indices and node not in prepared: + # This node was never prepared - prepare it now + # (typically output nodes or unreachable nodes) + prepare_time[node] = current_time + prepared.add(node) return prepare_time, measure_time @@ -205,11 +241,11 @@ def greedy_minimize_space( """Fast greedy scheduler optimizing for minimal qubit usage (space). This algorithm uses a resource-aware greedy approach: - 1. Track alive nodes (prepared but not yet measured) at each time step - 2. Schedule measurements eagerly when nodes are no longer needed - 3. Delay preparation of nodes until necessary + 1. At each time step, measure one node that minimizes active qubit count + 2. Delay preparation of nodes until just before measurement + 3. Prioritize measuring nodes with fewest unprepared neighbors - Computational Complexity: O(N log N + E) where N is nodes, E is edges + Computational Complexity: O(N^2 + E) where N is nodes, E is edges Expected speedup: 100-1000x compared to CP-SAT Approximation quality: Typically near-optimal for space usage @@ -225,91 +261,161 @@ def greedy_minimize_space( tuple[dict[int, int], dict[int, int]] A tuple of (prepare_time, measure_time) dictionaries """ - # Reverse topological order (leaves to roots) for bottom-up scheduling - topo_order = list(TopologicalSorter(dag).static_order()) - - # Track when each node can be measured (earliest time when all neighbors are ready) prepare_time: dict[int, int] = {} measure_time: dict[int, int] = {} - # Track alive nodes and current time + # Track which nodes have been measured (or are output nodes that won't be measured) + measured: set[int] = set(graph.output_node_indices.keys()) + + # Input nodes are considered prepared at time -1 + prepared: set[int] = set(graph.input_node_indices.keys()) + + # Prepare neighbors of input nodes at time 0 (they can be prepared before input measurement) + # This avoids circular dependency: input measurement needs neighbor prep, but neighbor prep needs parent meas + # Output nodes are also prepared early since they don't have DAG parent constraints + for input_node in graph.input_node_indices: + for neighbor in graph.neighbors(input_node): + if neighbor not in prepared and neighbor not in graph.input_node_indices: + prepare_time[neighbor] = 0 + prepared.add(neighbor) + + # Also prepare output nodes at time 0 (they have no DAG parent constraints that matter) + for output_node in graph.output_node_indices: + if output_node not in prepared and output_node not in graph.input_node_indices: + prepare_time[output_node] = 0 + prepared.add(output_node) + current_time = 0 - alive_nodes: set[int] = set(graph.input_node_indices.keys()) # Input nodes are always alive - # Nodes ready to be measured (all neighbors prepared) - ready_to_measure: dict[int, int] = {} # 
node -> earliest measure time + # Get all nodes in topological order + topo_order = list(TopologicalSorter(dag).static_order()) + + # Nodes that are candidates for measurement (not yet measured, not outputs) + unmeasured = {n for n in topo_order if n not in graph.output_node_indices} - # Process nodes in topological order to set preparation times - for node in reversed(topo_order): - # Prepare non-input nodes - if node not in graph.input_node_indices: - # Constraint 1: Prepare after all DAG parents are measured + while unmeasured: + # Find all nodes that CAN be measured at this time step + candidates = [] + + for node in unmeasured: + # Check DAG parents (only non-output parents) parents = _dag_parents(dag, node) - parent_meas_times = [measure_time[p] for p in parents if p in measure_time] - earliest_prepare = max(parent_meas_times, default=0) - - prepare_time[node] = earliest_prepare - alive_nodes.add(node) - current_time = max(current_time, earliest_prepare) - - # Second pass: compute measurement times (now all nodes are prepared) - for node in reversed(topo_order): - # Check if node should be measured (non-output nodes) - if node not in graph.output_node_indices: - # Constraint 1: Neighbor preparation constraint - neighbor_prep_times = [] - for neighbor in graph.neighbors(node): - if neighbor in graph.input_node_indices: - neighbor_prep_times.append(-1) - else: - neighbor_prep_times.append(prepare_time[neighbor]) - - # Earliest time when all neighbors are prepared - earliest_by_neighbors = max(neighbor_prep_times, default=-1) + 1 - - # Constraint 2: Preparation constraint (non-input nodes only) - if node in graph.input_node_indices: - earliest_meas = earliest_by_neighbors - else: - earliest_by_prep = prepare_time[node] + 1 - earliest_meas = max(earliest_by_prep, earliest_by_neighbors) - - # Constraint 3: DAG ordering - must be measured BEFORE all children - children = dag.get(node, set()) - if children: - child_meas_times = [ready_to_measure[child] for child in children if child in ready_to_measure] - if child_meas_times: - earliest_child_time = min(child_meas_times) - # Must be < earliest_child_time - earliest_meas = min(earliest_meas, earliest_child_time - 1) - - ready_to_measure[node] = earliest_meas - - # Third pass: Schedule measurements to minimize space - # Use a greedy approach: measure nodes as soon as possible when they're ready - nodes_to_measure = [n for n in graph.physical_nodes if n not in graph.output_node_indices] - - # Sort by earliest measurement time - sorted_by_meas_time = sorted( - [(ready_to_measure.get(node, 0), node) for node in nodes_to_measure if node in ready_to_measure] - ) - - for _, node in sorted_by_meas_time: - measure_time[node] = ready_to_measure[node] - - # Fourth pass: Iterative fix-up to resolve any DAG constraint violations - max_iterations = len(topo_order) - for _ in range(max_iterations): - violations_found = False - for node in topo_order: - if node not in graph.output_node_indices and node in measure_time: - children = dag.get(node, set()) - for child in children: - if child in measure_time and measure_time[node] >= measure_time[child]: - measure_time[child] = measure_time[node] + 1 - violations_found = True - if not violations_found: - break + non_output_parents = [p for p in parents if p not in graph.output_node_indices] + if not all(p in measured for p in non_output_parents): + continue + + # Check neighbors - can we prepare them if needed? 
+ neighbors = list(graph.neighbors(node)) + can_measure = True + unprepared_neighbor_count = 0 + + for neighbor in neighbors: + if neighbor not in prepared: + unprepared_neighbor_count += 1 + # Can we prepare this neighbor? + neighbor_parents = _dag_parents(dag, neighbor) + non_output_neighbor_parents = [p for p in neighbor_parents if p not in graph.output_node_indices] + if not all(p in measured for p in non_output_neighbor_parents): + can_measure = False + break + + if can_measure: + candidates.append((unprepared_neighbor_count, node)) + + if not candidates: + # No nodes can be measured - prepare more nodes + for node in unmeasured: + if node not in prepared and node not in graph.input_node_indices: + parents = _dag_parents(dag, node) + non_output_parents = [p for p in parents if p not in graph.output_node_indices] + if all(p in measured for p in non_output_parents): + prepare_time[node] = current_time + prepared.add(node) + + # Also prepare output nodes if their parents are measured + for node in graph.output_node_indices: + if node not in prepared and node not in graph.input_node_indices: + parents = _dag_parents(dag, node) + non_output_parents = [p for p in parents if p not in graph.output_node_indices] + if all(p in measured for p in non_output_parents): + prepare_time[node] = current_time + prepared.add(node) + + current_time += 1 + if current_time > len(topo_order) * 2: + # Safety check + break + continue + + # Choose the node with the fewest unprepared neighbors (minimize space) + candidates.sort() + _, node_to_measure = candidates[0] + + # Check if node or neighbor was just prepared at current_time (need to wait) + needs_delay_for_prep = False + if node_to_measure not in graph.input_node_indices and prepare_time.get(node_to_measure) == current_time: + needs_delay_for_prep = True + if not needs_delay_for_prep: + for neighbor in graph.neighbors(node_to_measure): + if neighbor not in graph.input_node_indices and prepare_time.get(neighbor) == current_time: + needs_delay_for_prep = True + break + + # If something was just prepared, delay measurement + if needs_delay_for_prep: + current_time += 1 + else: + # Check if preparation is needed now + needs_prep_now = False + if node_to_measure not in graph.input_node_indices and node_to_measure not in prepared: + needs_prep_now = True + if not needs_prep_now: + for neighbor in graph.neighbors(node_to_measure): + if neighbor not in prepared and neighbor not in graph.input_node_indices: + needs_prep_now = True + break + + # If preparation is needed, do it now and measure next timestep + if needs_prep_now: + # Prepare the node itself if needed + if node_to_measure not in graph.input_node_indices and node_to_measure not in prepared: + prepare_time[node_to_measure] = current_time + prepared.add(node_to_measure) + + # Prepare unprepared neighbors + for neighbor in graph.neighbors(node_to_measure): + if neighbor not in prepared and neighbor not in graph.input_node_indices: + prepare_time[neighbor] = current_time + prepared.add(neighbor) + + # Measure at next time step (after preparation) + current_time += 1 + + # Measure the selected node + measure_time[node_to_measure] = current_time + measured.add(node_to_measure) + unmeasured.discard(node_to_measure) + + # After measurement, prepare children nodes whose parents are now all measured + children = dag.get(node_to_measure, set()) + for child in children: + if child not in prepared and child not in graph.input_node_indices: + # Check if all non-output parents of this child are now measured + 
child_parents = _dag_parents(dag, child) + non_output_child_parents = [p for p in child_parents if p not in graph.output_node_indices] + if all(p in measured for p in non_output_child_parents): + prepare_time[child] = current_time + 1 + prepared.add(child) + + current_time += 1 + + # Ensure all non-input nodes are prepared (including output nodes) + for node in graph.physical_nodes: + if node not in graph.input_node_indices and node not in prepared: + # This node was never prepared - prepare it now + # (typically output nodes or unreachable nodes) + prepare_time[node] = current_time + prepared.add(node) return prepare_time, measure_time From 7b235979ec0532b97ce4f768b9e4566d9ea195bd Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Sun, 16 Nov 2025 16:05:46 +0900 Subject: [PATCH 06/21] add throughput calculation in pattern --- graphqomb/pattern.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/graphqomb/pattern.py b/graphqomb/pattern.py index b9718da4..74aabf30 100644 --- a/graphqomb/pattern.py +++ b/graphqomb/pattern.py @@ -101,6 +101,18 @@ def depth(self) -> int: """ return sum(1 for cmd in self.commands if isinstance(cmd, TICK)) + def throughput(self) -> float: + """Calculate the number of measurements per TICK in the pattern. + + Returns + ------- + `float` + Number of measurements per TICK + """ + num_measurements = sum(1 for cmd in self.commands if isinstance(cmd, M)) + num_ticks = self.depth + return num_measurements / num_ticks + def is_runnable(pattern: Pattern) -> None: """Check if the pattern is runnable. From 259affe8005df8c25abed7a48850b5afcce075f1 Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Sun, 16 Nov 2025 16:06:48 +0900 Subject: [PATCH 07/21] make throughput as a property --- graphqomb/pattern.py | 1 + 1 file changed, 1 insertion(+) diff --git a/graphqomb/pattern.py b/graphqomb/pattern.py index 74aabf30..58f3b541 100644 --- a/graphqomb/pattern.py +++ b/graphqomb/pattern.py @@ -101,6 +101,7 @@ def depth(self) -> int: """ return sum(1 for cmd in self.commands if isinstance(cmd, TICK)) + @property def throughput(self) -> float: """Calculate the number of measurements per TICK in the pattern. From 6558c8073b63efd4129fc2f077b2fadbbe39d631 Mon Sep 17 00:00:00 2001 From: masa10-f Date: Sun, 16 Nov 2025 19:37:49 +0900 Subject: [PATCH 08/21] fix greedy minimize time algorithm --- graphqomb/greedy_scheduler.py | 187 +++++----------------------------- 1 file changed, 28 insertions(+), 159 deletions(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index 75f43d0c..46ada159 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -45,192 +45,61 @@ def greedy_minimize_time( graph: BaseGraphState, dag: Mapping[int, AbstractSet[int]], ) -> tuple[dict[int, int], dict[int, int]]: - """Fast greedy scheduler optimizing for minimal execution time (makespan). + r"""Fast greedy scheduler optimizing for minimal execution time (makespan). - This algorithm uses level-by-level parallel scheduling: - 1. At each time step, measure all nodes whose parents are measured and neighbors are prepared - 2. Prepare children and neighbors just before they are needed - 3. DAG constraints are naturally satisfied by topological processing - - Computational Complexity: O(N + E) where N is number of nodes, E is number of edges - Expected speedup: 100-1000x compared to CP-SAT - Approximation quality: Typically within 2x of optimal + This algorithm uses a straightforward greedy approach: + 1. 
At each time step, measure all nodes that can be measured + 2. Prepare all neighbors of measured nodes just before measurement Parameters ---------- - graph : BaseGraphState + graph : `BaseGraphState` The graph state to schedule - dag : Mapping[int, AbstractSet[int]] + dag : `collections.abc.Mapping`\[`int`, `collections.abc.Set`\[`int`\]\] The directed acyclic graph representing measurement dependencies Returns ------- - tuple[dict[int, int], dict[int, int]] + `tuple`\[`dict`\[`int`, `int`\], `dict`\[`int`, `int`\]\] A tuple of (prepare_time, measure_time) dictionaries + + Raises + ------ + RuntimeError + If no nodes can be measured at a given time step, indicating a possible """ prepare_time: dict[int, int] = {} measure_time: dict[int, int] = {} - # Track which nodes have been measured (or are output nodes that won't be measured) - measured: set[int] = set(graph.output_node_indices.keys()) + inv_dag: dict[int, set[int]] = {node: set() for node in dag} + for parent, children in dag.items(): + for child in children: + inv_dag[child].add(parent) - # Input nodes are considered prepared at time -1 prepared: set[int] = set(graph.input_node_indices.keys()) - - # Prepare neighbors of input nodes at time 0 (they can be prepared before input measurement) - # This avoids circular dependency: input measurement needs neighbor prep, but neighbor prep needs parent meas - # Output nodes are also prepared early since they don't have DAG parent constraints - for input_node in graph.input_node_indices: - for neighbor in graph.neighbors(input_node): - if neighbor not in prepared and neighbor not in graph.input_node_indices: - prepare_time[neighbor] = 0 - prepared.add(neighbor) - - # Also prepare output nodes at time 0 (they have no DAG parent constraints that matter) - for output_node in graph.output_node_indices: - if output_node not in prepared and output_node not in graph.input_node_indices: - prepare_time[output_node] = 0 - prepared.add(output_node) - + unmeasured = graph.physical_nodes - graph.output_node_indices.keys() current_time = 0 - # Get all nodes in topological order - topo_order = list(TopologicalSorter(dag).static_order()) - - # Nodes that are candidates for measurement (not yet measured, not outputs) - unmeasured = {n for n in topo_order if n not in graph.output_node_indices} - while unmeasured: - # Find all nodes that can be measured at this time step: - # 1. All DAG parents (non-output) are measured - # 2. All neighbors are prepared (or will be prepared just before measurement) - ready_to_measure = [] - + to_measure = set() for node in unmeasured: - # Check DAG parents (only consider non-output parents) - parents = _dag_parents(dag, node) - non_output_parents = [p for p in parents if p not in graph.output_node_indices] - if not all(p in measured for p in non_output_parents): - continue - - # Check neighbors - need to prepare unprepared neighbors first - neighbors = list(graph.neighbors(node)) - all_neighbors_ready = True - - for neighbor in neighbors: - if neighbor not in prepared: - # This neighbor needs to be prepared - # Can we prepare it? 
(its DAG parents must be measured) - neighbor_parents = _dag_parents(dag, neighbor) - non_output_neighbor_parents = [p for p in neighbor_parents if p not in graph.output_node_indices] - if not all(p in measured for p in non_output_neighbor_parents): - all_neighbors_ready = False - break - - if all_neighbors_ready: - ready_to_measure.append(node) - - if not ready_to_measure: - # No nodes can be measured - try to prepare more nodes - for node in unmeasured: - if node not in prepared and node not in graph.input_node_indices: - parents = _dag_parents(dag, node) - non_output_parents = [p for p in parents if p not in graph.output_node_indices] - if all(p in measured for p in non_output_parents): - prepare_time[node] = current_time - prepared.add(node) + if len(inv_dag[node]) == 0: + to_measure.add(node) - # Also prepare output nodes if their parents are measured - for node in graph.output_node_indices: - if node not in prepared and node not in graph.input_node_indices: - parents = _dag_parents(dag, node) - non_output_parents = [p for p in parents if p not in graph.output_node_indices] - if all(p in measured for p in non_output_parents): - prepare_time[node] = current_time - prepared.add(node) - - current_time += 1 - if current_time > len(topo_order) * 2: - # Safety check to avoid infinite loop - break - continue + if not to_measure: + msg = "No nodes can be measured; possible cyclic dependency or incomplete preparation." + raise RuntimeError(msg) - # Check if any node or neighbor was just prepared at current_time (need to wait before measuring) - needs_delay_for_prep = False - for node in ready_to_measure: - # Check if node itself was just prepared at current_time - if node not in graph.input_node_indices and prepare_time.get(node) == current_time: - needs_delay_for_prep = True - break - # Check if any neighbor was just prepared at current_time + for node in to_measure: for neighbor in graph.neighbors(node): - if neighbor not in graph.input_node_indices and prepare_time.get(neighbor) == current_time: - needs_delay_for_prep = True - break - if needs_delay_for_prep: - break - - # If something was just prepared at current_time, delay measurement to next time step - if needs_delay_for_prep: - current_time += 1 - else: - # Check if we need to prepare anything now - needs_prep_now = False - for node in ready_to_measure: - if node not in graph.input_node_indices and node not in prepared: - needs_prep_now = True - break - for neighbor in graph.neighbors(node): - if neighbor not in prepared and neighbor not in graph.input_node_indices: - needs_prep_now = True - break - if needs_prep_now: - break - - if needs_prep_now: - for node in ready_to_measure: - # Prepare the node itself if it's not an input node - if node not in graph.input_node_indices and node not in prepared: - prepare_time[node] = current_time - prepared.add(node) - - # Prepare unprepared neighbors - for neighbor in graph.neighbors(node): - if neighbor not in prepared and neighbor not in graph.input_node_indices: - prepare_time[neighbor] = current_time - prepared.add(neighbor) - - # Measure at next time step (after preparation) - current_time += 1 - - # Measure all ready nodes at the same time (maximize parallelism) - for node in ready_to_measure: + if neighbor not in prepared: + prepare_time[neighbor] = current_time + prepared.add(neighbor) measure_time[node] = current_time - measured.add(node) - unmeasured.discard(node) - - # After measurement, prepare children nodes whose parents are now all measured - for node in ready_to_measure: - 
children = dag.get(node, set()) - for child in children: - if child not in prepared and child not in graph.input_node_indices: - # Check if all non-output parents of this child are now measured - child_parents = _dag_parents(dag, child) - non_output_child_parents = [p for p in child_parents if p not in graph.output_node_indices] - if all(p in measured for p in non_output_child_parents): - prepare_time[child] = current_time + 1 - prepared.add(child) + unmeasured.remove(node) current_time += 1 - # Ensure all non-input nodes are prepared (including output nodes) - for node in graph.physical_nodes: - if node not in graph.input_node_indices and node not in prepared: - # This node was never prepared - prepare it now - # (typically output nodes or unreachable nodes) - prepare_time[node] = current_time - prepared.add(node) - return prepare_time, measure_time From b539077922ce01d92e8482f7b19eaea38d2f5f5b Mon Sep 17 00:00:00 2001 From: masa10-f Date: Sun, 16 Nov 2025 19:49:04 +0900 Subject: [PATCH 09/21] fix minimize space scheduler --- graphqomb/greedy_scheduler.py | 265 ++++++++++++---------------------- 1 file changed, 89 insertions(+), 176 deletions(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index 46ada159..1207f871 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -2,7 +2,7 @@ This module provides fast greedy scheduling algorithms as an alternative to CP-SAT based optimization. The greedy algorithms provide approximate solutions -with 100-1000x speedup compared to CP-SAT, making them suitable for large-scale +with speedup compared to CP-SAT, making them suitable for large-scale graphs or when optimality is not critical. This module provides: @@ -23,24 +23,6 @@ from graphqomb.graphstate import BaseGraphState -def _dag_parents(dag: Mapping[int, AbstractSet[int]], node: int) -> set[int]: - """Find all parent nodes (predecessors) of a given node in the DAG. - - Parameters - ---------- - dag : Mapping[int, AbstractSet[int]] - The directed acyclic graph (node -> children mapping) - node : int - The node to find parents for - - Returns - ------- - set[int] - Set of parent nodes (nodes that have 'node' as a child) - """ - return {parent for parent, children in dag.items() if node in children} - - def greedy_minimize_time( graph: BaseGraphState, dag: Mapping[int, AbstractSet[int]], @@ -95,6 +77,7 @@ def greedy_minimize_time( if neighbor not in prepared: prepare_time[neighbor] = current_time prepared.add(neighbor) + inv_dag[neighbor].discard(node) # remove measured node from dependencies measure_time[node] = current_time unmeasured.remove(node) @@ -103,20 +86,17 @@ def greedy_minimize_time( return prepare_time, measure_time -def greedy_minimize_space( +def greedy_minimize_space( # noqa: C901, PLR0912 graph: BaseGraphState, dag: Mapping[int, AbstractSet[int]], ) -> tuple[dict[int, int], dict[int, int]]: """Fast greedy scheduler optimizing for minimal qubit usage (space). - This algorithm uses a resource-aware greedy approach: - 1. At each time step, measure one node that minimizes active qubit count - 2. Delay preparation of nodes until just before measurement - 3. Prioritize measuring nodes with fewest unprepared neighbors - - Computational Complexity: O(N^2 + E) where N is nodes, E is edges - Expected speedup: 100-1000x compared to CP-SAT - Approximation quality: Typically near-optimal for space usage + This algorithm uses a greedy approach to minimize the number of active + qubits at each time step: + 1. 
At each time step, select the next node to measure that minimizes the + number of new qubits that need to be prepared. + 2. Prepare neighbors of the measured node just before measurement. Parameters ---------- @@ -129,164 +109,100 @@ def greedy_minimize_space( ------- tuple[dict[int, int], dict[int, int]] A tuple of (prepare_time, measure_time) dictionaries + + Raises + ------ + RuntimeError + If no nodes can be measured at a given time step, indicating a possible + cyclic dependency or incomplete preparation. """ prepare_time: dict[int, int] = {} measure_time: dict[int, int] = {} - # Track which nodes have been measured (or are output nodes that won't be measured) - measured: set[int] = set(graph.output_node_indices.keys()) + topo_order = list(TopologicalSorter(dag).static_order()) + topo_order.reverse() # from parents to children + + inv_dag: dict[int, set[int]] = {node: set() for node in dag} + for parent, children in dag.items(): + for child in children: + inv_dag[child].add(parent) - # Input nodes are considered prepared at time -1 prepared: set[int] = set(graph.input_node_indices.keys()) + alive: set[int] = set(graph.input_node_indices.keys()) + unmeasured = graph.physical_nodes - graph.output_node_indices.keys() + current_time = 0 - # Prepare neighbors of input nodes at time 0 (they can be prepared before input measurement) - # This avoids circular dependency: input measurement needs neighbor prep, but neighbor prep needs parent meas - # Output nodes are also prepared early since they don't have DAG parent constraints - for input_node in graph.input_node_indices: - for neighbor in graph.neighbors(input_node): - if neighbor not in prepared and neighbor not in graph.input_node_indices: - prepare_time[neighbor] = 0 - prepared.add(neighbor) + while unmeasured: + candidate_nodes = set() + for node in alive: + if len(inv_dag[node]) == 0: + candidate_nodes.add(node) - # Also prepare output nodes at time 0 (they have no DAG parent constraints that matter) - for output_node in graph.output_node_indices: - if output_node not in prepared and output_node not in graph.input_node_indices: - prepare_time[output_node] = 0 - prepared.add(output_node) + if not candidate_nodes: + # If no alive nodes can be measured, pick from unmeasured + for node in unmeasured - alive: + if len(inv_dag[node]) == 0: + candidate_nodes.add(node) - current_time = 0 + if not candidate_nodes: + msg = "No nodes can be measured; possible cyclic dependency or incomplete preparation." 
+ raise RuntimeError(msg) - # Get all nodes in topological order - topo_order = list(TopologicalSorter(dag).static_order()) + # calculate costs and pick the best node to measure + best_node_candidate: set[int] = set() + best_cost = float("inf") + for node in candidate_nodes: + cost = _calc_activate_cost(node, graph, prepared, inv_dag) + if cost < best_cost: + best_cost = cost + best_node_candidate = {node} + elif cost == best_cost: + best_node_candidate.add(node) + + # tie-breaker: choose the node that appears first in topological order + best_node = min(best_node_candidate, key=topo_order.index) + for neighbor in graph.neighbors(best_node): + if neighbor not in prepared: + prepare_time[neighbor] = current_time + prepared.add(neighbor) + inv_dag[neighbor].discard(best_node) # remove measured node from dependencies + alive.add(neighbor) + measure_time[best_node] = current_time + unmeasured.remove(best_node) + alive.discard(best_node) + current_time += 1 - # Nodes that are candidates for measurement (not yet measured, not outputs) - unmeasured = {n for n in topo_order if n not in graph.output_node_indices} + return prepare_time, measure_time - while unmeasured: - # Find all nodes that CAN be measured at this time step - candidates = [] - for node in unmeasured: - # Check DAG parents (only non-output parents) - parents = _dag_parents(dag, node) - non_output_parents = [p for p in parents if p not in graph.output_node_indices] - if not all(p in measured for p in non_output_parents): - continue - - # Check neighbors - can we prepare them if needed? - neighbors = list(graph.neighbors(node)) - can_measure = True - unprepared_neighbor_count = 0 - - for neighbor in neighbors: - if neighbor not in prepared: - unprepared_neighbor_count += 1 - # Can we prepare this neighbor? 
- neighbor_parents = _dag_parents(dag, neighbor) - non_output_neighbor_parents = [p for p in neighbor_parents if p not in graph.output_node_indices] - if not all(p in measured for p in non_output_neighbor_parents): - can_measure = False - break - - if can_measure: - candidates.append((unprepared_neighbor_count, node)) - - if not candidates: - # No nodes can be measured - prepare more nodes - for node in unmeasured: - if node not in prepared and node not in graph.input_node_indices: - parents = _dag_parents(dag, node) - non_output_parents = [p for p in parents if p not in graph.output_node_indices] - if all(p in measured for p in non_output_parents): - prepare_time[node] = current_time - prepared.add(node) - - # Also prepare output nodes if their parents are measured - for node in graph.output_node_indices: - if node not in prepared and node not in graph.input_node_indices: - parents = _dag_parents(dag, node) - non_output_parents = [p for p in parents if p not in graph.output_node_indices] - if all(p in measured for p in non_output_parents): - prepare_time[node] = current_time - prepared.add(node) - - current_time += 1 - if current_time > len(topo_order) * 2: - # Safety check - break - continue - - # Choose the node with the fewest unprepared neighbors (minimize space) - candidates.sort() - _, node_to_measure = candidates[0] - - # Check if node or neighbor was just prepared at current_time (need to wait) - needs_delay_for_prep = False - if node_to_measure not in graph.input_node_indices and prepare_time.get(node_to_measure) == current_time: - needs_delay_for_prep = True - if not needs_delay_for_prep: - for neighbor in graph.neighbors(node_to_measure): - if neighbor not in graph.input_node_indices and prepare_time.get(neighbor) == current_time: - needs_delay_for_prep = True - break - - # If something was just prepared, delay measurement - if needs_delay_for_prep: - current_time += 1 - else: - # Check if preparation is needed now - needs_prep_now = False - if node_to_measure not in graph.input_node_indices and node_to_measure not in prepared: - needs_prep_now = True - if not needs_prep_now: - for neighbor in graph.neighbors(node_to_measure): - if neighbor not in prepared and neighbor not in graph.input_node_indices: - needs_prep_now = True - break - - # If preparation is needed, do it now and measure next timestep - if needs_prep_now: - # Prepare the node itself if needed - if node_to_measure not in graph.input_node_indices and node_to_measure not in prepared: - prepare_time[node_to_measure] = current_time - prepared.add(node_to_measure) - - # Prepare unprepared neighbors - for neighbor in graph.neighbors(node_to_measure): - if neighbor not in prepared and neighbor not in graph.input_node_indices: - prepare_time[neighbor] = current_time - prepared.add(neighbor) - - # Measure at next time step (after preparation) - current_time += 1 - - # Measure the selected node - measure_time[node_to_measure] = current_time - measured.add(node_to_measure) - unmeasured.discard(node_to_measure) - - # After measurement, prepare children nodes whose parents are now all measured - children = dag.get(node_to_measure, set()) - for child in children: - if child not in prepared and child not in graph.input_node_indices: - # Check if all non-output parents of this child are now measured - child_parents = _dag_parents(dag, child) - non_output_child_parents = [p for p in child_parents if p not in graph.output_node_indices] - if all(p in measured for p in non_output_child_parents): - prepare_time[child] = current_time + 
1 - prepared.add(child) +def _calc_activate_cost( + node: int, + graph: BaseGraphState, + prepared: set[int], + inv_dag: dict[int, set[int]], +) -> int: + """Calculate the cost of activating (preparing) a node. - current_time += 1 + The cost is defined as the number of new qubits that would become active + (prepared but not yet measured) if this node were to be measured next. - # Ensure all non-input nodes are prepared (including output nodes) - for node in graph.physical_nodes: - if node not in graph.input_node_indices and node not in prepared: - # This node was never prepared - prepare it now - # (typically output nodes or unreachable nodes) - prepare_time[node] = current_time - prepared.add(node) + Parameters + ---------- + node : int + The node to evaluate. + graph : BaseGraphState + The graph state. + prepared : set[int] + The set of currently prepared nodes. + inv_dag : dict[int, set[int]] + The inverse DAG representing dependencies. - return prepare_time, measure_time + Returns + ------- + int + The activation cost for the node. + """ + return len(graph.neighbors(node) - prepared) def solve_greedy_schedule( @@ -315,9 +231,6 @@ def solve_greedy_schedule( A tuple of (prepare_time, measure_time) dictionaries if successful, None if scheduling fails (should rarely happen for valid inputs) """ - try: - if minimize_space: - return greedy_minimize_space(graph, dag) - return greedy_minimize_time(graph, dag) - except Exception: - return None + if minimize_space: + return greedy_minimize_space(graph, dag) + return greedy_minimize_time(graph, dag) From 6869113356d13af4bde8a658cde01bd20dbe1d93 Mon Sep 17 00:00:00 2001 From: masa10-f Date: Mon, 17 Nov 2025 00:42:51 +0900 Subject: [PATCH 10/21] fix algorithm --- graphqomb/greedy_scheduler.py | 55 +++++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 16 deletions(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index 1207f871..218916d9 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -53,13 +53,15 @@ def greedy_minimize_time( prepare_time: dict[int, int] = {} measure_time: dict[int, int] = {} - inv_dag: dict[int, set[int]] = {node: set() for node in dag} + unmeasured = graph.physical_nodes - graph.output_node_indices.keys() + + # Build inverse DAG: for each node, track which nodes must be measured before it + inv_dag: dict[int, set[int]] = {node: set() for node in graph.physical_nodes} for parent, children in dag.items(): for child in children: inv_dag[child].add(parent) prepared: set[int] = set(graph.input_node_indices.keys()) - unmeasured = graph.physical_nodes - graph.output_node_indices.keys() current_time = 0 while unmeasured: @@ -72,16 +74,26 @@ def greedy_minimize_time( msg = "No nodes can be measured; possible cyclic dependency or incomplete preparation." 
raise RuntimeError(msg) + needs_prep = False + # Prepare neighbors at current_time for node in to_measure: for neighbor in graph.neighbors(node): if neighbor not in prepared: prepare_time[neighbor] = current_time prepared.add(neighbor) - inv_dag[neighbor].discard(node) # remove measured node from dependencies - measure_time[node] = current_time + needs_prep = True + + # Measure at current_time if no prep needed, otherwise at current_time + 1 + meas_time = current_time + 1 if needs_prep else current_time + for node in to_measure: + measure_time[node] = meas_time unmeasured.remove(node) + # Remove measured node from dependencies of all its children in the DAG + for child in dag.get(node, set()): + if child in inv_dag: + inv_dag[child].remove(node) - current_time += 1 + current_time = meas_time + 1 return prepare_time, measure_time @@ -119,17 +131,19 @@ def greedy_minimize_space( # noqa: C901, PLR0912 prepare_time: dict[int, int] = {} measure_time: dict[int, int] = {} + unmeasured = graph.physical_nodes - graph.output_node_indices.keys() + topo_order = list(TopologicalSorter(dag).static_order()) topo_order.reverse() # from parents to children - inv_dag: dict[int, set[int]] = {node: set() for node in dag} + # Build inverse DAG: for each node, track which nodes must be measured before it + inv_dag: dict[int, set[int]] = {node: set() for node in graph.physical_nodes} for parent, children in dag.items(): for child in children: inv_dag[child].add(parent) prepared: set[int] = set(graph.input_node_indices.keys()) alive: set[int] = set(graph.input_node_indices.keys()) - unmeasured = graph.physical_nodes - graph.output_node_indices.keys() current_time = 0 while unmeasured: @@ -152,7 +166,7 @@ def greedy_minimize_space( # noqa: C901, PLR0912 best_node_candidate: set[int] = set() best_cost = float("inf") for node in candidate_nodes: - cost = _calc_activate_cost(node, graph, prepared, inv_dag) + cost = _calc_activate_cost(node, graph, prepared) if cost < best_cost: best_cost = cost best_node_candidate = {node} @@ -161,16 +175,28 @@ def greedy_minimize_space( # noqa: C901, PLR0912 # tie-breaker: choose the node that appears first in topological order best_node = min(best_node_candidate, key=topo_order.index) + + # Prepare neighbors at current_time + needs_prep = False for neighbor in graph.neighbors(best_node): if neighbor not in prepared: prepare_time[neighbor] = current_time prepared.add(neighbor) - inv_dag[neighbor].discard(best_node) # remove measured node from dependencies - alive.add(neighbor) - measure_time[best_node] = current_time + alive.add(neighbor) + needs_prep = True + + # Measure at current_time if no prep needed, otherwise at current_time + 1 + meas_time = current_time + 1 if needs_prep else current_time + measure_time[best_node] = meas_time unmeasured.remove(best_node) - alive.discard(best_node) - current_time += 1 + alive.remove(best_node) + + # Remove measured node from dependencies of all its children in the DAG + for child in dag.get(best_node, set()): + if child in inv_dag: + inv_dag[child].remove(best_node) + + current_time = meas_time + 1 return prepare_time, measure_time @@ -179,7 +205,6 @@ def _calc_activate_cost( node: int, graph: BaseGraphState, prepared: set[int], - inv_dag: dict[int, set[int]], ) -> int: """Calculate the cost of activating (preparing) a node. @@ -194,8 +219,6 @@ def _calc_activate_cost( The graph state. prepared : set[int] The set of currently prepared nodes. - inv_dag : dict[int, set[int]] - The inverse DAG representing dependencies. 
Returns ------- From 287b7b9530d25aaac6a198dd840d75eb8e8e20f8 Mon Sep 17 00:00:00 2001 From: masa10-f Date: Mon, 17 Nov 2025 00:43:44 +0900 Subject: [PATCH 11/21] update test --- tests/test_greedy_scheduler.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/test_greedy_scheduler.py b/tests/test_greedy_scheduler.py index 4a481892..6bc5c5ac 100644 --- a/tests/test_greedy_scheduler.py +++ b/tests/test_greedy_scheduler.py @@ -334,12 +334,11 @@ def test_greedy_scheduler_dag_constraints() -> None: scheduler = Scheduler(graph, flow) config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) - success = scheduler.solve_schedule(config, use_greedy=True) # Note: This flow creates a cyclic DAG (nodes 3 and 4 have circular dependency) - # Both CP-SAT and greedy schedulers should fail on invalid flows - # This test verifies that the greedy scheduler handles invalid input gracefully - assert not success # Should fail due to cyclic DAG + # The greedy scheduler should raise RuntimeError for invalid flows + with pytest.raises(RuntimeError, match="No nodes can be measured"): + scheduler.solve_schedule(config, use_greedy=True) def test_greedy_scheduler_edge_constraints() -> None: From 4d1054cdba045577698614180b8657d2a8a91bdf Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Mon, 17 Nov 2025 12:49:19 +0900 Subject: [PATCH 12/21] add max qubit count attribute --- graphqomb/greedy_scheduler.py | 79 +++++++++++++++++++++++++++++------ 1 file changed, 67 insertions(+), 12 deletions(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index 218916d9..7a06a4f3 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -17,7 +17,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from collections.abc import Mapping + from collections.abc import Mapping, Sequence from collections.abc import Set as AbstractSet from graphqomb.graphstate import BaseGraphState @@ -26,6 +26,7 @@ def greedy_minimize_time( graph: BaseGraphState, dag: Mapping[int, AbstractSet[int]], + max_qubit_count: int | None = None, ) -> tuple[dict[int, int], dict[int, int]]: r"""Fast greedy scheduler optimizing for minimal execution time (makespan). @@ -65,23 +66,33 @@ def greedy_minimize_time( current_time = 0 while unmeasured: - to_measure = set() + measure_candidate = set() for node in unmeasured: if len(inv_dag[node]) == 0: - to_measure.add(node) + measure_candidate.add(node) - if not to_measure: + if not measure_candidate: msg = "No nodes can be measured; possible cyclic dependency or incomplete preparation." 
raise RuntimeError(msg) - needs_prep = False - # Prepare neighbors at current_time - for node in to_measure: - for neighbor in graph.neighbors(node): - if neighbor not in prepared: - prepare_time[neighbor] = current_time - prepared.add(neighbor) - needs_prep = True + if max_qubit_count is not None: + to_measure, to_prepare = _determine_measure_node( + graph, + measure_candidate, + prepared, + max_qubit_count, + ) + needs_prep = bool(to_prepare) + else: + to_measure = measure_candidate + needs_prep = False + # Prepare neighbors at current_time + for node in to_measure: + for neighbor in graph.neighbors(node): + if neighbor not in prepared: + prepare_time[neighbor] = current_time + prepared.add(neighbor) + needs_prep = True # Measure at current_time if no prep needed, otherwise at current_time + 1 meas_time = current_time + 1 if needs_prep else current_time @@ -98,6 +109,50 @@ def greedy_minimize_time( return prepare_time, measure_time +def _determine_measure_node( + graph: BaseGraphState, + measure_candidates: AbstractSet[int], + prepared: AbstractSet[int], + max_qubit_count: int, +) -> tuple[set[int], set[int]]: + r"""Determine which nodes to measure without exceeding max qubit count. + + Parameters + ---------- + graph : `BaseGraphState` + The graph state. + measure_candidates : `collections.abc.Set`\[`int`\] + The candidate nodes available for measurement. + prepared : `collections.abc.Set`\[`int`\] + The set of currently prepared nodes. + max_qubit_count : `int` + The maximum allowed number of active qubits. + + Returns + ------- + `tuple`\[`set`\[`int`\], `set`\[`int`\]\] + A tuple of (to_measure, to_prepare) sets indicating which nodes to measure and prepare. + + Raises + ------ + RuntimeError + If no nodes can be measured without exceeding the max qubit count. + """ + to_measure: set[int] = set() + to_activate: set[int] = set() + active_cost = 0 + for node in measure_candidates: + to_be_activated = graph.neighbors(node) - prepared + to_activate |= to_be_activated + if active_cost + len(to_be_activated) <= max_qubit_count: + to_measure.add(node) + active_cost += len(to_be_activated) + if not to_measure: + msg = "Cannot schedule more measurements without exceeding max qubit count. Please increase max_qubit_count." 
+ raise RuntimeError(msg) + return to_measure, to_activate + + def greedy_minimize_space( # noqa: C901, PLR0912 graph: BaseGraphState, dag: Mapping[int, AbstractSet[int]], From 8b5fe5ae01df97505ade3fb1487d3b4bb512a24c Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Mon, 17 Nov 2025 12:49:37 +0900 Subject: [PATCH 13/21] remove unnecessary type --- graphqomb/greedy_scheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index 7a06a4f3..2d56502d 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -17,7 +17,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from collections.abc import Mapping, Sequence + from collections.abc import Mapping from collections.abc import Set as AbstractSet from graphqomb.graphstate import BaseGraphState From 65fbae1ada8db0952ec1c40275f51a361865e8aa Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Mon, 17 Nov 2025 13:01:25 +0900 Subject: [PATCH 14/21] fix branch where max_qubit_count is not None --- graphqomb/greedy_scheduler.py | 66 ++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 20 deletions(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index 2d56502d..079ff751 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -63,6 +63,12 @@ def greedy_minimize_time( inv_dag[child].add(parent) prepared: set[int] = set(graph.input_node_indices.keys()) + alive: set[int] = set(graph.input_node_indices.keys()) + + if max_qubit_count is not None and len(alive) > max_qubit_count: + msg = "Initial number of active qubits exceeds max_qubit_count." + raise RuntimeError(msg) + current_time = 0 while unmeasured: @@ -76,13 +82,21 @@ def greedy_minimize_time( raise RuntimeError(msg) if max_qubit_count is not None: - to_measure, to_prepare = _determine_measure_node( + to_measure, to_prepare = _determine_measure_nodes( graph, measure_candidate, prepared, + alive, max_qubit_count, ) - needs_prep = bool(to_prepare) + needs_prep = False + # Prepare selected neighbors at current_time + for neighbor in to_prepare: + if neighbor not in prepared: + prepare_time[neighbor] = current_time + prepared.add(neighbor) + alive.add(neighbor) + needs_prep = True else: to_measure = measure_candidate needs_prep = False @@ -92,12 +106,14 @@ def greedy_minimize_time( if neighbor not in prepared: prepare_time[neighbor] = current_time prepared.add(neighbor) + alive.add(neighbor) needs_prep = True # Measure at current_time if no prep needed, otherwise at current_time + 1 meas_time = current_time + 1 if needs_prep else current_time for node in to_measure: measure_time[node] = meas_time + alive.remove(node) unmeasured.remove(node) # Remove measured node from dependencies of all its children in the DAG for child in dag.get(node, set()): @@ -109,10 +125,11 @@ def greedy_minimize_time( return prepare_time, measure_time -def _determine_measure_node( +def _determine_measure_nodes( graph: BaseGraphState, measure_candidates: AbstractSet[int], prepared: AbstractSet[int], + alive: AbstractSet[int], max_qubit_count: int, ) -> tuple[set[int], set[int]]: r"""Determine which nodes to measure without exceeding max qubit count. @@ -125,6 +142,8 @@ def _determine_measure_node( The candidate nodes available for measurement. prepared : `collections.abc.Set`\[`int`\] The set of currently prepared nodes. + alive : `collections.abc.Set`\[`int`\] + The set of currently active (prepared but not yet measured) nodes. 
max_qubit_count : `int` The maximum allowed number of active qubits. @@ -139,25 +158,32 @@ def _determine_measure_node( If no nodes can be measured without exceeding the max qubit count. """ to_measure: set[int] = set() - to_activate: set[int] = set() - active_cost = 0 + to_prepare: set[int] = set() + for node in measure_candidates: - to_be_activated = graph.neighbors(node) - prepared - to_activate |= to_be_activated - if active_cost + len(to_be_activated) <= max_qubit_count: + # Neighbors that still need to be prepared for this node + new_neighbors = graph.neighbors(node) - prepared + additional_to_prepare = new_neighbors - to_prepare + + # Projected number of active qubits after preparing these neighbors + projected_active = len(alive) + len(to_prepare) + len(additional_to_prepare) + + if projected_active <= max_qubit_count: to_measure.add(node) - active_cost += len(to_be_activated) + to_prepare |= new_neighbors + if not to_measure: msg = "Cannot schedule more measurements without exceeding max qubit count. Please increase max_qubit_count." raise RuntimeError(msg) - return to_measure, to_activate + + return to_measure, to_prepare def greedy_minimize_space( # noqa: C901, PLR0912 graph: BaseGraphState, dag: Mapping[int, AbstractSet[int]], ) -> tuple[dict[int, int], dict[int, int]]: - """Fast greedy scheduler optimizing for minimal qubit usage (space). + r"""Fast greedy scheduler optimizing for minimal qubit usage (space). This algorithm uses a greedy approach to minimize the number of active qubits at each time step: @@ -167,14 +193,14 @@ def greedy_minimize_space( # noqa: C901, PLR0912 Parameters ---------- - graph : BaseGraphState + graph : `BaseGraphState` The graph state to schedule - dag : Mapping[int, AbstractSet[int]] + dag : `collections.abc.Mapping`\[`int`, `collections.abc.Set`\[`int`\]\] The directed acyclic graph representing measurement dependencies Returns ------- - tuple[dict[int, int], dict[int, int]] + `tuple`\[`dict`\[`int`, `int`\], `dict`\[`int`, `int`\] A tuple of (prepare_time, measure_time) dictionaries Raises @@ -259,25 +285,25 @@ def greedy_minimize_space( # noqa: C901, PLR0912 def _calc_activate_cost( node: int, graph: BaseGraphState, - prepared: set[int], + prepared: AbstractSet[int], ) -> int: - """Calculate the cost of activating (preparing) a node. + r"""Calculate the cost of activating (preparing) a node. The cost is defined as the number of new qubits that would become active (prepared but not yet measured) if this node were to be measured next. Parameters ---------- - node : int + node : `int` The node to evaluate. - graph : BaseGraphState + graph : `BaseGraphState` The graph state. - prepared : set[int] + prepared : `collections.abc.Set`\[`int`\] The set of currently prepared nodes. Returns ------- - int + `int` The activation cost for the node. 
""" return len(graph.neighbors(node) - prepared) From 4f7032fec3f2480fad46de7fcd31a5ddd3083b0e Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Mon, 17 Nov 2025 13:25:33 +0900 Subject: [PATCH 15/21] add test with max qubit constraints --- tests/test_greedy_scheduler.py | 96 ++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/tests/test_greedy_scheduler.py b/tests/test_greedy_scheduler.py index 6bc5c5ac..05825d08 100644 --- a/tests/test_greedy_scheduler.py +++ b/tests/test_greedy_scheduler.py @@ -78,6 +78,102 @@ def test_greedy_minimize_space_simple() -> None: assert measure_time[node0] < measure_time[node1] +def _compute_max_alive_qubits( + graph: GraphState, + prepare_time: dict[int, int], + measure_time: dict[int, int], +) -> int: + """Compute the maximum number of alive qubits over time. + + A node is considered alive at time t if: + - It is an input node and t >= -1 and t < measurement time (if any), or + - It has a preparation time p and t >= p and t < measurement time (if any). + + Returns + ------- + int + The maximum number of alive qubits at any time step. + """ + # Determine time range to check + max_t = max(prepare_time.values() | measure_time.values(), default=0) + + max_alive = len(graph.input_node_indices) # At least inputs are alive at t = -1 + for t in range(max_t + 1): + alive_nodes = set() + for node in graph.physical_nodes: + # Determine preparation time + prep_t = -1 if node in graph.input_node_indices else prepare_time.get(node) + + if prep_t is None or t < prep_t: + continue + + # Determine measurement time (None for outputs or unscheduled) + meas_t = measure_time.get(node) + + if meas_t is None or t < meas_t: + alive_nodes.add(node) + + max_alive = max(max_alive, len(alive_nodes)) + + return max_alive + + +def test_greedy_minimize_time_with_max_qubit_count_respects_limit() -> None: + """Verify that greedy_minimize_time respects max_qubit_count.""" + graph = GraphState() + # chain graph: 0-1-2-3 + n0 = graph.add_physical_node() + n1 = graph.add_physical_node() + n2 = graph.add_physical_node() + n3 = graph.add_physical_node() + graph.add_physical_edge(n0, n1) + graph.add_physical_edge(n1, n2) + graph.add_physical_edge(n2, n3) + + qindex = 0 + graph.register_input(n0, qindex) + graph.register_output(n3, qindex) + + flow = {n0: {n1}, n1: {n2}, n2: {n3}} + scheduler = Scheduler(graph, flow) + + # Set max_qubit_count to 2 (a feasible value for this graph) + prepare_time, measure_time = greedy_minimize_time(graph, scheduler.dag, max_qubit_count=2) + + # Check basic properties + assert n1 in prepare_time + assert n0 not in prepare_time + assert n0 in measure_time + assert n2 in measure_time + assert n3 not in measure_time + + # Verify that the number of alive qubits never exceeds the limit + max_alive = _compute_max_alive_qubits(graph, prepare_time, measure_time) + assert max_alive <= 2 + + +def test_greedy_minimize_time_with_too_small_max_qubit_count_raises() -> None: + """Verify that greedy_minimize_time raises RuntimeError when max_qubit_count is too small.""" + graph = GraphState() + # chain graph: 0-1-2 (at least 2 qubits are needed) + n0 = graph.add_physical_node() + n1 = graph.add_physical_node() + n2 = graph.add_physical_node() + graph.add_physical_edge(n0, n1) + graph.add_physical_edge(n1, n2) + + qindex = 0 + graph.register_input(n0, qindex) + graph.register_output(n2, qindex) + + flow = {n0: {n1}, n1: {n2}} + scheduler = Scheduler(graph, flow) + + # max_qubit_count=1 is not feasible, so expect RuntimeError + with 
pytest.raises(RuntimeError, match="max_qubit_count"): + greedy_minimize_time(graph, scheduler.dag, max_qubit_count=1) + + def test_greedy_scheduler_via_solve_schedule() -> None: """Test greedy scheduler through Scheduler.solve_schedule with use_greedy=True.""" # Create a simple graph From b1739e4fe7fd9e5aa1cf36ec889ca5571094cdfa Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Mon, 17 Nov 2025 14:11:12 +0900 Subject: [PATCH 16/21] fix test --- tests/test_greedy_scheduler.py | 53 +++++++++++++++++----------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/tests/test_greedy_scheduler.py b/tests/test_greedy_scheduler.py index 05825d08..b74a995b 100644 --- a/tests/test_greedy_scheduler.py +++ b/tests/test_greedy_scheduler.py @@ -8,7 +8,6 @@ from graphqomb.greedy_scheduler import ( greedy_minimize_space, greedy_minimize_time, - solve_greedy_schedule, ) from graphqomb.schedule_solver import ScheduleConfig, Strategy from graphqomb.scheduler import Scheduler @@ -95,7 +94,7 @@ def _compute_max_alive_qubits( The maximum number of alive qubits at any time step. """ # Determine time range to check - max_t = max(prepare_time.values() | measure_time.values(), default=0) + max_t = max(set(prepare_time.values()) | set(measure_time.values()), default=0) max_alive = len(graph.input_node_indices) # At least inputs are alive at t = -1 for t in range(max_t + 1): @@ -191,8 +190,8 @@ def test_greedy_scheduler_via_solve_schedule() -> None: scheduler = Scheduler(graph, flow) # Test with greedy MINIMIZE_TIME - config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) - success = scheduler.solve_schedule(config, use_greedy=True) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME, use_greedy=True) + success = scheduler.solve_schedule(config) assert success # Verify schedule is valid @@ -200,8 +199,8 @@ def test_greedy_scheduler_via_solve_schedule() -> None: # Test with greedy MINIMIZE_SPACE scheduler2 = Scheduler(graph, flow) - config = ScheduleConfig(strategy=Strategy.MINIMIZE_SPACE) - success = scheduler2.solve_schedule(config, use_greedy=True) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_SPACE, use_greedy=True) + success = scheduler2.solve_schedule(config) assert success # Verify schedule is valid @@ -226,8 +225,8 @@ def test_greedy_vs_cpsat_correctness() -> None: # Test greedy scheduler scheduler_greedy = Scheduler(graph, flow) - config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) - success_greedy = scheduler_greedy.solve_schedule(config, use_greedy=True) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME, use_greedy=True) + success_greedy = scheduler_greedy.solve_schedule(config) assert success_greedy # Verify greedy schedule is valid @@ -235,7 +234,8 @@ def test_greedy_vs_cpsat_correctness() -> None: # Test CP-SAT scheduler scheduler_cpsat = Scheduler(graph, flow) - success_cpsat = scheduler_cpsat.solve_schedule(config, use_greedy=False, timeout=10) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME, use_greedy=False) + success_cpsat = scheduler_cpsat.solve_schedule(config, timeout=10) assert success_cpsat # Verify CP-SAT schedule is valid @@ -280,8 +280,8 @@ def test_greedy_scheduler_larger_graph() -> None: # Test greedy scheduler scheduler = Scheduler(graph, flow) - config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) - success = scheduler.solve_schedule(config, use_greedy=True) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME, use_greedy=True) + success = scheduler.solve_schedule(config) assert success # Validate the schedule @@ -312,16 
+312,16 @@ def test_greedy_scheduler_both_strategies(strategy: Strategy) -> None: scheduler = Scheduler(graph, flow) # Test with specified strategy - config = ScheduleConfig(strategy=strategy) - success = scheduler.solve_schedule(config, use_greedy=True) + config = ScheduleConfig(strategy=strategy, use_greedy=True) + success = scheduler.solve_schedule(config) assert success # Validate schedule scheduler.validate_schedule() -def test_solve_greedy_schedule_wrapper() -> None: - """Test the solve_greedy_schedule wrapper function.""" +def test_greedy_minimize_space_wrapper() -> None: + """Test the greedy_minimize_space wrapper function.""" # Create a simple graph graph = GraphState() node0 = graph.add_physical_node() @@ -336,15 +336,15 @@ def test_solve_greedy_schedule_wrapper() -> None: flow = {node0: {node1}, node1: {node2}} scheduler = Scheduler(graph, flow) - # Test MINIMIZE_TIME (minimize_space=False) - result = solve_greedy_schedule(graph, scheduler.dag, minimize_space=False) + # Test MINIMIZE_TIME + result = greedy_minimize_time(graph, scheduler.dag) assert result is not None prepare_time, measure_time = result assert len(prepare_time) > 0 assert len(measure_time) > 0 - # Test MINIMIZE_SPACE (minimize_space=True) - result = solve_greedy_schedule(graph, scheduler.dag, minimize_space=True) + # Test MINIMIZE_SPACE + result = greedy_minimize_space(graph, scheduler.dag) assert result is not None prepare_time, measure_time = result assert len(prepare_time) > 0 @@ -368,10 +368,10 @@ def test_greedy_scheduler_performance() -> None: # Time greedy scheduler scheduler_greedy = Scheduler(graph, flow) - config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME, use_greedy=True) start_greedy = time.perf_counter() - success_greedy = scheduler_greedy.solve_schedule(config, use_greedy=True) + success_greedy = scheduler_greedy.solve_schedule(config) end_greedy = time.perf_counter() greedy_time = end_greedy - start_greedy @@ -382,7 +382,8 @@ def test_greedy_scheduler_performance() -> None: scheduler_cpsat = Scheduler(graph, flow) start_cpsat = time.perf_counter() - success_cpsat = scheduler_cpsat.solve_schedule(config, use_greedy=False, timeout=10) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME, use_greedy=False) + success_cpsat = scheduler_cpsat.solve_schedule(config, timeout=10) end_cpsat = time.perf_counter() cpsat_time = end_cpsat - start_cpsat @@ -429,12 +430,12 @@ def test_greedy_scheduler_dag_constraints() -> None: } scheduler = Scheduler(graph, flow) - config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME, use_greedy=True) # Note: This flow creates a cyclic DAG (nodes 3 and 4 have circular dependency) # The greedy scheduler should raise RuntimeError for invalid flows with pytest.raises(RuntimeError, match="No nodes can be measured"): - scheduler.solve_schedule(config, use_greedy=True) + scheduler.solve_schedule(config) def test_greedy_scheduler_edge_constraints() -> None: @@ -452,8 +453,8 @@ def test_greedy_scheduler_edge_constraints() -> None: flow = {node0: {node1}, node1: {node2}} scheduler = Scheduler(graph, flow) - config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME) - success = scheduler.solve_schedule(config, use_greedy=True) + config = ScheduleConfig(strategy=Strategy.MINIMIZE_TIME, use_greedy=True) + success = scheduler.solve_schedule(config) assert success # Validate edge constraints via validate_schedule From ea3154237ff43ebafddeca2794173ead72823101 Mon Sep 
17 00:00:00 2001 From: Masato Fukushima Date: Mon, 17 Nov 2025 14:11:45 +0900 Subject: [PATCH 17/21] add greedy option for scheduler --- graphqomb/schedule_solver.py | 1 + graphqomb/scheduler.py | 16 ++++++---------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/graphqomb/schedule_solver.py b/graphqomb/schedule_solver.py index 90cbeb05..fa12f498 100644 --- a/graphqomb/schedule_solver.py +++ b/graphqomb/schedule_solver.py @@ -37,6 +37,7 @@ class ScheduleConfig: strategy: Strategy max_qubit_count: int | None = None max_time: int | None = None + use_greedy: bool = False @dataclass diff --git a/graphqomb/scheduler.py b/graphqomb/scheduler.py index f630e36b..423f7469 100644 --- a/graphqomb/scheduler.py +++ b/graphqomb/scheduler.py @@ -12,7 +12,7 @@ from typing import TYPE_CHECKING, NamedTuple from graphqomb.feedforward import dag_from_flow -from graphqomb.greedy_scheduler import solve_greedy_schedule +from graphqomb.greedy_scheduler import greedy_minimize_space, greedy_minimize_time from graphqomb.schedule_solver import ScheduleConfig, Strategy, solve_schedule if TYPE_CHECKING: @@ -484,7 +484,6 @@ def solve_schedule( self, config: ScheduleConfig | None = None, timeout: int = 60, - use_greedy: bool = False, ) -> bool: r"""Compute the schedule using constraint programming or greedy heuristics. @@ -495,10 +494,6 @@ def solve_schedule( timeout : `int`, optional Maximum solve time in seconds for CP-SAT solver, by default 60. Ignored when use_greedy=True. - use_greedy : `bool`, optional - If True, use fast greedy heuristics instead of CP-SAT. - Greedy algorithms are much faster than CP-SAT, but provide approximate solutions. - Default is False (use CP-SAT for optimal solutions). Returns ------- @@ -513,11 +508,12 @@ def solve_schedule( if config is None: config = ScheduleConfig(Strategy.MINIMIZE_TIME) - if use_greedy: + if config.use_greedy: # Use fast greedy heuristics - - minimize_space = config.strategy == Strategy.MINIMIZE_SPACE - result = solve_greedy_schedule(self.graph, self.dag, minimize_space) + if config.strategy == Strategy.MINIMIZE_TIME: + result = greedy_minimize_time(self.graph, self.dag, max_qubit_count=config.max_qubit_count) + else: # Strategy.MINIMIZE_SPACE + result = greedy_minimize_space(self.graph, self.dag) else: # Use CP-SAT solver for optimal solution result = solve_schedule(self.graph, self.dag, config, timeout) From d27eca3b4f1073d392b340428524b5b111be7d6c Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Mon, 17 Nov 2025 14:12:07 +0900 Subject: [PATCH 18/21] remove wrapper --- graphqomb/greedy_scheduler.py | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index 079ff751..6fa29a2f 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -307,34 +307,3 @@ def _calc_activate_cost( The activation cost for the node. """ return len(graph.neighbors(node) - prepared) - - -def solve_greedy_schedule( - graph: BaseGraphState, - dag: Mapping[int, AbstractSet[int]], - minimize_space: bool = False, -) -> tuple[dict[int, int], dict[int, int]] | None: - """Solve scheduling using greedy heuristics. - - This is a convenience wrapper that selects the appropriate greedy algorithm - based on the optimization objective. 
-
-    Parameters
-    ----------
-    graph : BaseGraphState
-        The graph state to schedule
-    dag : Mapping[int, AbstractSet[int]]
-        The directed acyclic graph representing measurement dependencies
-    minimize_space : bool, default=False
-        If True, optimize for minimal qubit usage (space).
-        If False, optimize for minimal execution time.
-
-    Returns
-    -------
-    tuple[dict[int, int], dict[int, int]] | None
-        A tuple of (prepare_time, measure_time) dictionaries if successful,
-        None if scheduling fails (should rarely happen for valid inputs)
-    """
-    if minimize_space:
-        return greedy_minimize_space(graph, dag)
-    return greedy_minimize_time(graph, dag)

From 51df27162d1342d121319f9fc1ba9323b63e3fde Mon Sep 17 00:00:00 2001
From: Masato Fukushima
Date: Mon, 17 Nov 2025 14:40:55 +0900
Subject: [PATCH 19/21] add extra properties into pattern

---
 graphqomb/pattern.py | 49 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/graphqomb/pattern.py b/graphqomb/pattern.py
index 58f3b541..baf4170f 100644
--- a/graphqomb/pattern.py
+++ b/graphqomb/pattern.py
@@ -101,6 +101,55 @@ def depth(self) -> int:
         """
         return sum(1 for cmd in self.commands if isinstance(cmd, TICK))
 
+    @property
+    def volume(self) -> int:
+        """Calculate the volume, the sum of the space over all time slices.
+
+        Returns
+        -------
+        `int`
+            Volume of the pattern
+        """
+        return sum(self.space)
+
+    @property
+    def max_volume(self) -> int:
+        """Calculate the maximum volume, defined as max_space * depth.
+
+        Returns
+        -------
+        `int`
+            Maximum volume of the pattern
+        """
+        return self.max_space * self.depth
+
+    @property
+    def idle_times(self) -> dict[int, int]:
+        r"""Calculate the idle times for each qubit in the pattern.
+
+        Returns
+        -------
+        `dict`\[`int`, `int`\]
+            A dictionary mapping each qubit index to its idle time.
+        """
+        idle_times: dict[int, int] = {}
+        prepared_time: dict[int, int] = dict.fromkeys(self.input_node_indices, -1)
+
+        current_time = 0
+        for cmd in self.commands:
+            if isinstance(cmd, TICK):
+                current_time += 1
+            elif isinstance(cmd, N):
+                prepared_time[cmd.node] = current_time
+            elif isinstance(cmd, M):
+                idle_times[cmd.node] = current_time - prepared_time[cmd.node]
+
+        for output_node in self.output_node_indices:
+            if output_node in prepared_time:
+                idle_times[output_node] = current_time - prepared_time[output_node]
+
+        return idle_times
+
     @property
     def throughput(self) -> float:
         """Calculate the number of measurements per TICK in the pattern.
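The idle_times and throughput metrics added in PATCH 19 are easiest to see on a toy command stream. The sketch below is a self-contained illustration, not the library API: the N/M/TICK dataclasses, the command list, and the input set are stand-ins assumed for this example, and the output-node bookkeeping at the end of idle_times is omitted for brevity.

from dataclasses import dataclass

@dataclass(frozen=True)
class N:        # stand-in for the real preparation command: prepare qubit `node`
    node: int

@dataclass(frozen=True)
class M:        # stand-in for the real measurement command: measure qubit `node`
    node: int

class TICK:     # stand-in for the real TICK command: advance one time slice
    pass

commands = [N(1), TICK(), M(0), N(2), TICK(), M(1), TICK(), M(2)]
input_nodes = {0}                      # inputs count as prepared at time -1

prepared_time = dict.fromkeys(input_nodes, -1)
idle_times: dict[int, int] = {}
current_time = 0
num_measurements = 0
for cmd in commands:
    if isinstance(cmd, TICK):
        current_time += 1
    elif isinstance(cmd, N):
        prepared_time[cmd.node] = current_time
    elif isinstance(cmd, M):
        # idle time = slices spent waiting between preparation and measurement
        idle_times[cmd.node] = current_time - prepared_time[cmd.node]
        num_measurements += 1

depth = sum(isinstance(cmd, TICK) for cmd in commands)
print(idle_times)                      # {0: 2, 1: 2, 2: 2}
print(num_measurements / depth)        # 1.0 measurements per TICK

Note that throughput divides by depth, so a pattern containing no TICK command would raise ZeroDivisionError; callers may want to guard that case.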
From 26330ca39d037d4fa39687c7f79ddf63d9cb9bb1 Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Mon, 17 Nov 2025 16:13:27 +0900 Subject: [PATCH 20/21] add pauli simplification in feedforward module --- graphqomb/feedforward.py | 56 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/graphqomb/feedforward.py b/graphqomb/feedforward.py index fdb0102e..f176d859 100644 --- a/graphqomb/feedforward.py +++ b/graphqomb/feedforward.py @@ -19,7 +19,7 @@ import typing_extensions -from graphqomb.common import Plane +from graphqomb.common import Plane, determine_pauli_axis, Axis from graphqomb.graphstate import BaseGraphState, odd_neighbors if sys.version_info >= (3, 10): @@ -277,3 +277,57 @@ def propagate_correction_map( # noqa: C901, PLR0912 new_zflow[parent] ^= {child_z} return new_xflow, new_zflow + + +def pauli_simplification( # noqa: C901 + graph: BaseGraphState, + xflow: Mapping[int, AbstractSet[int]], + zflow: Mapping[int, AbstractSet[int]] | None = None, +) -> tuple[dict[int, set[int]], dict[int, set[int]]]: + r"""Simplify the correction maps by removing redundant Pauli corrections. + + Parameters + ---------- + graph : `BaseGraphState` + Underlying graph state. + xflow : `collections.abc.Mapping`\[`int`, `collections.abc.Set`\[`int`\]\] + Correction map for X. + zflow : `collections.abc.Mapping`\[`int`, `collections.abc.Set`\[`int`\]\] | `None` + Correction map for Z. If `None`, it is generated from xflow by odd neighbors. + + Returns + ------- + `tuple`\[`dict`\[`int`, `set`\[`int`\]\], `dict`\[`int`, `set`\[`int`\]\]] + Updated correction maps for X and Z after simplification. + """ + if zflow is None: + zflow = {node: odd_neighbors(xflow[node], graph) - {node} for node in xflow} + + new_xflow = {k: set(vs) for k, vs in xflow.items()} + new_zflow = {k: set(vs) for k, vs in zflow.items()} + + inv_xflow: dict[int, set[int]] = {} + inv_zflow: dict[int, set[int]] = {} + for k, vs in xflow.items(): + for v in vs: + inv_xflow.setdefault(v, set()).add(k) + for k, vs in zflow.items(): + for v in vs: + inv_zflow.setdefault(v, set()).add(k) + + for node in graph.physical_nodes - graph.output_node_indices.keys(): + meas_basis = graph.meas_bases.get(node) + meas_axis = determine_pauli_axis(meas_basis) + + if meas_axis == Axis.X: + for parent in inv_xflow.get(node, set()): + new_xflow[parent] -= {node} + elif meas_axis == Axis.Z: + for parent in inv_zflow.get(node, set()): + new_zflow[parent] -= {node} + elif meas_axis == Axis.Y: + for parent in inv_xflow.get(node, set()) & inv_zflow.get(node, set()): + new_xflow[parent] -= {node} + new_zflow[parent] -= {node} + + return new_xflow, new_zflow From cebc4578f79bce01ed688b1271ecef921242b1d3 Mon Sep 17 00:00:00 2001 From: Masato Fukushima Date: Mon, 17 Nov 2025 18:36:20 +0900 Subject: [PATCH 21/21] optimize the performance of greedy scheduler --- graphqomb/greedy_scheduler.py | 99 +++++++++++++++++++---------------- 1 file changed, 55 insertions(+), 44 deletions(-) diff --git a/graphqomb/greedy_scheduler.py b/graphqomb/greedy_scheduler.py index 6fa29a2f..84737311 100644 --- a/graphqomb/greedy_scheduler.py +++ b/graphqomb/greedy_scheduler.py @@ -23,7 +23,7 @@ from graphqomb.graphstate import BaseGraphState -def greedy_minimize_time( +def greedy_minimize_time( # noqa: C901, PLR0912 graph: BaseGraphState, dag: Mapping[int, AbstractSet[int]], max_qubit_count: int | None = None, @@ -71,54 +71,66 @@ def greedy_minimize_time( current_time = 0 - while unmeasured: - measure_candidate = set() - for node in 
unmeasured: - if len(inv_dag[node]) == 0: - measure_candidate.add(node) + # Nodes whose dependencies are all resolved and are not yet measured + measure_candidates: set[int] = {node for node in unmeasured if not inv_dag[node]} + + # Cache neighbors to avoid repeated set constructions in tight loops + neighbors_map = {node: graph.neighbors(node) for node in graph.physical_nodes} - if not measure_candidate: + while unmeasured: # noqa: PLR1702 + if not measure_candidates: msg = "No nodes can be measured; possible cyclic dependency or incomplete preparation." raise RuntimeError(msg) if max_qubit_count is not None: + # Choose measurement nodes from measure_candidates while respecting max_qubit_count to_measure, to_prepare = _determine_measure_nodes( - graph, - measure_candidate, + neighbors_map, + measure_candidates, prepared, alive, max_qubit_count, ) needs_prep = False - # Prepare selected neighbors at current_time for neighbor in to_prepare: if neighbor not in prepared: prepare_time[neighbor] = current_time prepared.add(neighbor) alive.add(neighbor) - needs_prep = True + needs_prep = True # toggle prep flag + + # If this neighbor already had no dependencies, it becomes measure candidate + if not inv_dag[neighbor] and neighbor in unmeasured: + measure_candidates.add(neighbor) else: - to_measure = measure_candidate + # Without a qubit limit, measure all currently measure candidates + to_measure = set(measure_candidates) needs_prep = False - # Prepare neighbors at current_time for node in to_measure: - for neighbor in graph.neighbors(node): + for neighbor in neighbors_map[node]: if neighbor not in prepared: prepare_time[neighbor] = current_time prepared.add(neighbor) alive.add(neighbor) needs_prep = True + if not inv_dag[neighbor] and neighbor in unmeasured: + measure_candidates.add(neighbor) + # Measure at current_time if no prep needed, otherwise at current_time + 1 meas_time = current_time + 1 if needs_prep else current_time + for node in to_measure: measure_time[node] = meas_time alive.remove(node) unmeasured.remove(node) + measure_candidates.remove(node) + # Remove measured node from dependencies of all its children in the DAG - for child in dag.get(node, set()): - if child in inv_dag: - inv_dag[child].remove(node) + for child in dag.get(node, ()): + inv_dag[child].remove(node) + if not inv_dag[child] and child in unmeasured: + measure_candidates.add(child) current_time = meas_time + 1 @@ -126,7 +138,7 @@ def greedy_minimize_time( def _determine_measure_nodes( - graph: BaseGraphState, + neighbors_map: Mapping[int, AbstractSet[int]], measure_candidates: AbstractSet[int], prepared: AbstractSet[int], alive: AbstractSet[int], @@ -136,8 +148,8 @@ def _determine_measure_nodes( Parameters ---------- - graph : `BaseGraphState` - The graph state. + neighbors_map : `collections.abc.Mapping`\[`int`, `collections.abc.Set`\[`int`\]\] + Mapping from node to its neighbors. measure_candidates : `collections.abc.Set`\[`int`\] The candidate nodes available for measurement. 
prepared : `collections.abc.Set`\[`int`\] @@ -162,7 +174,7 @@ def _determine_measure_nodes( for node in measure_candidates: # Neighbors that still need to be prepared for this node - new_neighbors = graph.neighbors(node) - prepared + new_neighbors = neighbors_map[node] - prepared additional_to_prepare = new_neighbors - to_prepare # Projected number of active qubits after preparing these neighbors @@ -216,6 +228,7 @@ def greedy_minimize_space( # noqa: C901, PLR0912 topo_order = list(TopologicalSorter(dag).static_order()) topo_order.reverse() # from parents to children + topo_rank = {node: i for i, node in enumerate(topo_order)} # Build inverse DAG: for each node, track which nodes must be measured before it inv_dag: dict[int, set[int]] = {node: set() for node in graph.physical_nodes} @@ -227,27 +240,21 @@ def greedy_minimize_space( # noqa: C901, PLR0912 alive: set[int] = set(graph.input_node_indices.keys()) current_time = 0 + # Cache neighbors once as the graph is static during scheduling + neighbors_map = {node: graph.neighbors(node) for node in graph.physical_nodes} + + measure_candidates: set[int] = {node for node in unmeasured if not inv_dag[node]} + while unmeasured: - candidate_nodes = set() - for node in alive: - if len(inv_dag[node]) == 0: - candidate_nodes.add(node) - - if not candidate_nodes: - # If no alive nodes can be measured, pick from unmeasured - for node in unmeasured - alive: - if len(inv_dag[node]) == 0: - candidate_nodes.add(node) - - if not candidate_nodes: + if not measure_candidates: msg = "No nodes can be measured; possible cyclic dependency or incomplete preparation." raise RuntimeError(msg) # calculate costs and pick the best node to measure best_node_candidate: set[int] = set() best_cost = float("inf") - for node in candidate_nodes: - cost = _calc_activate_cost(node, graph, prepared) + for node in measure_candidates: + cost = _calc_activate_cost(node, neighbors_map, prepared) if cost < best_cost: best_cost = cost best_node_candidate = {node} @@ -255,11 +262,12 @@ def greedy_minimize_space( # noqa: C901, PLR0912 best_node_candidate.add(node) # tie-breaker: choose the node that appears first in topological order - best_node = min(best_node_candidate, key=topo_order.index) + default_rank = len(topo_rank) + best_node = min(best_node_candidate, key=lambda n: topo_rank.get(n, default_rank)) # Prepare neighbors at current_time needs_prep = False - for neighbor in graph.neighbors(best_node): + for neighbor in neighbors_map[best_node]: if neighbor not in prepared: prepare_time[neighbor] = current_time prepared.add(neighbor) @@ -272,10 +280,13 @@ def greedy_minimize_space( # noqa: C901, PLR0912 unmeasured.remove(best_node) alive.remove(best_node) + measure_candidates.remove(best_node) + # Remove measured node from dependencies of all its children in the DAG - for child in dag.get(best_node, set()): - if child in inv_dag: - inv_dag[child].remove(best_node) + for child in dag.get(best_node, ()): + inv_dag[child].remove(best_node) + if not inv_dag[child] and child in unmeasured: + measure_candidates.add(child) current_time = meas_time + 1 @@ -284,7 +295,7 @@ def greedy_minimize_space( # noqa: C901, PLR0912 def _calc_activate_cost( node: int, - graph: BaseGraphState, + neighbors_map: Mapping[int, AbstractSet[int]], prepared: AbstractSet[int], ) -> int: r"""Calculate the cost of activating (preparing) a node. @@ -296,8 +307,8 @@ def _calc_activate_cost( ---------- node : `int` The node to evaluate. - graph : `BaseGraphState` - The graph state. 
+ neighbors_map : `collections.abc.Mapping`\[`int`, `collections.abc.Set`\[`int`\]\] + Cached neighbor sets for graph nodes. prepared : `collections.abc.Set`\[`int`\] The set of currently prepared nodes. @@ -306,4 +317,4 @@ def _calc_activate_cost( `int` The activation cost for the node. """ - return len(graph.neighbors(node) - prepared) + return len(neighbors_map[node] - prepared)
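The optimized loop in PATCH 21 is easier to follow on a concrete input. Below is a minimal, self-contained sketch of the same time-minimizing strategy run on a 4-node chain, with plain dicts standing in for BaseGraphState and the flow-derived DAG. The neighbors_map, dag, inputs, and outputs literals are made up for illustration, and the candidate set is recomputed each round rather than maintained incrementally, so this mirrors the behavior of greedy_minimize_time (without the max_qubit_count branch) rather than its optimized bookkeeping.

# Toy chain 0-1-2-3 with input 0, output 3, and DAG 0 -> 1 -> 2 -> 3.
neighbors_map = {0: {1}, 1: {0, 2}, 2: {1, 3}, 3: {2}}
inputs, outputs = {0}, {3}
dag = {0: {1}, 1: {2}, 2: {3}, 3: set()}

# Inverse DAG: which nodes must be measured before each node.
inv_dag = {n: set() for n in neighbors_map}
for parent, children in dag.items():
    for child in children:
        inv_dag[child].add(parent)

prepared = set(inputs)                     # inputs count as already prepared
unmeasured = set(neighbors_map) - outputs  # outputs are never measured
prepare_time, measure_time = {}, {}
t = 0
while unmeasured:
    # Nodes whose measurement dependencies are all resolved.
    ready = {n for n in unmeasured if not inv_dag[n]}
    if not ready:
        raise RuntimeError("cyclic dependency")
    needs_prep = False
    for node in ready:
        for nb in neighbors_map[node]:
            if nb not in prepared:
                prepare_time[nb] = t
                prepared.add(nb)
                needs_prep = True
    # Measure one slice later if a neighbor had to be prepared first.
    meas_t = t + 1 if needs_prep else t
    for node in ready:
        measure_time[node] = meas_t
        unmeasured.remove(node)
        for child in dag[node]:
            inv_dag[child].discard(node)
    t = meas_t + 1

print(prepare_time)   # {1: 0, 2: 2, 3: 4}
print(measure_time)   # {0: 1, 1: 3, 2: 5}

Each measurement here lands one slice after the preparation it triggered, which is exactly the needs_prep branch in the patch; the cached-candidate version above should produce the same schedule, since the optimization only changes how the candidate set is maintained.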