diff --git a/src/guidellm/benchmark/aggregator.py b/src/guidellm/benchmark/aggregator.py
index af7f1a13..f10eb5ed 100644
--- a/src/guidellm/benchmark/aggregator.py
+++ b/src/guidellm/benchmark/aggregator.py
@@ -32,11 +32,11 @@
     GenerationRequest,
     GenerativeRequestLoaderDescription,
     RequestLoaderDescription,
+    RequestT,
+    ResponseT,
 )
 from guidellm.scheduler import (
     GenerativeRequestsWorkerDescription,
-    RequestT,
-    ResponseT,
     SchedulerRequestResult,
     WorkerDescription,
 )
diff --git a/src/guidellm/benchmark/benchmarker.py b/src/guidellm/benchmark/benchmarker.py
index 11b6d245..0e34e322 100644
--- a/src/guidellm/benchmark/benchmarker.py
+++ b/src/guidellm/benchmark/benchmarker.py
@@ -27,12 +27,12 @@
     GenerationRequest,
     GenerativeRequestLoaderDescription,
     RequestLoaderDescription,
+    RequestT,
+    ResponseT,
 )
 from guidellm.scheduler import (
     GenerativeRequestsWorker,
     RequestsWorker,
-    RequestT,
-    ResponseT,
     Scheduler,
     SchedulerRequestResult,
     SchedulingStrategy,
diff --git a/src/guidellm/request/__init__.py b/src/guidellm/request/__init__.py
index db3059cc..fd0ec355 100644
--- a/src/guidellm/request/__init__.py
+++ b/src/guidellm/request/__init__.py
@@ -5,11 +5,17 @@
     RequestLoaderDescription,
 )
 from .request import GenerationRequest
+from .session import GenerativeRequestSession, RequestSession
+from .types import RequestT, ResponseT
 
 __all__ = [
     "GenerationRequest",
     "GenerativeRequestLoader",
     "GenerativeRequestLoaderDescription",
+    "GenerativeRequestSession",
     "RequestLoader",
     "RequestLoaderDescription",
+    "RequestSession",
+    "RequestT",
+    "ResponseT",
 ]
diff --git a/src/guidellm/request/loader.py b/src/guidellm/request/loader.py
index 50ab3cca..452e4733 100644
--- a/src/guidellm/request/loader.py
+++ b/src/guidellm/request/loader.py
@@ -15,6 +15,7 @@
 from guidellm.dataset import ColumnInputTypes, load_dataset
 from guidellm.objects import StandardBaseModel
 from guidellm.request.request import GenerationRequest
+from guidellm.request.session import GenerativeRequestSession
 
 __all__ = [
     "GenerativeRequestLoader",
@@ -30,10 +31,10 @@ class RequestLoaderDescription(StandardBaseModel):
 
 class RequestLoader(Iterable):
     @abstractmethod
-    def __iter__(self): ...
+    def __iter__(self) -> Iterator: ...
 
     @abstractmethod
-    def __len__(self): ...
+    def __len__(self) -> int: ...
 
     @property
     @abstractmethod
@@ -105,14 +106,14 @@ def __init__(
         self.preserve_iter_state = iter_type == "infinite"  # ensure no caching requests
         self._preserved_iter = None
 
-    def __iter__(self) -> Iterator[GenerationRequest]:
+    def __iter__(self) -> Iterator[GenerativeRequestSession]:
         scope_create_count = 0
 
         while (dataset_iter := self._get_dataset_iter(scope_create_count)) is not None:
             scope_create_count += 1
 
             for item in dataset_iter:
-                yield self._create_request(item)
+                yield GenerativeRequestSession(self._create_request(item))
 
         self._preserved_iter = None
diff --git a/src/guidellm/request/session.py b/src/guidellm/request/session.py
new file mode 100644
index 00000000..9e00b37d
--- /dev/null
+++ b/src/guidellm/request/session.py
@@ -0,0 +1,55 @@
+from abc import ABC, abstractmethod
+from typing import Generic, TypeVar
+
+from guidellm.backend.response import ResponseSummary
+from guidellm.request.request import GenerationRequest
+
+__all__ = ["GenerativeRequestSession", "RequestSession"]
+
+RequestT = TypeVar("RequestT")
+ResponseT = TypeVar("ResponseT")
+
+
+class RequestSession(ABC, Generic[RequestT, ResponseT]):
+    """
+    A series of requests that build upon each other to
+    form a conversation between the user and the model.
+    """
+
+    @abstractmethod
+    def __len__(self) -> int: ...
+
+    @abstractmethod
+    def get_next_request(self) -> RequestT: ...
+
+    @abstractmethod
+    def get_next_delay(self) -> float: ...
+
+    @abstractmethod
+    def push_response(self, response: ResponseT) -> None: ...
+
+    @property
+    @abstractmethod
+    def complete(self) -> bool: ...
+
+
+class GenerativeRequestSession(RequestSession[GenerationRequest, ResponseSummary]):
+    def __init__(self, request: GenerationRequest) -> None:
+        self.request = request
+        self._complete = False
+
+    def __len__(self) -> int:
+        return 1
+
+    def get_next_request(self) -> GenerationRequest:
+        return self.request
+
+    def get_next_delay(self) -> float:
+        return 0.0
+
+    def push_response(self, response: ResponseSummary) -> None:  # noqa: ARG002
+        self._complete = True
+
+    @property
+    def complete(self) -> bool:
+        return self._complete
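Note on the new abstraction: `GenerativeRequestSession` wraps exactly one request today, but the `RequestSession` interface (length, next request, inter-turn delay, response feedback, completion flag) is what the reworked worker loop further down builds on. A minimal sketch of what a multi-turn subclass could look like — hypothetical, not part of this diff; the class name, `prompts` list, and fixed `turn_delay` are invented for illustration:

```python
from guidellm.backend.response import ResponseSummary
from guidellm.request.request import GenerationRequest
from guidellm.request.session import RequestSession


class MultiTurnRequestSession(RequestSession[GenerationRequest, ResponseSummary]):
    """Hypothetical session that replays a scripted conversation."""

    def __init__(self, prompts: list[GenerationRequest], turn_delay: float = 0.0):
        self.prompts = prompts
        self.responses: list[ResponseSummary] = []
        self.turn_delay = turn_delay

    def __len__(self) -> int:
        return len(self.prompts)

    def get_next_request(self) -> GenerationRequest:
        # A real implementation would fold earlier responses back into the
        # next prompt to carry conversational context; this sketch does not.
        return self.prompts[len(self.responses)]

    def get_next_delay(self) -> float:
        # Simulated user "think time" before the follow-up turn is sent.
        return self.turn_delay

    def push_response(self, response: ResponseSummary) -> None:
        self.responses.append(response)

    @property
    def complete(self) -> bool:
        return len(self.responses) >= len(self.prompts)
```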
+ """ + + @abstractmethod + def __len__(self) -> int: ... + + @abstractmethod + def get_next_request(self) -> RequestT: ... + + @abstractmethod + def get_next_delay(self) -> float: ... + + @abstractmethod + def push_response(self, response: ResponseT) -> None: ... + + @property + @abstractmethod + def complete(self) -> bool: ... + + +class GenerativeRequestSession(RequestSession[GenerationRequest, ResponseSummary]): + def __init__(self, request: GenerationRequest) -> None: + self.request = request + self._complete = False + + def __len__(self) -> int: + return 1 + + def get_next_request(self) -> GenerationRequest: + return self.request + + def get_next_delay(self) -> float: + return 0.0 + + def push_response(self, response: ResponseSummary) -> None: # noqa: ARG002 + self._complete = True + + @property + def complete(self) -> bool: + return self._complete diff --git a/src/guidellm/scheduler/types.py b/src/guidellm/request/types.py similarity index 66% rename from src/guidellm/scheduler/types.py rename to src/guidellm/request/types.py index 42535d71..f82493be 100644 --- a/src/guidellm/scheduler/types.py +++ b/src/guidellm/request/types.py @@ -1,6 +1,9 @@ from typing import TypeVar -__all__ = ["RequestT", "ResponseT"] +__all__ = [ + "RequestT", + "ResponseT", +] RequestT = TypeVar("RequestT") diff --git a/src/guidellm/scheduler/__init__.py b/src/guidellm/scheduler/__init__.py index 37bf1fd5..d3aa0aab 100644 --- a/src/guidellm/scheduler/__init__.py +++ b/src/guidellm/scheduler/__init__.py @@ -15,14 +15,12 @@ ThroughputStrategy, strategy_display_str, ) -from .types import RequestT, ResponseT from .worker import ( GenerativeRequestsWorker, GenerativeRequestsWorkerDescription, RequestsWorker, ResolveStatus, WorkerDescription, - WorkerProcessRequest, WorkerProcessResult, ) @@ -32,10 +30,8 @@ "ConcurrentStrategy", "GenerativeRequestsWorker", "GenerativeRequestsWorkerDescription", - "RequestT", "RequestsWorker", "ResolveStatus", - "ResponseT", "Scheduler", "SchedulerRequestInfo", "SchedulerRequestResult", @@ -46,7 +42,6 @@ "SynchronousStrategy", "ThroughputStrategy", "WorkerDescription", - "WorkerProcessRequest", "WorkerProcessResult", "strategy_display_str", ] diff --git a/src/guidellm/scheduler/queues.py b/src/guidellm/scheduler/queues.py new file mode 100644 index 00000000..6ccc6704 --- /dev/null +++ b/src/guidellm/scheduler/queues.py @@ -0,0 +1,25 @@ +""" +Helper module for importing the correct queue types. 
+""" + +from dataclasses import dataclass +from queue import Empty as QueueEmpty +from queue import Full as QueueFull +from queue import Queue +from typing import Generic + +from guidellm.request.types import RequestT, ResponseT +from guidellm.scheduler.result import WorkerProcessRequest, WorkerProcessResult + +__all__ = [ + "MPQueues", + "Queue", + "QueueEmpty", + "QueueFull", +] + + +@dataclass +class MPQueues(Generic[RequestT, ResponseT]): + requests: Queue[WorkerProcessRequest[RequestT, ResponseT]] + responses: Queue[WorkerProcessResult[RequestT, ResponseT]] diff --git a/src/guidellm/scheduler/result.py b/src/guidellm/scheduler/result.py index 0f12687f..125b33a7 100644 --- a/src/guidellm/scheduler/result.py +++ b/src/guidellm/scheduler/result.py @@ -1,3 +1,4 @@ +from dataclasses import dataclass from typing import ( Generic, Literal, @@ -5,14 +6,17 @@ ) from guidellm.objects import StandardBaseModel +from guidellm.request.session import RequestSession +from guidellm.request.types import RequestT, ResponseT from guidellm.scheduler.strategy import SchedulingStrategy -from guidellm.scheduler.types import RequestT, ResponseT __all__ = [ "SchedulerRequestInfo", "SchedulerRequestResult", "SchedulerResult", "SchedulerRunInfo", + "WorkerProcessRequest", + "WorkerProcessResult", ] @@ -135,3 +139,18 @@ class SchedulerRequestResult( request: RequestT request_info: SchedulerRequestInfo response: Optional[ResponseT] = None + + +@dataclass +class WorkerProcessRequest(Generic[RequestT, ResponseT]): + session: RequestSession[RequestT, ResponseT] + timeout_time: float + queued_time: float + + +@dataclass +class WorkerProcessResult(Generic[RequestT, ResponseT]): + type_: Literal["request_scheduled", "request_start", "request_complete"] + request: RequestT + response: Optional[ResponseT] + info: SchedulerRequestInfo diff --git a/src/guidellm/scheduler/scheduler.py b/src/guidellm/scheduler/scheduler.py index 06203827..d74578b9 100644 --- a/src/guidellm/scheduler/scheduler.py +++ b/src/guidellm/scheduler/scheduler.py @@ -1,10 +1,10 @@ import asyncio import math -import multiprocessing -import multiprocessing.queues import time from collections.abc import AsyncGenerator, Iterable, Iterator from concurrent.futures import ProcessPoolExecutor +from multiprocessing import Manager +from threading import Event from typing import ( Any, Generic, @@ -15,17 +15,21 @@ from loguru import logger from guidellm.config import settings +from guidellm.request.types import ( + RequestT, + ResponseT, +) +from guidellm.scheduler.queues import MPQueues, Queue, QueueEmpty from guidellm.scheduler.result import ( SchedulerRequestResult, SchedulerResult, SchedulerRunInfo, + WorkerProcessRequest, + WorkerProcessResult, ) from guidellm.scheduler.strategy import SchedulingStrategy -from guidellm.scheduler.types import RequestT, ResponseT from guidellm.scheduler.worker import ( RequestsWorker, - WorkerProcessRequest, - WorkerProcessResult, ) __all__ = ["Scheduler"] @@ -114,16 +118,20 @@ async def run( raise ValueError(f"Invalid max_duration: {max_duration}") with ( - multiprocessing.Manager() as manager, + Manager() as manager, ProcessPoolExecutor( max_workers=scheduling_strategy.processes_limit ) as executor, ): requests_iter: Optional[Iterator[Any]] = None - futures, requests_queue, responses_queue = await self._start_processes( + # TODO: Configurable delay and move somewhere more appropriate + scheduling_strategy.start_time = ( + time.time() + ) # Add a small delay to allow processes to start + futures, queues, stop_event = await 
diff --git a/src/guidellm/scheduler/scheduler.py b/src/guidellm/scheduler/scheduler.py
index 06203827..d74578b9 100644
--- a/src/guidellm/scheduler/scheduler.py
+++ b/src/guidellm/scheduler/scheduler.py
@@ -1,10 +1,10 @@
 import asyncio
 import math
-import multiprocessing
-import multiprocessing.queues
 import time
 from collections.abc import AsyncGenerator, Iterable, Iterator
 from concurrent.futures import ProcessPoolExecutor
+from multiprocessing import Manager
+from threading import Event
 from typing import (
     Any,
     Generic,
@@ -15,17 +15,21 @@
 from loguru import logger
 
 from guidellm.config import settings
+from guidellm.request.types import (
+    RequestT,
+    ResponseT,
+)
+from guidellm.scheduler.queues import MPQueues, Queue, QueueEmpty
 from guidellm.scheduler.result import (
     SchedulerRequestResult,
     SchedulerResult,
     SchedulerRunInfo,
+    WorkerProcessRequest,
+    WorkerProcessResult,
 )
 from guidellm.scheduler.strategy import SchedulingStrategy
-from guidellm.scheduler.types import RequestT, ResponseT
 from guidellm.scheduler.worker import (
     RequestsWorker,
-    WorkerProcessRequest,
-    WorkerProcessResult,
 )
 
 __all__ = ["Scheduler"]
@@ -114,16 +118,20 @@
             raise ValueError(f"Invalid max_duration: {max_duration}")
 
         with (
-            multiprocessing.Manager() as manager,
+            Manager() as manager,
             ProcessPoolExecutor(
                 max_workers=scheduling_strategy.processes_limit
             ) as executor,
         ):
             requests_iter: Optional[Iterator[Any]] = None
-            futures, requests_queue, responses_queue = await self._start_processes(
+            # TODO: Configurable delay and move somewhere more appropriate
+            scheduling_strategy.start_time = (
+                time.time()
+            )  # Add a small delay to allow processes to start
+            futures, queues, stop_event = await self._start_processes(
                 manager, executor, scheduling_strategy
             )
-            run_info, requests_iter, times_iter = self._run_setup(
+            run_info, requests_iter = self._run_setup(
                 futures, scheduling_strategy, max_number, max_duration
             )
             yield SchedulerResult(
@@ -138,24 +146,20 @@
                 if future.done() and (err := future.exception()) is not None:
                     raise err
 
-                if (
-                    requests_iter is None
-                    and run_info.completed_requests >= run_info.created_requests
-                ):
+                if requests_iter is None and run_info.processing_requests <= 0:
                     # we've exhausted all requests we've wanted to run
                     # and yielded all responses
                     break
 
                 requests_iter = self._add_requests(
                     requests_iter,
-                    times_iter,
-                    requests_queue,
+                    queues.requests,
                     run_info,
                 )
                 await asyncio.sleep(0)  # enable requests to start
 
                 iter_result = self._check_result_ready(
-                    responses_queue,
+                    queues.responses,
                     run_info,
                 )
                 if iter_result is not None:
@@ -171,7 +175,7 @@
                 run_info=run_info,
             )
 
-            await self._stop_processes(futures, requests_queue)
+            await self._stop_processes(futures, stop_event)
 
     async def _start_processes(
         self,
@@ -180,14 +184,17 @@
         scheduling_strategy: SchedulingStrategy,
     ) -> tuple[
         list[asyncio.Future],
-        multiprocessing.Queue,
-        multiprocessing.Queue,
+        MPQueues[RequestT, ResponseT],
+        Event,
     ]:
         await self.worker.prepare_multiprocessing()
-        requests_queue = manager.Queue(
-            maxsize=scheduling_strategy.queued_requests_limit
+        queues: MPQueues[RequestT, ResponseT] = MPQueues(
+            requests=manager.Queue(
+                maxsize=scheduling_strategy.processing_requests_limit
+            ),
+            responses=manager.Queue(),
         )
-        responses_queue = manager.Queue()
+        stop_event = manager.Event()
 
         num_processes = min(
             scheduling_strategy.processes_limit,
@@ -212,36 +219,23 @@
         futures = []
         loop = asyncio.get_event_loop()
         for id_, requests_limit in zip(process_ids, process_requests_limits):
-            if scheduling_strategy.processing_mode == "sync":
-                futures.append(
-                    loop.run_in_executor(
-                        executor,
-                        self.worker.process_loop_synchronous,
-                        requests_queue,
-                        responses_queue,
-                        id_,
-                    )
-                )
-            elif scheduling_strategy.processing_mode == "async":
-                futures.append(
-                    loop.run_in_executor(
-                        executor,
-                        self.worker.process_loop_asynchronous,
-                        requests_queue,
-                        responses_queue,
-                        requests_limit,
-                        id_,
-                    )
-                )
-            else:
-                raise ValueError(
-                    f"Invalid processing mode: {scheduling_strategy.processing_mode} "
-                    f"for strategy: {scheduling_strategy}"
+            futures.append(
+                loop.run_in_executor(
+                    executor,
+                    self.worker.process_loop_asynchronous,
+                    queues,
+                    scheduling_strategy,
+                    stop_event,
+                    False,  # TODO: Make configurable
+                    requests_limit,
+                    id_,
+                    num_processes,
                 )
+            )
 
         await asyncio.sleep(0.1)  # give time for processes to start
 
-        return futures, requests_queue, responses_queue
+        return futures, queues, stop_event
 
     def _run_setup(
         self,
@@ -249,11 +243,9 @@
         scheduling_strategy: SchedulingStrategy,
         max_number: Optional[int],
         max_duration: Optional[float],
-    ) -> tuple[SchedulerRunInfo, Iterator[Any], Iterator[float]]:
+    ) -> tuple[SchedulerRunInfo, Iterator[Any]]:
         requests_iter = iter(self.request_loader)
-        start_time = time.time()
-        times_iter = iter(scheduling_strategy.request_times())
-        end_time = time.time() + (max_duration or math.inf)
+        end_time = scheduling_strategy.start_time + (max_duration or math.inf)
         end_number = max_number or math.inf
 
         try:
@@ -271,26 +263,28 @@
             )
 
         info = SchedulerRunInfo(
-            start_time=start_time,
+            start_time=scheduling_strategy.start_time,
             end_time=end_time,
             end_number=end_number,
             processes=len(processes),
             strategy=scheduling_strategy,
         )
 
-        return info, requests_iter, times_iter
+        return info, requests_iter
 
     def _add_requests(
         self,
         requests_iter: Optional[Iterator[Any]],
-        times_iter: Iterator[float],
-        requests_queue: multiprocessing.Queue,
+        requests_queue: Queue[WorkerProcessRequest[RequestT, ResponseT]],
         run_info: SchedulerRunInfo,
     ) -> Optional[Iterator[Any]]:
         if requests_iter is not None:
             try:
                 added_count = 0
 
+                if time.time() >= run_info.end_time:
+                    raise StopIteration
+
                 while (
                     not requests_queue.full()
                     and added_count < settings.max_add_requests_per_loop
@@ -298,23 +292,17 @@
                 ):
                     if run_info.created_requests >= run_info.end_number:
                         raise StopIteration
 
-                    if (
-                        request_time := next(times_iter)
-                    ) >= run_info.end_time or time.time() >= run_info.end_time:
-                        raise StopIteration
-
-                    request = next(requests_iter)
-                    work_req: WorkerProcessRequest[RequestT] = WorkerProcessRequest(
-                        request=request,
-                        start_time=request_time,
+                    session = next(requests_iter)
+                    work_req = WorkerProcessRequest(
+                        session=session,
                         timeout_time=run_info.end_time,
                         queued_time=time.time(),
                     )
                     requests_queue.put(work_req)
-                    run_info.created_requests += 1
-                    run_info.queued_requests += 1
-                    added_count += 1
+                    run_info.created_requests += len(session)
+                    run_info.queued_requests += len(session)
+                    added_count += len(session)
             except StopIteration:
                 # we've reached the limit number, limit time, or exhausted the requests
                 # set to None to stop adding more and tell the loop no more requests
@@ -324,14 +312,14 @@
 
     def _check_result_ready(
         self,
-        responses_queue: multiprocessing.Queue,
+        responses_queue: Queue[WorkerProcessResult[RequestT, ResponseT]],
         run_info: SchedulerRunInfo,
     ) -> Optional[SchedulerRequestResult[RequestT, ResponseT]]:
         try:
             process_response: WorkerProcessResult[RequestT, ResponseT] = (
                 responses_queue.get_nowait()
             )
-        except multiprocessing.queues.Empty:  # type: ignore[attr-defined]
+        except QueueEmpty:
             return None
 
         if process_response.type_ == "request_scheduled":
@@ -374,9 +362,9 @@
     async def _stop_processes(
         self,
         futures: list[asyncio.Future],
-        requests_queue: multiprocessing.Queue,
+        stop_event: Event,
     ):
-        for _ in futures:
-            requests_queue.put(None)
+        # stop all processes
+        stop_event.set()
 
         await asyncio.gather(*futures)
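Shutdown also changes shape here: instead of pushing one `None` sentinel per worker into the requests queue, the scheduler sets a shared `Event` that every worker loop polls. A self-contained sketch of that pattern (generic names, not guidellm APIs):

```python
import time
from multiprocessing import Manager, Process


def worker(stop_event) -> None:
    while not stop_event.is_set():
        time.sleep(0.05)  # a real worker would drain its request queue here


if __name__ == "__main__":
    with Manager() as manager:
        stop_event = manager.Event()
        procs = [Process(target=worker, args=(stop_event,)) for _ in range(2)]
        for proc in procs:
            proc.start()
        time.sleep(0.2)
        stop_event.set()  # observed by all workers; they exit their loops
        for proc in procs:
            proc.join()
```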
diff --git a/src/guidellm/scheduler/strategy.py b/src/guidellm/scheduler/strategy.py
index 200c799e..329f0427 100644
--- a/src/guidellm/scheduler/strategy.py
+++ b/src/guidellm/scheduler/strategy.py
@@ -44,6 +44,10 @@
     type_: Literal["strategy"] = Field(
         description="The type of scheduling strategy schedule requests with.",
     )
+    start_time: float = Field(
+        default_factory=time.time,
+        description="The start time for the scheduling strategy.",
+    )
 
     @property
     def processing_mode(self) -> Literal["sync", "async"]:
@@ -175,8 +179,9 @@
         :return: A generator that yields time.time() for immediate request scheduling.
         """
+        init_time = self.start_time
         while True:
-            yield time.time()
+            yield max(init_time, time.time())
 
 
 class ConcurrentStrategy(SchedulingStrategy):
@@ -226,7 +231,9 @@
         :return: {self.streams} for the concurrent scheduling strategy to limit
             the worker processes to the number of streams.
         """
-        return self.streams
+        cpu_cores = os.cpu_count() or 1
+
+        return min(max(1, cpu_cores - 1), self.streams)
 
     @property
     def queued_requests_limit(self) -> int:
@@ -260,8 +267,9 @@
         :return: A generator that yields time.time() for immediate request scheduling.
         """
+        init_time = self.start_time
         while True:
-            yield time.time()
+            yield max(init_time, time.time())
 
 
 class ThroughputStrategy(SchedulingStrategy):
@@ -334,10 +342,9 @@
         :return: A generator that yields the start time.time()
             for immediate request scheduling.
         """
-        start_time = time.time()
-
+        init_time = self.start_time
         while True:
-            yield start_time
+            yield init_time
 
 
 class AsyncConstantStrategy(ThroughputStrategy):
@@ -389,24 +396,24 @@
         :return: A generator that yields timestamps for request scheduling.
         """
-        start_time = time.time()
         constant_increment = 1.0 / self.rate
+        init_time = self.start_time
 
         # handle bursts first to get to the desired rate
         if self.initial_burst is not None:
             # send an initial burst equal to the rate
             # to reach the target rate
             burst_count = math.floor(self.rate)
             for _ in range(burst_count):
-                yield start_time
+                yield init_time
 
-            start_time += constant_increment
+            init_time += constant_increment
 
         counter = 0
 
         # continue with constant rate after bursting
         while True:
-            yield start_time + constant_increment * counter
+            yield init_time + constant_increment * counter
             counter += 1
 
 
@@ -459,24 +466,23 @@
         :return: A generator that yields timestamps for request scheduling.
         """
-        start_time = time.time()
-
+        init_time = self.start_time
         if self.initial_burst is not None:
             # send an initial burst equal to the rate
             # to reach the target rate
             burst_count = math.floor(self.rate)
             for _ in range(burst_count):
-                yield start_time
+                yield init_time
         else:
-            yield start_time
+            yield init_time
 
         # set the random seed for reproducibility
         rand = random.Random(self.random_seed)  # noqa: S311
 
         while True:
             inter_arrival_time = rand.expovariate(self.rate)
-            init_time += inter_arrival_time
-            yield start_time
+            init_time += inter_arrival_time
+            yield init_time
 
 
 def strategy_display_str(strategy: Union[StrategyType, SchedulingStrategy]) -> str:
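The shared `start_time` is what enables the new worker design: every process can regenerate the same deterministic schedule and take every Nth timestamp, instead of receiving precomputed start times over the queue. A simplified standalone illustration of the constant-rate case (no initial burst), matching the `islice` striping used in worker.py below:

```python
from itertools import islice


def constant_schedule(start_time: float, rate: float):
    # Mirrors AsyncConstantStrategy.request_times() without the burst logic.
    increment = 1.0 / rate
    counter = 0
    while True:
        yield start_time + increment * counter
        counter += 1


# Two workers stripe the same deterministic schedule: worker i of N
# consumes timestamps i, i + N, i + 2N, ...
worker_0 = islice(constant_schedule(1000.0, rate=4.0), 0, None, 2)
worker_1 = islice(constant_schedule(1000.0, rate=4.0), 1, None, 2)
print(list(islice(worker_0, 3)))  # [1000.0, 1000.5, 1001.0]
print(list(islice(worker_1, 3)))  # [1000.25, 1000.75, 1001.25]
```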
""" - return self.streams + cpu_cores = os.cpu_count() or 1 + + return min(max(1, cpu_cores - 1), self.streams) @property def queued_requests_limit(self) -> int: @@ -260,8 +267,9 @@ def request_times(self) -> Generator[float, None, None]: :return: A generator that yields time.time() for immediate request scheduling. """ + init_time = self.start_time while True: - yield time.time() + yield max(init_time, time.time()) class ThroughputStrategy(SchedulingStrategy): @@ -334,10 +342,9 @@ def request_times(self) -> Generator[float, None, None]: :return: A generator that yields the start time.time() for immediate request scheduling. """ - start_time = time.time() - + init_time = self.start_time while True: - yield start_time + yield init_time class AsyncConstantStrategy(ThroughputStrategy): @@ -389,24 +396,24 @@ def request_times(self) -> Generator[float, None, None]: :return: A generator that yields timestamps for request scheduling. """ - start_time = time.time() constant_increment = 1.0 / self.rate + init_time = self.start_time # handle bursts first to get to the desired rate if self.initial_burst is not None: # send an initial burst equal to the rate # to reach the target rate burst_count = math.floor(self.rate) for _ in range(burst_count): - yield start_time + yield init_time - start_time += constant_increment + init_time += constant_increment counter = 0 # continue with constant rate after bursting while True: - yield start_time + constant_increment * counter + yield init_time + constant_increment * counter counter += 1 @@ -459,24 +466,23 @@ def request_times(self) -> Generator[float, None, None]: :return: A generator that yields timestamps for request scheduling. """ - start_time = time.time() - + init_time = self.start_time if self.initial_burst is not None: # send an initial burst equal to the rate # to reach the target rate burst_count = math.floor(self.rate) for _ in range(burst_count): - yield start_time + yield init_time else: - yield start_time + yield init_time # set the random seed for reproducibility rand = random.Random(self.random_seed) # noqa: S311 while True: inter_arrival_time = rand.expovariate(self.rate) - start_time += inter_arrival_time - yield start_time + init_time += inter_arrival_time + yield init_time def strategy_display_str(strategy: Union[StrategyType, SchedulingStrategy]) -> str: diff --git a/src/guidellm/scheduler/worker.py b/src/guidellm/scheduler/worker.py index a53b14c2..aaefadaf 100644 --- a/src/guidellm/scheduler/worker.py +++ b/src/guidellm/scheduler/worker.py @@ -1,11 +1,11 @@ import asyncio import math -import multiprocessing -import multiprocessing.queues import time from abc import ABC, abstractmethod from collections.abc import AsyncGenerator from dataclasses import dataclass +from itertools import islice +from threading import Event from typing import ( Any, Generic, @@ -26,8 +26,14 @@ ) from guidellm.objects import StandardBaseModel from guidellm.request import GenerationRequest -from guidellm.scheduler.result import SchedulerRequestInfo -from guidellm.scheduler.types import RequestT, ResponseT +from guidellm.request.types import RequestT, ResponseT +from guidellm.scheduler.queues import MPQueues, Queue, QueueEmpty +from guidellm.scheduler.result import ( + SchedulerRequestInfo, + WorkerProcessRequest, + WorkerProcessResult, +) +from guidellm.scheduler.strategy import SchedulingStrategy __all__ = [ "GenerativeRequestsWorker", @@ -35,27 +41,9 @@ "RequestsWorker", "ResolveStatus", "WorkerDescription", - "WorkerProcessRequest", - "WorkerProcessResult", 
] -@dataclass -class WorkerProcessRequest(Generic[RequestT]): - request: RequestT - start_time: float - timeout_time: float - queued_time: float - - -@dataclass -class WorkerProcessResult(Generic[RequestT, ResponseT]): - type_: Literal["request_scheduled", "request_start", "request_complete"] - request: RequestT - response: Optional[ResponseT] - info: SchedulerRequestInfo - - @dataclass class ResolveStatus: requested: bool @@ -120,28 +108,25 @@ async def resolve( """ ... - async def get_request( - self, requests_queue: multiprocessing.Queue - ) -> Optional[WorkerProcessRequest[RequestT]]: - return await asyncio.to_thread(requests_queue.get) # type: ignore[attr-defined] - async def send_result( self, - results_queue: multiprocessing.Queue, + results_queue: Queue[WorkerProcessResult[RequestT, ResponseT]], result: WorkerProcessResult[RequestT, ResponseT], ): await asyncio.to_thread(results_queue.put, result) # type: ignore[attr-defined] async def resolve_scheduler_request( self, - request: Any, - queued_time: float, + process_request: WorkerProcessRequest[RequestT, ResponseT], dequeued_time: float, start_time: float, - timeout_time: float, - results_queue: multiprocessing.Queue, + results_queue: Queue[WorkerProcessResult[RequestT, ResponseT]], process_id: int, - ): + ) -> WorkerProcessRequest[RequestT, ResponseT]: + request = process_request.session.get_next_request() + timeout_time = process_request.timeout_time + queued_time = process_request.queued_time + info = SchedulerRequestInfo( targeted_start_time=start_time, queued_time=queued_time, @@ -185,74 +170,94 @@ async def resolve_scheduler_request( ) asyncio.create_task(self.send_result(results_queue, result)) - def process_loop_synchronous( - self, - requests_queue: multiprocessing.Queue, - results_queue: multiprocessing.Queue, - process_id: int, - ): - async def _process_runner(): - while ( - process_request := await self.get_request(requests_queue) - ) is not None: - dequeued_time = time.time() - - await self.resolve_scheduler_request( - request=process_request.request, - queued_time=process_request.queued_time, - dequeued_time=dequeued_time, - start_time=process_request.start_time, - timeout_time=process_request.timeout_time, - results_queue=results_queue, - process_id=process_id, - ) - - try: - asyncio.run(_process_runner()) - except Exception as exc: # noqa: BLE001 - logger.error( - f"Error in worker process {process_id}: {exc}", - exc_info=True, - stack_info=True, - ) + process_request.session.push_response(response) + return process_request def process_loop_asynchronous( self, - requests_queue: multiprocessing.Queue, - results_queue: multiprocessing.Queue, + queues: MPQueues[RequestT, ResponseT], + strategy: SchedulingStrategy, + stop_event: Event, + prioritize_sessions: bool, max_concurrency: int, process_id: int, + num_processes: int, ): async def _process_runner(): - pending = asyncio.Semaphore(max_concurrency) - - if pending.locked(): - raise ValueError("Async worker called with max_concurrency < 1") - - while ( - process_request := await self.get_request(requests_queue) - ) is not None: - dequeued_time = time.time() - - await pending.acquire() + lock = asyncio.Semaphore(max_concurrency) + pending_requests: list[WorkerProcessRequest[RequestT, ResponseT]] = [] + times_iter = islice( + strategy.request_times(), + process_id, + None, + num_processes, + ) - def _task_done(_: asyncio.Task): - nonlocal pending - pending.release() + start_time = None + while not stop_event.is_set(): + if start_time is None: + start_time = 
next(times_iter) + + # Yield control to the event loop. Sleep if we are way ahead + await asyncio.sleep(start_time - time.time() - 1) + await lock.acquire() + + process_request = None + try: + process_request = ( + pending_requests.pop() + if pending_requests + else queues.requests.get_nowait() + ) + dequeued_time = time.time() + except QueueEmpty: + lock.release() + continue + + async def wait_then_requeue( + process_request: WorkerProcessRequest[RequestT, ResponseT], + ): + # Wait to requeue the request session if it specifies a delay + if delay := process_request.session.get_next_delay(): + await asyncio.sleep(delay) + + # Push session to the stack + process_request.queued_time = time.time() + pending_requests.append(process_request) + if prioritize_sessions: + # Release the lock with the session on top of the stack + lock.release() + + def _request_callback( + future: asyncio.Future[WorkerProcessRequest[RequestT, ResponseT]], + ): + # If we are prioritizing sessions, hold + # the lock until the session is done + nonlocal lock + if not prioritize_sessions: + lock.release() + + try: + process_request = future.result() + except asyncio.CancelledError: + return + if not process_request.session.complete: + asyncio.create_task(wait_then_requeue(process_request)) + elif prioritize_sessions: + # no more requests in this session, release the lock + lock.release() task = asyncio.create_task( self.resolve_scheduler_request( - request=process_request.request, - queued_time=process_request.queued_time, + process_request=process_request, dequeued_time=dequeued_time, - start_time=process_request.start_time, - timeout_time=process_request.timeout_time, - results_queue=results_queue, + start_time=start_time, + results_queue=queues.responses, process_id=process_id, ) ) - task.add_done_callback(_task_done) - await asyncio.sleep(0) # enable start task immediately + task.add_done_callback(_request_callback) + start_time = None try: asyncio.run(_process_runner()) @@ -309,32 +314,25 @@ async def prepare_multiprocessing(self): """ await self.backend.prepare_multiprocessing() - def process_loop_synchronous( - self, - requests_queue: multiprocessing.Queue, - results_queue: multiprocessing.Queue, - process_id: int, - ): - asyncio.run(self.backend.validate()) - super().process_loop_synchronous( - requests_queue=requests_queue, - results_queue=results_queue, - process_id=process_id, - ) - def process_loop_asynchronous( self, - requests_queue: multiprocessing.Queue, - results_queue: multiprocessing.Queue, + queues: MPQueues[GenerationRequest, ResponseSummary], + strategy: SchedulingStrategy, + stop_event: Event, + prioritize_sessions: bool, max_concurrency: int, process_id: int, + num_processes: int, ): asyncio.run(self.backend.validate()) super().process_loop_asynchronous( - requests_queue=requests_queue, - results_queue=results_queue, + queues=queues, + strategy=strategy, + stop_event=stop_event, + prioritize_sessions=prioritize_sessions, max_concurrency=max_concurrency, process_id=process_id, + num_processes=num_processes, ) async def resolve(
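The semaphore in `process_loop_asynchronous` doubles as the session-priority mechanism: with `prioritize_sessions=False` the slot is released as soon as a request resolves, so other sessions can interleave; with `True` it is held across the inter-turn delay and only released once the follow-up turn sits on top of the worker's local stack (or the session completes), so the same conversation is serviced next. A compressed, runnable sketch of just that gating behavior (turn counts and sleeps are placeholders, not guidellm code):

```python
import asyncio


async def demo(prioritize_sessions: bool) -> None:
    lock = asyncio.Semaphore(2)  # max_concurrency = 2

    async def run_session(turns: int) -> None:
        await lock.acquire()
        try:
            for turn in range(turns):
                await asyncio.sleep(0.01)  # stands in for resolving a request
                if not prioritize_sessions and turn < turns - 1:
                    # Free the slot between turns so other work can interleave.
                    lock.release()
                    await lock.acquire()
        finally:
            lock.release()  # session complete

    await asyncio.gather(*(run_session(3) for _ in range(4)))


asyncio.run(demo(prioritize_sessions=True))
```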