diff --git a/fastdeploy/engine/sched/resource_manager_v1.py b/fastdeploy/engine/sched/resource_manager_v1.py
index 91ce594e993..4a49ae15e5f 100644
--- a/fastdeploy/engine/sched/resource_manager_v1.py
+++ b/fastdeploy/engine/sched/resource_manager_v1.py
@@ -647,9 +647,11 @@ def _allocate_decode_and_extend():
                     break
 
                 request = self.waiting[0]
-                if (self._is_mm_request(request) and self.exist_mm_prefill(scheduled_reqs)) or (
-                    paddle.is_compiled_with_xpu() and self.exist_prefill(scheduled_reqs)
-                ):
+                if (
+                    not envs.FD_ENABLE_MAX_PREFILL
+                    and self._is_mm_request(request)
+                    and self.exist_mm_prefill(scheduled_reqs)
+                ) or (paddle.is_compiled_with_xpu() and self.exist_prefill(scheduled_reqs)):
                     break
                 if request.status == RequestStatus.WAITING:
                     result = self._waiting_async_process(request)
diff --git a/fastdeploy/entrypoints/engine_client.py b/fastdeploy/entrypoints/engine_client.py
index 78918314509..8a97c39c32c 100644
--- a/fastdeploy/entrypoints/engine_client.py
+++ b/fastdeploy/entrypoints/engine_client.py
@@ -48,6 +48,7 @@
     ParameterError,
     StatefulSemaphore,
     api_server_logger,
+    to_tensor,
 )
 
 
@@ -387,6 +388,8 @@ def _send_task(self, task):
         if not self.enable_mm:
             self.zmq_client.send_json(task)
         else:
+            if envs.FD_ENABLE_E2W_TENSOR_CONVERT:
+                to_tensor([task])
             self.zmq_client.send_pyobj(task)
 
     def valid_parameters(self, data):
diff --git a/fastdeploy/worker/gpu_model_runner.py b/fastdeploy/worker/gpu_model_runner.py
index d5be2801fab..b54d11b93a2 100644
--- a/fastdeploy/worker/gpu_model_runner.py
+++ b/fastdeploy/worker/gpu_model_runner.py
@@ -478,12 +478,14 @@ def _apply_mm_inputs(self, request: Request, multi_vision_inputs: dict, rope_3d_
                 multi_vision_inputs["grid_thw_lst"].extend(
                     inputs["grid_thw"][request.num_image_start : request.num_image_end]
                 )
-                multi_vision_inputs["cu_seqlens"].extend(
-                    inputs["vit_seqlen"][request.num_image_start : request.num_image_end]
-                )
-                multi_vision_inputs["vit_position_ids_lst"].extend(
-                    inputs["vit_position_ids"][request.num_image_start : request.num_image_end]
-                )
+                if hasattr(inputs, "vit_seqlen"):
+                    multi_vision_inputs["cu_seqlens"].extend(
+                        inputs["vit_seqlen"][request.num_image_start : request.num_image_end]
+                    )
+                if hasattr(inputs, "vit_position_ids"):
+                    multi_vision_inputs["vit_position_ids_lst"].extend(
+                        inputs["vit_position_ids"][request.num_image_start : request.num_image_end]
+                    )
             else:
                 vision_inputs = inputs
                 if self.encoder_cache:
@@ -2738,8 +2740,12 @@ def extract_vision_features_ernie(self, inputs: list[paddle.Tensor]) -> paddle.T
 
     def extract_vision_features_qwen(self, inputs: list[paddle.Tensor]) -> paddle.Tensor:
         assert inputs["images"] is not None
-        grid_thw = inputs["grid_thw"]
-        images = inputs["images"]
+        if envs.FD_ENABLE_MAX_PREFILL:
+            images = paddle.concat(inputs["images_lst"]).cast("bfloat16")
+            grid_thw = paddle.to_tensor(inputs["grid_thw_lst"], dtype="int64")
+        else:
+            grid_thw = inputs["grid_thw"]
+            images = inputs["images"]
         with paddle.amp.auto_cast(
             True,
             custom_black_list=self.amp_black,