
Commit 4e392e8

[BugFix] Fix v1 loader lm_head fp32 (#5270) (#5287)
1 parent bab01e9 commit 4e392e8
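
In short: the v1 weight loader cast checkpoint tensors to paddle.get_default_dtype() before calling set_value, which broke models served with lm_head_fp32, where the head parameter stays in float32 while the default dtype is a lower-precision compute dtype such as bfloat16. The fix casts each tensor to the destination parameter's own dtype instead, and the generation test now enables lm_head_fp32 so this path is exercised.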

3 files changed (+6, -3)


fastdeploy/model_executor/layers/mtp_linear.py (2 additions, 2 deletions)

@@ -120,10 +120,10 @@ def load_state_dict(self, state_dict):
         weight_tensor = get_tensor(state_dict.pop(self.weight_key)).astype(paddle.get_default_dtype())
         if self.linear.weight.shape != weight_tensor.shape:
             weight_tensor = weight_tensor.transpose([1, 0])
-        self.linear.weight.set_value(weight_tensor)
+        self.linear.weight.set_value(weight_tensor.astype(self.linear.weight.dtype))

         if self.bias_key is not None:
-            bias = get_tensor(state_dict.pop(self.bias_key)).astype(paddle.get_default_dtype())
+            bias = get_tensor(state_dict.pop(self.bias_key)).astype(self.linear.bias.dtype)
             self.linear.bias.set_value(bias)

     def forward(self, input):
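
The core of the change: cast to the destination parameter's dtype, not to the process-wide default dtype. With lm_head_fp32 the head parameter is float32 while paddle.get_default_dtype() is typically a lower-precision compute dtype, so the old code handed set_value a tensor of the wrong dtype. A minimal sketch of the mismatch, with illustrative shapes and dtypes that are not taken from the commit:

import paddle

# With lm_head_fp32, the head parameter is created in float32 even though
# the rest of the model runs in a lower-precision compute dtype.
head_weight = paddle.create_parameter(
    shape=[8, 16],
    dtype="float32",
    default_initializer=paddle.nn.initializer.Constant(0.0),
)

# A checkpoint tensor as the loader sees it (hypothetical values).
ckpt = paddle.ones([8, 16], dtype="float32")

compute_dtype = "bfloat16"  # stand-in for paddle.get_default_dtype() at serve time

# Old behavior: cast to the process-wide dtype, yielding a bfloat16 tensor
# that no longer matches the float32 parameter that set_value() must fill.
bad = ckpt.astype(compute_dtype)

# Fixed behavior: cast to the destination parameter's own dtype.
head_weight.set_value(ckpt.astype(head_weight.dtype))  # stays float32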

fastdeploy/model_executor/models/qwen2_5_vl/qwen2_5_vl.py (3 additions, 1 deletion)

@@ -251,7 +251,9 @@ def set_state_dict(self, state_dict: Dict[str, Union[np.ndarray, paddle.Tensor]]
         self.model.load_state_dict(state_dict)
         self.visual.load_state_dict(state_dict)
         if self.tie_word_embeddings:
-            self.lm_head.linear.weight.set_value(self.model.embed_tokens.embeddings.weight.transpose([1, 0]))
+            self.lm_head.linear.weight.set_value(
+                self.model.embed_tokens.embeddings.weight.transpose([1, 0]).astype(self.lm_head.linear.weight.dtype)
+            )
         else:
             self.lm_head.load_state_dict(state_dict)
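
The Qwen2.5-VL change covers the tied-embeddings path, where the lm_head weight is not read from the checkpoint but derived from the embedding table. The table is laid out [vocab_size, hidden_size] in the compute dtype, while an fp32 head expects [hidden_size, vocab_size] in float32, so both a transpose and a cast are needed. A hedged sketch with illustrative names and sizes:

import paddle

vocab_size, hidden_size = 32, 8

# Embedding table in the compute dtype, laid out [vocab_size, hidden_size].
embed_weight = paddle.rand([vocab_size, hidden_size]).astype("bfloat16")

# An fp32 output head expects [hidden_size, vocab_size] in float32.
head_weight = paddle.create_parameter(
    shape=[hidden_size, vocab_size],
    dtype="float32",
    default_initializer=paddle.nn.initializer.Constant(0.0),
)

# Transpose to the head's layout, then cast to the head's dtype before
# set_value; transposing alone (the old code) leaves a bfloat16 tensor.
head_weight.set_value(embed_weight.transpose([1, 0]).astype(head_weight.dtype))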

tests/entrypoints/test_generation.py (1 addition, 0 deletions)

@@ -50,6 +50,7 @@ def setUpClass(cls):
             model=MODEL_NAME,
             max_num_batched_tokens=4096,
             tensor_parallel_size=1,
+            lm_head_fp32=True,
             engine_worker_queue_port=int(os.getenv("FD_ENGINE_QUEUE_PORT")),
             cache_queue_port=int(os.getenv("FD_CACHE_QUEUE_PORT")),
         )
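
The test change is the regression guard: the existing generation test now constructs the engine with lm_head_fp32=True, so the float32-head loading path runs end to end in CI.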
