1 parent c7d1e4d commit d891fa5
mindnlp/transformers/models/qwen2_vl/modeling_qwen2_vl.py
@@ -1230,8 +1230,10 @@ def forward(
             pixel_values = pixel_values.type(self.visual.get_dtype())
             image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
             image_mask = (input_ids == self.config.image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
+            inputs_embeds = inputs_embeds.astype(mindspore.float16)
             image_embeds = image_embeds.to(inputs_embeds.dtype)
             inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
+            inputs_embeds = inputs_embeds.astype(mindspore.bfloat16)

         if pixel_values_videos is not None:
             pixel_values_videos = pixel_values_videos.type(self.visual.get_dtype())
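For context, a minimal sketch of the dtype round-trip this hunk introduces, assuming a MindSpore setup where masked_scatter does not handle bfloat16 tensors. The helper name merge_image_embeds and the explicit image_token_id argument are illustrative, not part of the committed code:

import mindspore

def merge_image_embeds(inputs_embeds, image_embeds, input_ids, image_token_id):
    # Mark positions holding the image placeholder token and broadcast the mask
    # over the hidden dimension so it lines up with inputs_embeds.
    image_mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
    # Perform the scatter in float16, then cast back to bfloat16 afterward,
    # mirroring the two casts added by this commit.
    inputs_embeds = inputs_embeds.astype(mindspore.float16)
    image_embeds = image_embeds.astype(inputs_embeds.dtype)
    inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
    return inputs_embeds.astype(mindspore.bfloat16)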