Adding StateDictAdapter #1601 (Merged)
Changes from all commits (12 commits, author: HosseinKaviani-H):
a09953b  Fix config file path in run_train.sh
8dbceeb  Adding state_dict_adapter
a755f53  Adding state_dict_adapter
0a04bde  Resolve README conflict
ee4485f  Merge branch 'main' into main
41f6589  Resolve README conflict and add StateDictAdapter changes
8c93715  Merge branch 'main' of https://github.com/HosseinKaviani-H/torchtitan…
4c790fb  Update __init__.py file
43c5565  Merge branch 'pytorch:main' into main
665525f  Merge branch 'pytorch:main' into main
fe9ca45  Merge branch 'pytorch:main' into main
174da03  Merge branch 'pytorch:main' into main
The PR adds the following new file (86 lines):
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
This script is adapted from torchtitan/models/llama3/model/state_dict_adapter.py.

We can use this script to adapt a checkpoint from HF to the format that we can
load into the torchtitan model, and vice versa. This enables us to run a parity
test against the HF implementation and make sure that our results are aligned
with it.
"""

import re
from typing import Any

from torchtitan.protocols.state_dict_adapter import StateDictAdapter

from .args import Qwen3ModelArgs


class Qwen3StateDictAdapter(StateDictAdapter):
    def __init__(self, model_args: Qwen3ModelArgs, hf_assets_path: str | None):
        super().__init__(model_args, hf_assets_path)

        self.model_args = model_args
        self.hf_assets_path = hf_assets_path

        # Mapping from HF parameter names to torchtitan parameter names.
        # "{}" is a placeholder for the layer index; a value of None means
        # the HF entry has no torchtitan counterpart and is dropped.
        self.from_hf_map = {
            "model.embed_tokens.weight": "tok_embeddings.weight",
            "model.layers.{}.self_attn.q_proj.weight": "layers.{}.attention.wq.weight",
            "model.layers.{}.self_attn.k_proj.weight": "layers.{}.attention.wk.weight",
            "model.layers.{}.self_attn.v_proj.weight": "layers.{}.attention.wv.weight",
            "model.layers.{}.self_attn.o_proj.weight": "layers.{}.attention.wo.weight",
            "model.layers.{}.self_attn.q_norm.weight": "layers.{}.attention.q_norm.weight",
            "model.layers.{}.self_attn.k_norm.weight": "layers.{}.attention.k_norm.weight",
            "model.layers.{}.self_attn.rotary_emb.inv_freq": None,
            "model.layers.{}.mlp.gate_proj.weight": "layers.{}.feed_forward.w1.weight",
            "model.layers.{}.mlp.up_proj.weight": "layers.{}.feed_forward.w3.weight",
            "model.layers.{}.mlp.down_proj.weight": "layers.{}.feed_forward.w2.weight",
            "model.layers.{}.input_layernorm.weight": "layers.{}.attention_norm.weight",
            "model.layers.{}.post_attention_layernorm.weight": "layers.{}.ffn_norm.weight",
            "model.norm.weight": "norm.weight",
            "lm_head.weight": "output.weight",
        }

    def to_hf(self, state_dict: dict[str, Any]) -> dict[str, Any]:
        # Invert the HF -> torchtitan mapping to translate in the other direction.
        to_hf_map = {v: k for k, v in self.from_hf_map.items()}
        hf_state_dict = {}

        for key, value in state_dict.items():
            if "layers" in key:
                # Replace the layer index with "{}" to look up the abstract key,
                # then re-insert the index into the translated name.
                abstract_key = re.sub(r"(\d+)", "{}", key, count=1)
                layer_num = re.search(r"\d+", key).group(0)
                new_key = to_hf_map[abstract_key]

                if new_key is None:
                    continue
                new_key = new_key.format(layer_num)
            else:
                new_key = to_hf_map[key]

            hf_state_dict[new_key] = value

        return hf_state_dict

    def from_hf(self, hf_state_dict: dict[str, Any]) -> dict[str, Any]:
        state_dict = {}

        for key, value in hf_state_dict.items():
            if "layers" in key:
                abstract_key = re.sub(r"(\d+)", "{}", key, count=1)
                layer_num = re.search(r"\d+", key).group(0)
                new_key = self.from_hf_map[abstract_key]

                # Entries mapped to None (e.g. rotary_emb.inv_freq) have no
                # torchtitan counterpart and are skipped.
                if new_key is None:
                    continue
                new_key = new_key.format(layer_num)
            else:
                new_key = self.from_hf_map[key]

            state_dict[new_key] = value
        return state_dict
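
For context, here is a minimal round-trip sketch of how the adapter could be exercised in a parity test. The tensor names come from from_hf_map above; the no-argument Qwen3ModelArgs() construction and the tiny single-layer dummy checkpoint are assumptions for illustration, not part of the PR.

import torch

# Hypothetical smoke test: round-trip a tiny fake checkpoint through the adapter.
adapter = Qwen3StateDictAdapter(Qwen3ModelArgs(), hf_assets_path=None)

hf_sd = {
    "model.embed_tokens.weight": torch.zeros(8, 4),
    "model.layers.0.self_attn.q_proj.weight": torch.zeros(4, 4),
    "model.layers.0.self_attn.rotary_emb.inv_freq": torch.zeros(2),
    "model.norm.weight": torch.zeros(4),
    "lm_head.weight": torch.zeros(8, 4),
}

tt_sd = adapter.from_hf(hf_sd)
assert "layers.0.attention.wq.weight" in tt_sd
# rotary_emb.inv_freq maps to None, so it is dropped on the way in.
assert not any("rotary_emb" in k for k in tt_sd)

# Converting back restores the HF names, minus the dropped inv_freq entry.
roundtrip = adapter.to_hf(tt_sd)
assert set(roundtrip) == set(hf_sd) - {"model.layers.0.self_attn.rotary_emb.inv_freq"}

Since the values are passed through unchanged, the adapter is purely a key-renaming layer between the two checkpoint formats.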