
Commit 0dc3fc6

change all 'iion' to 'iign'
1 parent 5f3246c commit 0dc3fc6

File tree: 4 files changed, +14 −14 lines


internnav/dataset/internvla_n1_lerobot_dataset.py

Lines changed: 1 addition & 1 deletion
@@ -1371,7 +1371,7 @@ def __getitem__(self, i):
 def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
     """Make dataset and collator for supervised fine-tuning."""
     train_datasets = []
-    if data_args.iion_dataset_use:
+    if data_args.iign_dataset_use:
         train_datasets.append(VLLNDataset(tokenizer=tokenizer, data_args=data_args))
     if data_args.vln_dataset_use:
         train_datasets.append(NavPixelGoalDataset(tokenizer=tokenizer, data_args=data_args))
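The gating logic is unchanged apart from the attribute name: `iign_dataset_use` defaults to an empty string (see the `DataArguments` change below), so the VL-LN branch is skipped unless the flag is set. A minimal, self-contained sketch of the pattern, with placeholder strings standing in for the real `VLLNDataset` / `NavPixelGoalDataset` constructors, which are not reproduced here:

```python
from types import SimpleNamespace

# Hypothetical stand-in for the parsed DataArguments; only the two flags used above.
data_args = SimpleNamespace(iign_dataset_use="iign_split1,iign_split2", vln_dataset_use="")

train_datasets = []
if data_args.iign_dataset_use:   # the empty-string default skips this branch
    train_datasets.append("VLLNDataset")          # placeholder for VLLNDataset(tokenizer=..., data_args=...)
if data_args.vln_dataset_use:
    train_datasets.append("NavPixelGoalDataset")  # placeholder for NavPixelGoalDataset(tokenizer=..., data_args=...)

print(train_datasets)  # ['VLLNDataset']
```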

internnav/dataset/vlln_lerobot_dataset.py

Lines changed: 10 additions & 10 deletions
@@ -15,31 +15,31 @@
 from .rope2d import get_rope_index_2, get_rope_index_25
 
 # Define placeholders for dataset paths
-IION_split1 = {
+IIGN_split1 = {
     "data_path": "projects/VL-LN-Bench/traj_data/mp3d_split1",
     "height": 125,
     "pitch_1": 0,
     "pitch_2": 30,
 }
 
-IION_split2 = {
+IIGN_split2 = {
     "data_path": "projects/VL-LN-Bench/traj_data/mp3d_split2",
     "height": 125,
     "pitch_1": 0,
     "pitch_2": 30,
 }
 
-IION_split3 = {
+IIGN_split3 = {
     "data_path": "projects/VL-LN-Bench/traj_data/mp3d_split3",
     "height": 125,
     "pitch_1": 0,
     "pitch_2": 30,
 }
 
 data_dict = {
-    "iion_split1": IION_split1,
-    "iion_split2": IION_split2,
-    "iion_split3": IION_split3,
+    "iign_split1": IIGN_split1,
+    "iign_split2": IIGN_split2,
+    "iign_split3": IIGN_split3,
 }
 
 IGNORE_INDEX = -100
@@ -55,14 +55,14 @@
 
 class VLLNDataset(Dataset):
     """
-    Dataset for 'Vision-Language'-'Language-Navigation' (VL-LN) / IION-style training.
+    Dataset for 'Vision-Language'-'Language-Navigation' (VL-LN) / IIGN-style training.
 
     Args:
         tokenizer (transformers.PreTrainedTokenizer): Tokenizer used to encode
             the chat template and produce `input_ids` / `labels`.
         data_args: A config-like object that must provide at least:
-            - iion_dataset_use (str): comma-separated dataset names, optionally
-              with sampling rate suffix like `iion_split1%50`.
+            - iign_dataset_use (str): comma-separated dataset names, optionally
+              with sampling rate suffix like `iign_split1%50`.
             - model_type (str): decides which rope-index function to use.
             - sample_step (int): stride for sampling start frames.
             - pixel_goal_only (bool): whether to keep only pixel-goal samples.
@@ -74,7 +74,7 @@ class VLLNDataset(Dataset):
 
     def __init__(self, tokenizer: transformers.PreTrainedTokenizer, data_args):
         super(VLLNDataset, self).__init__()
-        dataset = data_args.iion_dataset_use.split(",")
+        dataset = data_args.iign_dataset_use.split(",")
         dataset_list = data_list(dataset)
         rank0_print(f"Loading datasets: {dataset_list}")
         self.video_max_total_pixels = getattr(data_args, "video_max_total_pixels", 1664 * 28 * 28)
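The docstring above specifies the format of `iign_dataset_use`: comma-separated names that key into `data_dict`, each optionally carrying a sampling-rate suffix such as `iign_split1%50`. The repo's `data_list()` helper is not part of this diff, so the following is only a hedged sketch of how such a spec could be resolved, reusing the split configs defined above:

```python
# Hypothetical illustration only; the actual data_list() helper may differ.
IIGN_split1 = {"data_path": "projects/VL-LN-Bench/traj_data/mp3d_split1", "height": 125, "pitch_1": 0, "pitch_2": 30}
IIGN_split2 = {"data_path": "projects/VL-LN-Bench/traj_data/mp3d_split2", "height": 125, "pitch_1": 0, "pitch_2": 30}
data_dict = {"iign_split1": IIGN_split1, "iign_split2": IIGN_split2}

def resolve(spec: str):
    """Yield (config, sampling_rate) pairs for a comma-separated dataset spec."""
    for entry in spec.split(","):
        name, _, rate = entry.partition("%")      # "iign_split1%50" -> ("iign_split1", "50")
        yield data_dict[name], int(rate) / 100 if rate else 1.0

for cfg, rate in resolve("iign_split1%50,iign_split2"):
    print(cfg["data_path"], rate)   # mp3d_split1 sampled at 0.5, mp3d_split2 at 1.0
```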

internnav/trainer/internvla_n1_argument.py

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ class DataArguments:
     video_min_frame_pixels: int = field(default=4 * 28 * 28)
 
     vln_dataset_use: str = field(default="")
-    iion_dataset_use: str = field(default="")
+    iign_dataset_use: str = field(default="")
     sample_step: int = field(default=4)
     num_history: Optional[int] = field(default=8)
     predict_step_num: Optional[int] = field(default=32)
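Because dataclass field names double as CLI flags when `DataArguments` is parsed with something like `transformers.HfArgumentParser` (a common pattern, though the parsing code is not part of this commit), renaming the field also renames the flag the launch script has to pass, which is what the script below updates. A minimal sketch under that assumption, with the dataclass trimmed to the relevant fields:

```python
from dataclasses import dataclass, field

import transformers

# Trimmed-down, hypothetical version of DataArguments for illustration only.
@dataclass
class DataArguments:
    vln_dataset_use: str = field(default="")
    iign_dataset_use: str = field(default="")

parser = transformers.HfArgumentParser(DataArguments)
(data_args,) = parser.parse_args_into_dataclasses(
    ["--iign_dataset_use", "iign_split1,iign_split2"]
)
print(data_args.iign_dataset_use)  # iign_split1,iign_split2
```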

scripts/train/qwenvl_train/train_system2_vlln.sh

Lines changed: 2 additions & 2 deletions
@@ -27,7 +27,7 @@ max_pixels=313600
 min_pixels=3136
 
 # Dataset configuration (replace with public dataset names)
-iion_datasets=iion_split1,iion_split2 #,iion_split3
+iign_datasets=iign_split1,iign_split2 #,iign_split3
 
 # Output configuration
 run_name=InternVLA-N1-vlln
@@ -38,7 +38,7 @@ srun torchrun --nnodes=$SLURM_NNODES --nproc_per_node=8 \
     internnav/trainer/internvla_vlln_trainer.py \
     --deepspeed ${deepspeed} \
     --model_name_or_path "${llm}" \
-    --iion_dataset_use ${iion_datasets} \
+    --iign_dataset_use ${iign_datasets} \
     --data_flatten False \
     --tune_mm_vision True \
     --tune_mm_mlp True \
