diff --git a/simpletuner/examples/flux2-klein-9b-i2i.lycoris-lokr/config.json b/simpletuner/examples/flux2-klein-9b-i2i.lycoris-lokr/config.json
new file mode 100644
index 000000000..ef2768ea4
--- /dev/null
+++ b/simpletuner/examples/flux2-klein-9b-i2i.lycoris-lokr/config.json
@@ -0,0 +1,41 @@
+{
+  "base_model_precision": "int8-torchao",
+  "checkpoint_step_interval": 50,
+  "data_backend_config": "config/examples/multidatabackend-controlnet-512px.json",
+  "disable_bucket_pruning": true,
+  "flow_schedule_shift": 3,
+  "grad_clip_method": "value",
+  "gradient_checkpointing": true,
+  "hub_model_id": "simpletuner-example-flux2-lycoris-lokr",
+  "learning_rate": 1e-4,
+  "lora_alpha": 128,
+  "lora_rank": 128,
+  "lora_type": "lycoris",
+  "lr_scheduler": "constant",
+  "lycoris_config": "config/examples/flux2-klein-9b-i2i.lycoris-lokr/lycoris_config.json",
+  "max_train_steps": 100,
+  "model_family": "flux2",
+  "model_flavour": "klein-9b",
+  "model_type": "lora",
+  "num_eval_images": 25,
+  "num_train_epochs": 0,
+  "optimizer": "adamw_bf16",
+  "output_dir": "output/examples/flux2-klein-9b-i2i.lycoris-lokr",
+  "push_checkpoints_to_hub": false,
+  "push_to_hub": false,
+  "quantize_via": "cpu",
+  "report_to": "none",
+  "seed": 42,
+  "tracker_project_name": "lycoris-training",
+  "tracker_run_name": "example-training-run",
+  "train_batch_size": 3,
+  "use_ema": false,
+  "vae_batch_size": 1,
+  "validation_disable_unconditional": true,
+  "validation_guidance": 5.0,
+  "validation_guidance_rescale": 0.0,
+  "validation_num_inference_steps": 30,
+  "validation_resolution": "1024x1024",
+  "validation_seed": 42,
+  "validation_steps": 50
+}
diff --git a/simpletuner/examples/flux2-klein-9b-i2i.lycoris-lokr/lycoris_config.json b/simpletuner/examples/flux2-klein-9b-i2i.lycoris-lokr/lycoris_config.json
new file mode 100644
index 000000000..50d606877
--- /dev/null
+++ b/simpletuner/examples/flux2-klein-9b-i2i.lycoris-lokr/lycoris_config.json
@@ -0,0 +1,22 @@
+{
+  "bypass_mode": true,
+  "algo": "lokr",
+  "multiplier": 1.0,
+  "full_matrix": true,
+  "linear_dim": 10000,
+  "linear_alpha": 1,
+  "factor": 4,
+  "apply_preset": {
+    "target_module": [
+      "Flux2Attention", "Flux2FeedForward"
+    ],
+    "module_algo_map": {
+      "Flux2FeedForward": {
+        "factor": 4
+      },
+      "Flux2Attention": {
+        "factor": 2
+      }
+    }
+  }
+}