rem only_flux.bat - kohya_ss FLUX LoRA training launcher
@echo off
echo 1 >F:\xampp\www\training.log
rem create the venv on first run, then activate it
if not exist venv\Scripts\activate.bat python -m venv venv
call venv\Scripts\activate.bat
rem max_steps = image count * steps per image (~50); e.g. 500 images * 50 steps = 25000
set "max_steps=25000"
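rem hypothetical sketch: derive max_steps from the image count instead of hardcoding it
rem (the path and .png extension are assumptions; adjust to match the training set)
rem for /f %%c in ('dir /b /s /a-d "C:\trainimages\img4\*.png" ^| find /c /v ""') do set /a max_steps=%%c*50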
rem fp8 variants (lower VRAM)
set "in_pretrain=F:/stable-g2/kohya_ss/models/flux1-schnell-fp8-e4m3fn.safetensors"
set "in_pret5=F:/stable-g2/kohya_ss/models/t5xxl_fp8_e4m3fn.safetensors"
rem fp16 variants (full precision)
rem set "in_pretrain=F:/stable-g2/stable-diffusion-webui-forge/models/Stable-diffusion/flux1-schnell.safetensors"
rem set "in_pret5=F:/stable-g2/stable-diffusion-webui-forge/models/text_encoder/t5xxl_fp16.safetensors"
set "in_preclip=F:/stable-g2/kohya_ss/models/clip_l.safetensors"
set "in_preae=F:/stable-g2/kohya_ss/models/ae.safetensors"
set "OUTPUT_DIR=F:/stable-g2/kohya_ss/output"
set "TRAIN_DIR=C:/trainimages/img4/"
set "TRAIN_RES=1024"
set "TRAIN_SEED=0"
set "TRAIN_LR=1e-4"
set "OUTPUT_MODEL_NAME=flux-lora-analie4"
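rem note: kohya's --train_data_dir expects subfolders named "<repeats>_<name>",
rem e.g. C:/trainimages/img4/50_analie, where the leading number sets the per-image repeat count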
rem legacy kohya SD LoRA command (train_network.py), kept for reference:
rem accelerate launch --num_cpu_threads_per_process=8 "train_network.py" --enable_bucket --pretrained_model_name_or_path="%in_pretrain%" --train_data_dir="C:/trainimages/img" --resolution=1024,1024 --output_dir="%OUTPUT_DIR%" --logging_dir="C:/trainimages/log" --network_alpha="128" --save_model_as=safetensors --network_module=networks.lora --text_encoder_lr=5e-5 --unet_lr=0.0001 --lr_scheduler_num_cycles="1" --learning_rate="0.00003" --lr_scheduler="cosine" --train_batch_size="1" --max_train_steps="%max_steps%" --save_every_n_epochs="1" --mixed_precision="fp16" --save_precision="fp16" --seed="0" --caption_extension=".txt" --cache_latents --max_data_loader_n_workers="1" --clip_skip=1 --optimizer_type="AdamW8bit" --bucket_reso_steps=64 --mem_eff_attn --gradient_checkpointing --xformers --bucket_no_upscale
rem cd sd-scripts-sd3
cd sd-scripts
rem FLUX LoRA training (networks.lora_flux):
accelerate launch --config_file="../config_files/accelerate/default_config.yaml" --num_cpu_threads_per_process=2 "flux_train_network.py" ^
 --pretrained_model_name_or_path="%in_pretrain%" --clip_l="%in_preclip%" --t5xxl="%in_pret5%" --ae="%in_preae%" ^
 --train_data_dir="%TRAIN_DIR%" --resolution=%TRAIN_RES% --seed="%TRAIN_SEED%" ^
 --enable_bucket --min_bucket_reso=256 --max_bucket_reso=1280 --bucket_reso_steps=64 ^
 --cache_latents_to_disk --cache_text_encoder_outputs --cache_text_encoder_outputs_to_disk ^
 --persistent_data_loader_workers --max_data_loader_n_workers=2 ^
 --network_module="networks.lora_flux" --network_dim=16 --network_alpha=8 ^
 --optimizer_type="adamw8bit" --learning_rate="%TRAIN_LR%" --lr_scheduler="constant" ^
 --mixed_precision="bf16" --save_precision="bf16" --gradient_checkpointing --gradient_accumulation_steps=1 ^
 --highvram --sdpa --fp8_base --split_mode --network_args="train_blocks=single" ^
 --timestep_sampling=shift --discrete_flow_shift="3.1582" --model_prediction_type="raw" --guidance_scale="1.0" ^
 --train_batch_size=1 --max_train_epochs=1 --save_every_n_epochs=1 --save_every_n_steps=300 ^
 --save_model_as safetensors --output_dir="%OUTPUT_DIR%" --output_name="%OUTPUT_MODEL_NAME%"
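rem notes on the memory-saving flags above: --fp8_base keeps the base FLUX weights in fp8, and
rem --split_mode (which requires network_args "train_blocks=single") runs the model in two parts
rem and trains LoRA only on the single blocks, per kohya's low-VRAM recommendation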
rem optional flag combinations to swap in/out:
rem --split_mode --network_args="train_blocks=single"
rem --fused_backward_pass --cpu_offload_checkpointing
rem --sdpa --fp8_base
rem --dataset_config="dataset_1024_bs2.toml" (see the TOML sketch after these notes)
rem --mixed_precision="bf16" --save_precision="bf16"
rem --lr_warmup_steps=0
rem --highvram
rem --learning_rate="1e-4" (0.0001)
rem --learning_rate="5e-5" (0.00005)
rem --learning_rate="1e-5" (0.00001) for fine-tuning?
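rem minimal kohya dataset-config sketch for a file like dataset_1024_bs2.toml
rem (contents hypothetical, mirroring the flags above; --dataset_config then
rem replaces --train_data_dir/--resolution on the command line):
rem   [general]
rem   enable_bucket = true
rem   caption_extension = ".txt"
rem   [[datasets]]
rem   resolution = 1024
rem   batch_size = 2
rem   [[datasets.subsets]]
rem   image_dir = "C:/trainimages/img4"
rem   num_repeats = 50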
rem triton (if missing):
rem pip3 install --pre torch torchvision torchaudio torchtriton --extra-index-url https://download.pytorch.org/whl/nightly/cu118 --force-reinstall
rem https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/6932
rem https://pytorch.org/get-started/locally/
rem finetune https://github.com/kohya-ss/sd-scripts/issues/1587
rem accelerate launch --config_file="../config_files/accelerate/default_config.yaml" --num_cpu_threads_per_process=2 "flux_train_network.py" --pretrained_model_name_or_path="%in_pretrain%" --clip_l="%in_preclip%" --t5xxl="%in_pret5%" --ae="%in_preae%" --cache_latents_to_disk --save_model_as safetensors --persistent_data_loader_workers --max_data_loader_n_workers=2 --train_data_dir="C:/trainimages/img2/" --resolution=512 --seed="42" --gradient_checkpointing --gradient_accumulation_steps=1 --mixed_precision="fp16" --save_precision="fp16" --network_module="networks.lora_flux" --network_dim=8 --network_alpha=4 --optimizer_type="adafactor" --optimizer_args="relative_step=False scale_parameter=False warmup_init=False" --lr_scheduler constant_with_warmup --max_grad_norm 0.0 --highvram --learning_rate="1e-5" --cache_text_encoder_outputs --cache_text_encoder_outputs_to_disk --max_train_epochs=1 --save_every_n_epochs=1 --output_dir="%OUTPUT_DIR%" --output_name="flux-lora-analie2" --timestep_sampling=shift --discrete_flow_shift="3.1582" --model_prediction_type="raw" --guidance_scale="1.0" --train_batch_size=1 --lr_scheduler="constant" --enable_bucket --min_bucket_reso=256 --max_bucket_reso=1280 --bucket_reso_steps=64 --fused_backward_pass --cpu_offload_checkpointing --full_bf16
rem optimizer choices: adamw8bit, adafactor, prodigy
rem scheduler choices: cosine, constant, constant_with_warmup
rem --optimizer_args='["relative_step=False","scale_parameter=False","warmup_init=False"]'
rem the optimizer_args above are for adafactor (the fine-tuning run) only
rem prodigy needs: pip install -U prodigyopt
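rem note: prodigy adapts its own step size, so it is typically run with
rem learning_rate=1.0 (as in the dreambooth command below)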
rem if flash attention is missing: pip install flash-attn --no-build-isolation
rem then reinstall torch/CUDA as usual
cd ..
rem dreambooth flux alternative (train_dreambooth_flux.py, from the diffusers examples):
rem accelerate launch "train_dreambooth_flux.py" --pretrained_model_name_or_path="%MODEL_NAME%" --instance_data_dir="%INSTANCE_DIR%" --output_dir="%OUTPUT_DIR%" --mixed_precision="bf16" --instance_prompt="a photo of analie" --resolution=1024 --train_batch_size=1 --guidance_scale=1 --gradient_accumulation_steps=4 --optimizer="prodigy" --learning_rate=1. --report_to="wandb" --lr_scheduler="constant" --lr_warmup_steps=0 --max_train_steps=500 --validation_prompt="A photo of analie wearing a diaper" --validation_epochs=25 --seed="0"
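rem training finished: flip the status flag back to "0"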
echo 0 >F:\xampp\www\training.log
rem x-flux (XLabs-AI) deepspeed LoRA alternative:
rem accelerate launch "train_flux_lora_deepspeed.py" --config "train_configs/test_lora.yaml"