~/scripts/ai/kohya_ss

merger.bat
startcmdonly-sd.bat
startcmdonly_flux.bat
test.bat
test.py


kohya_ss/startcmdonly-sd.bat (4 KB, 8 months ago)

@echo off
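rem trains a Pony (SDXL) LoRA with kohya_ss / sd-scripts
rem training.log flag: 1 = training running, 0 = finished (presumably polled by the local XAMPP site)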


echo 1 >F:\xampp\www\training.log

python -m venv venv

call venv\Scripts\deactivate.bat
call venv\Scripts\activate.bat
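rem (venv creation above is a no-op if the venv already exists; deactivate first in case an env is still active)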

rem set in_pretrain="F:/stable-g2/kohya_ss/models/v1-5-pruned.fp16.safetensors"

set in_pretrain="F:/stable-g2/stable-diffusion-webui-forge/models/Stable-diffusion/goddessOfRealism_gorPONYV3VAE.safetensors"
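rem Pony-derived SDXL checkpoint; matches the sdxl training script selected below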

rem max steps = image count * repeats per image (~50)
set max_steps="3300"
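rem e.g. 66 images x ~50 repeats = 3300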

rem set in_preclip="F:/stable-g2/kohya_ss/models/clip_l.safetensors"
rem set in_pret5="F:/stable-g2/kohya_ss/models/model.fp16.safetensors"
rem set in_preae="F:/stable-g2/kohya_ss/models/diffusion_pytorch_model.fp16.safetensors"

set OUTPUT_DIR="F:/stable-g2/kohya_ss/output"

set TRAIN_DIR="C:/trainimages/img4/"
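rem sd-scripts expects image subfolders named <repeats>_<name> in here,
rem e.g. C:/trainimages/img4/50_analie (assumed layout; 50 matches the repeats above)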
set TRAIN_RES=512
set TRAIN_SEED="0"
set TRAIN_LR="5e-4"

rem set OUTPUT_MODEL_NAME="sd-lora-analie5"
set OUTPUT_MODEL_NAME="pony-lora-analie2"

set optimizer="adamw8bit"
rem set optimizer="prodigy"
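rem note: prodigy sets its own step size; learning_rate (and the unet/text-encoder
rem LRs in the launch command below) are normally set to 1.0 with it, e.g.:
rem set TRAIN_LR="1.0"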

rem set trainnetwork="train_network.py"
rem Pony is SDXL-based, so use the sdxl training script
set trainnetwork="sdxl_train_network.py"

rem kohya SD 1.5 variant (for reference):
rem accelerate launch --num_cpu_threads_per_process=8 "train_network.py" --enable_bucket --pretrained_model_name_or_path="%in_pretrain%" --train_data_dir="C:/trainimages/img" --resolution=1024,1024 --output_dir="%OUTPUT_DIR%" --logging_dir="C:/trainimages/log" --network_alpha="128" --save_model_as=safetensors --network_module=networks.lora --text_encoder_lr=5e-5 --unet_lr=0.0001 --lr_scheduler_num_cycles="1" --learning_rate="0.00003" --lr_scheduler="cosine" --train_batch_size="1" --max_train_steps="%max_steps%" --save_every_n_epochs="1" --mixed_precision="fp16" --save_precision="fp16" --seed="0" --caption_extension=".txt" --cache_latents --max_data_loader_n_workers="1" --clip_skip=1 --optimizer_type="AdamW8bit" --bucket_reso_steps=64 --mem_eff_attn --gradient_checkpointing --xformers --bucket_no_upscale

cd sd-scripts
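rem (the training scripts live in the sd-scripts submodule of kohya_ss)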

accelerate launch --config_file="../config_files/accelerate/default_config.yaml" --num_cpu_threads_per_process=2 "%trainnetwork%" ^
 --pretrained_model_name_or_path="%in_pretrain%" --clip_skip=1 --save_model_as=safetensors --sdpa ^
 --persistent_data_loader_workers --max_data_loader_n_workers=2 ^
 --train_data_dir="%TRAIN_DIR%" --resolution=%TRAIN_RES% --seed="%TRAIN_SEED%" ^
 --gradient_checkpointing --gradient_accumulation_steps=1 --mixed_precision="fp16" --save_precision="fp16" ^
 --network_module="networks.lora" --network_dim=16 --network_alpha=8 ^
 --optimizer_type="%optimizer%" --text_encoder_lr="1e-4" --unet_lr="5e-4" --learning_rate="%TRAIN_LR%" ^
 --highvram --max_train_epochs=10 --save_every_n_epochs=1 ^
 --output_dir="%OUTPUT_DIR%" --output_name="%OUTPUT_MODEL_NAME%" ^
 --train_batch_size=2 --max_train_steps="%max_steps%" ^
 --lr_scheduler="constant" --enable_bucket --bucket_no_upscale --bucket_reso_steps=64
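rem network_alpha/network_dim = 8/16, i.e. LoRA deltas are scaled by 0.5
rem caution: sd-scripts recomputes the step limit from --max_train_epochs when it is set,
rem so --max_train_steps above may be overridden (verify on your version)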

rem  --mem_eff_attn (1.5 only) --xformers
rem --dataset_config="dataset_1024_bs2.toml"
rem  --lr_warmup_steps=0
rem  --fused_backward_pass
rem  --network_args="train_blocks=single"
rem --save_every_n_steps=300
rem --lr_scheduler="cosine_with_restarts"

rem adamw8bit adafactor prodigy
rem cosine constant constant_with_warmup
rem --optimizer_args='["relative_step=False","scale_parameter=False","warmup_init=False"]'
rem (the optimizer_args above are for adafactor, fine-tuning only)

rem pip install -U prodigyopt
rem pip install flash-attn --no-build-isolation   (if flash-attn is reported missing)
rem then reinstall torch and CUDA as usual
cd ..
rem kohya flux ?
rem accelerate launch "train_dreambooth_flux.py" --pretrained_model_name_or_path="%MODEL_NAME%" --instance_data_dir="%INSTANCE_DIR%" --output_dir="%OUTPUT_DIR%" --mixed_precision="bf16" --instance_prompt="a photo of analie" --resolution=1024 --train_batch_size=1 --guidance_scale=1 --gradient_accumulation_steps=4 --optimizer="prodigy" --learning_rate=1. --report_to="wandb" --lr_scheduler="constant" --lr_warmup_steps=0 --max_train_steps=500 --validation_prompt="A photo of analie wearing a diaper" --validation_epochs=25 --seed="0"
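rem (train_dreambooth_flux.py appears to be the diffusers example script, not part of sd-scripts)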


echo 0 >F:\xampp\www\training.log

rem x-flux (XLabs) flux LoRA variant:
rem accelerate launch "train_flux_lora_deepspeed.py" --config "train_configs/test_lora.yaml"
