core.training_args

core.training_args

Extra Axolotl-specific training arguments.

Classes

Name Description
AxolotlCPOConfig Configuration for CPO (Contrastive Preference Optimization) training
AxolotlKTOConfig Configuration for KTO (Kahneman-Tversky Optimization) training
AxolotlORPOConfig Configuration for ORPO (Odds Ratio Preference Optimization) training
AxolotlPRMConfig Configuration for PRM (Process Reward Model) training
AxolotlRewardConfig Configuration for reward-model training
AxolotlTrainingArguments Training arguments for the causal trainer
AxolotlTrainingMixins Mixin class providing the Axolotl-specific training arguments

AxolotlCPOConfig

core.training_args.AxolotlCPOConfig(
    self,
    model_type=None,
    lr_quadratic_warmup=False,
    pretraining=False,
    sample_packing=False,
    multipack_real_batches=False,
    eval_sample_packing=None,
    sample_packing_efficiency=1.0,
    sample_packing_bin_size=200,
    sample_packing_group_size=100000,
    max_seq_length=2048,
    relora_steps=None,
    relora_warmup_steps=None,
    relora_anneal_steps=None,
    relora_prune_ratio=0.9,
    bench_split='eval',
    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
    do_bench_eval=False,
    do_causal_lm_eval=False,
    max_bench_samples=None,
    bench_source_max_len=2048,
    dataloader_prefetch_factor=None,
    cosine_min_lr_ratio=None,
    cosine_constant_lr_ratio=None,
    loraplus_lr_ratio=None,
    loraplus_lr_embedding=1e-06,
    embedding_lr_scale=None,
    lr_groups=None,
    embedding_lr=None,
    qlora=False,
    orpo_alpha=None,
    lisa_n_layers=None,
    lisa_step_interval=None,
    lisa_layers_attribute=None,
    curriculum_sampling=None,
    alternate_optimizer=None,
    alternate_lr_scheduler_type=None,
    chat_template=None,
    kd_ce_alpha=None,
    kd_alpha=1.0,
    kd_temperature=1.0,
    kd_zscore_base_temp=None,
    kd_top_k_before_softmax=None,
    sequence_parallel_degree=1,
    image_size=None,
    image_resize_algorithm=None,
    simpo_gamma=None,
)

Configuration for CPO (Contrastive Preference Optimization) training.

AxolotlKTOConfig

core.training_args.AxolotlKTOConfig(
    self,
    model_type=None,
    lr_quadratic_warmup=False,
    pretraining=False,
    sample_packing=False,
    multipack_real_batches=False,
    eval_sample_packing=None,
    sample_packing_efficiency=1.0,
    sample_packing_bin_size=200,
    sample_packing_group_size=100000,
    max_seq_length=2048,
    relora_steps=None,
    relora_warmup_steps=None,
    relora_anneal_steps=None,
    relora_prune_ratio=0.9,
    bench_split='eval',
    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
    do_bench_eval=False,
    do_causal_lm_eval=False,
    max_bench_samples=None,
    bench_source_max_len=2048,
    dataloader_prefetch_factor=None,
    cosine_min_lr_ratio=None,
    cosine_constant_lr_ratio=None,
    loraplus_lr_ratio=None,
    loraplus_lr_embedding=1e-06,
    embedding_lr_scale=None,
    lr_groups=None,
    embedding_lr=None,
    qlora=False,
    orpo_alpha=None,
    lisa_n_layers=None,
    lisa_step_interval=None,
    lisa_layers_attribute=None,
    curriculum_sampling=None,
    alternate_optimizer=None,
    alternate_lr_scheduler_type=None,
    chat_template=None,
    kd_ce_alpha=None,
    kd_alpha=1.0,
    kd_temperature=1.0,
    kd_zscore_base_temp=None,
    kd_top_k_before_softmax=None,
    sequence_parallel_degree=1,
    image_size=None,
    image_resize_algorithm=None,
)

Configuration for KTO (Kahneman-Tversky Optimization) training.

AxolotlORPOConfig

core.training_args.AxolotlORPOConfig(
    self,
    model_type=None,
    lr_quadratic_warmup=False,
    pretraining=False,
    sample_packing=False,
    multipack_real_batches=False,
    eval_sample_packing=None,
    sample_packing_efficiency=1.0,
    sample_packing_bin_size=200,
    sample_packing_group_size=100000,
    max_seq_length=2048,
    relora_steps=None,
    relora_warmup_steps=None,
    relora_anneal_steps=None,
    relora_prune_ratio=0.9,
    bench_split='eval',
    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
    do_bench_eval=False,
    do_causal_lm_eval=False,
    max_bench_samples=None,
    bench_source_max_len=2048,
    dataloader_prefetch_factor=None,
    cosine_min_lr_ratio=None,
    cosine_constant_lr_ratio=None,
    loraplus_lr_ratio=None,
    loraplus_lr_embedding=1e-06,
    embedding_lr_scale=None,
    lr_groups=None,
    embedding_lr=None,
    qlora=False,
    orpo_alpha=None,
    lisa_n_layers=None,
    lisa_step_interval=None,
    lisa_layers_attribute=None,
    curriculum_sampling=None,
    alternate_optimizer=None,
    alternate_lr_scheduler_type=None,
    chat_template=None,
    kd_ce_alpha=None,
    kd_alpha=1.0,
    kd_temperature=1.0,
    kd_zscore_base_temp=None,
    kd_top_k_before_softmax=None,
    sequence_parallel_degree=1,
    image_size=None,
    image_resize_algorithm=None,
)

Configuration for ORPO (Odds Ratio Preference Optimization) training.

AxolotlPRMConfig

core.training_args.AxolotlPRMConfig(
    self,
    model_type=None,
    lr_quadratic_warmup=False,
    pretraining=False,
    sample_packing=False,
    multipack_real_batches=False,
    eval_sample_packing=None,
    sample_packing_efficiency=1.0,
    sample_packing_bin_size=200,
    sample_packing_group_size=100000,
    max_seq_length=2048,
    relora_steps=None,
    relora_warmup_steps=None,
    relora_anneal_steps=None,
    relora_prune_ratio=0.9,
    bench_split='eval',
    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
    do_bench_eval=False,
    do_causal_lm_eval=False,
    max_bench_samples=None,
    bench_source_max_len=2048,
    dataloader_prefetch_factor=None,
    cosine_min_lr_ratio=None,
    cosine_constant_lr_ratio=None,
    loraplus_lr_ratio=None,
    loraplus_lr_embedding=1e-06,
    embedding_lr_scale=None,
    lr_groups=None,
    embedding_lr=None,
    qlora=False,
    orpo_alpha=None,
    lisa_n_layers=None,
    lisa_step_interval=None,
    lisa_layers_attribute=None,
    curriculum_sampling=None,
    alternate_optimizer=None,
    alternate_lr_scheduler_type=None,
    chat_template=None,
    kd_ce_alpha=None,
    kd_alpha=1.0,
    kd_temperature=1.0,
    kd_zscore_base_temp=None,
    kd_top_k_before_softmax=None,
    sequence_parallel_degree=1,
    image_size=None,
    image_resize_algorithm=None,
)

Configuration for PRM (Process Reward Model) training.

AxolotlRewardConfig

core.training_args.AxolotlRewardConfig(
    self,
    model_type=None,
    lr_quadratic_warmup=False,
    pretraining=False,
    sample_packing=False,
    multipack_real_batches=False,
    eval_sample_packing=None,
    sample_packing_efficiency=1.0,
    sample_packing_bin_size=200,
    sample_packing_group_size=100000,
    max_seq_length=2048,
    relora_steps=None,
    relora_warmup_steps=None,
    relora_anneal_steps=None,
    relora_prune_ratio=0.9,
    bench_split='eval',
    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
    do_bench_eval=False,
    do_causal_lm_eval=False,
    max_bench_samples=None,
    bench_source_max_len=2048,
    dataloader_prefetch_factor=None,
    cosine_min_lr_ratio=None,
    cosine_constant_lr_ratio=None,
    loraplus_lr_ratio=None,
    loraplus_lr_embedding=1e-06,
    embedding_lr_scale=None,
    lr_groups=None,
    embedding_lr=None,
    qlora=False,
    orpo_alpha=None,
    lisa_n_layers=None,
    lisa_step_interval=None,
    lisa_layers_attribute=None,
    curriculum_sampling=None,
    alternate_optimizer=None,
    alternate_lr_scheduler_type=None,
    chat_template=None,
    kd_ce_alpha=None,
    kd_alpha=1.0,
    kd_temperature=1.0,
    kd_zscore_base_temp=None,
    kd_top_k_before_softmax=None,
    sequence_parallel_degree=1,
    image_size=None,
    image_resize_algorithm=None,
)

Configuration for reward-model training.

AxolotlTrainingArguments

core.training_args.AxolotlTrainingArguments(
    self,
    model_type=None,
    lr_quadratic_warmup=False,
    pretraining=False,
    sample_packing=False,
    multipack_real_batches=False,
    eval_sample_packing=None,
    sample_packing_efficiency=1.0,
    sample_packing_bin_size=200,
    sample_packing_group_size=100000,
    max_seq_length=2048,
    relora_steps=None,
    relora_warmup_steps=None,
    relora_anneal_steps=None,
    relora_prune_ratio=0.9,
    bench_split='eval',
    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
    do_bench_eval=False,
    do_causal_lm_eval=False,
    max_bench_samples=None,
    bench_source_max_len=2048,
    dataloader_prefetch_factor=None,
    cosine_min_lr_ratio=None,
    cosine_constant_lr_ratio=None,
    loraplus_lr_ratio=None,
    loraplus_lr_embedding=1e-06,
    embedding_lr_scale=None,
    lr_groups=None,
    embedding_lr=None,
    qlora=False,
    orpo_alpha=None,
    lisa_n_layers=None,
    lisa_step_interval=None,
    lisa_layers_attribute=None,
    curriculum_sampling=None,
    alternate_optimizer=None,
    alternate_lr_scheduler_type=None,
    chat_template=None,
    kd_ce_alpha=None,
    kd_alpha=1.0,
    kd_temperature=1.0,
    kd_zscore_base_temp=None,
    kd_top_k_before_softmax=None,
    sequence_parallel_degree=1,
    image_size=None,
    image_resize_algorithm=None,
)

Training arguments for the causal trainer.

Note: these arguments are duplicated (rather than inherited from the mixin) because Hugging Face's `TrainingArguments` requires `output_dir` without a default value, which prevents it from being combined with the mixin directly.

AxolotlTrainingMixins

core.training_args.AxolotlTrainingMixins(
    self,
    model_type=None,
    lr_quadratic_warmup=False,
    pretraining=False,
    sample_packing=False,
    multipack_real_batches=False,
    eval_sample_packing=None,
    sample_packing_efficiency=1.0,
    sample_packing_bin_size=200,
    sample_packing_group_size=100000,
    max_seq_length=2048,
    relora_steps=None,
    relora_warmup_steps=None,
    relora_anneal_steps=None,
    relora_prune_ratio=0.9,
    bench_split='eval',
    bench_dataset='pharaouk/dharma-1/dharma_1_mini.json',
    do_bench_eval=False,
    do_causal_lm_eval=False,
    max_bench_samples=None,
    bench_source_max_len=2048,
    dataloader_prefetch_factor=None,
    cosine_min_lr_ratio=None,
    cosine_constant_lr_ratio=None,
    loraplus_lr_ratio=None,
    loraplus_lr_embedding=1e-06,
    embedding_lr_scale=None,
    lr_groups=None,
    embedding_lr=None,
    qlora=False,
    orpo_alpha=None,
    lisa_n_layers=None,
    lisa_step_interval=None,
    lisa_layers_attribute=None,
    curriculum_sampling=None,
    alternate_optimizer=None,
    alternate_lr_scheduler_type=None,
    chat_template=None,
    kd_ce_alpha=None,
    kd_alpha=1.0,
    kd_temperature=1.0,
    kd_zscore_base_temp=None,
    kd_top_k_before_softmax=None,
    sequence_parallel_degree=1,
    image_size=None,
    image_resize_algorithm=None,
)

Mixin class providing the Axolotl-specific training arguments.