Update Finetune4bConfig.py

yamashi 2023-04-07 00:43:15 +02:00 committed by GitHub
parent 778035152d
commit 95cd390d25
1 changed file with 3 additions and 1 deletion

@@ -15,7 +15,7 @@ class Finetune4bConfig:
         warmup_steps: int, save_steps: int, save_total_limit: int, logging_steps: int,
         checkpoint: bool, skip: bool, verbose: bool,
         txt_row_thd: int, use_eos_token: bool, groupsize: int,
-        local_rank: int,
+        local_rank: int, flash_attention: bool
     ):
         """
         Args:
@@ -48,6 +48,7 @@
             use_eos_token (bool): Use Eos token instead of padding with 0
             groupsize (int): Group size of V2 model, use -1 to load V1 model
             local_rank (int): local rank if using torch.distributed.launch
+            flash_attention (bool): Enables flash attention
         """
         self.dataset = dataset
         self.ds_type = ds_type
@@ -84,6 +85,7 @@
         if self.ddp:
             self.gradient_accumulation_steps = self.gradient_accumulation_steps // self.world_size
         self.groupsize = groupsize
+        self.flash_attention = flash_attention
     def __str__(self) -> str:
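
For context, a minimal sketch of how a boolean flag like the new flash_attention field is typically consumed at the attention layer. The attention helper below is an illustrative assumption, not code from this commit; it assumes PyTorch 2.x, where torch.nn.functional.scaled_dot_product_attention can dispatch to a FlashAttention kernel on supported hardware.

# Illustrative sketch only -- not part of this commit.
# Assumes PyTorch >= 2.0, whose scaled_dot_product_attention can select
# a FlashAttention kernel on supported GPUs and dtypes.
import torch
import torch.nn.functional as F

def attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
              flash_attention: bool) -> torch.Tensor:
    if flash_attention:
        # Fused kernel path: the full attention matrix is never materialized.
        return F.scaled_dot_product_attention(q, k, v)
    # Reference path: explicit softmax(Q K^T / sqrt(d)) V.
    scale = q.size(-1) ** -0.5
    scores = (q @ k.transpose(-2, -1)) * scale
    return torch.softmax(scores, dim=-1) @ v

Gating the backend on a config flag keeps the fused path opt-in, which is useful because FlashAttention kernels have hardware and dtype requirements (e.g. fp16/bf16 on recent GPUs) that not every finetuning setup meets.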