Merge pull request #18 from s4rduk4r/finetune-refactor
Refactor finetune.py
commit cd1a299ba3
Finetune4bConfig.py (new file)
@@ -0,0 +1,78 @@
class Finetune4bConfig:
    """Config holder for LLaMA 4bit finetuning
    """
    def __init__(self, dataset: str, ds_type: str,
                 lora_out_dir: str, lora_apply_dir: str,
                 llama_q4_config_dir: str, llama_q4_model: str,
                 mbatch_size: int, batch_size: int,
                 epochs: int, lr: float,
                 cutoff_len: int,
                 lora_r: int, lora_alpha: int, lora_dropout: float,
                 val_set_size: float,
                 gradient_checkpointing: bool,
                 gradient_checkpointing_ratio: float,
                 warmup_steps: int, save_steps: int, save_total_limit: int, logging_steps: int,
                 checkpoint: bool, skip: bool
                 ):
        """
        Args:
            dataset (str): Path to dataset file
            ds_type (str): Dataset structure format
            lora_out_dir (str): Directory to place new LoRA
            lora_apply_dir (str): Path to directory from which LoRA has to be applied before training
            llama_q4_config_dir (str): Path to the config.json, tokenizer_config.json, etc.
            llama_q4_model (str): Path to the quantized model in huggingface format
            mbatch_size (int): Micro-batch size
            batch_size (int): Batch size
            epochs (int): Epochs
            lr (float): Learning rate
            cutoff_len (int): Cutoff length
            lora_r (int): LoRA R
            lora_alpha (int): LoRA Alpha
            lora_dropout (float): LoRA Dropout
            val_set_size (float): Validation set size
            gradient_checkpointing (bool): Use gradient checkpointing
            gradient_checkpointing_ratio (float): Gradient checkpoint ratio
            warmup_steps (int): Warmup steps before training
            save_steps (int): Save steps
            save_total_limit (int): Save total limit
            logging_steps (int): Logging steps
            checkpoint (bool): Produce checkpoint instead of LoRA
            skip (bool): Don't train model
        """
        self.dataset = dataset
        self.ds_type = ds_type
        self.lora_out_dir = lora_out_dir
        self.lora_apply_dir = lora_apply_dir
        self.llama_q4_config_dir = llama_q4_config_dir
        self.llama_q4_model = llama_q4_model
        self.mbatch_size = mbatch_size
        self.batch_size = batch_size
        self.gradient_accumulation_steps = self.batch_size // self.mbatch_size
        self.epochs = epochs
        self.lr = lr
        self.cutoff_len = cutoff_len
        self.lora_r = lora_r
        self.lora_alpha = lora_alpha
        self.lora_dropout = 0 if gradient_checkpointing else lora_dropout  # should be 0 if gradient checkpointing is on
        self.val_set_size = int(val_set_size) if val_set_size > 1.0 else float(val_set_size)
        self.gradient_checkpointing = gradient_checkpointing
        self.gradient_checkpointing_ratio = gradient_checkpointing_ratio
        self.warmup_steps = warmup_steps
        self.save_steps = save_steps
        self.save_total_limit = save_total_limit
        self.logging_steps = logging_steps
        self.checkpoint = checkpoint
        self.skip = skip

    def __str__(self) -> str:
        s = f"\nParameters:\n{'config':-^20}\n{self.dataset=}\n{self.ds_type=}\n{self.lora_out_dir=}\n{self.lora_apply_dir=}\n{self.llama_q4_config_dir=}\n{self.llama_q4_model=}\n\n" +\
            f"{'training':-^20}\n" +\
            f"{self.mbatch_size=}\n{self.batch_size=}\n{self.gradient_accumulation_steps=}\n{self.epochs=}\n{self.lr=}\n{self.cutoff_len=}\n" +\
            f"{self.lora_r=}\n{self.lora_alpha=}\n{self.lora_dropout=}\n{self.val_set_size=}\n" +\
            f"{self.gradient_checkpointing=}\n{self.gradient_checkpointing_ratio=}\n" +\
            f"{self.warmup_steps=}\n{self.save_steps=}\n{self.save_total_limit=}\n" +\
            f"{self.logging_steps=}\n" +\
            f"{self.checkpoint=}\n{self.skip=}"
        return s.replace("self.", "")
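For orientation, a minimal usage sketch of the config holder (illustrative, not part of the commit; the values simply mirror the defaults defined in arg_parser.py below):

# Illustrative only: constructing the config directly (finetune.py gets it from arg_parser.get_config()).
from Finetune4bConfig import Finetune4bConfig

cfg = Finetune4bConfig(
    dataset="./dataset.json", ds_type="alpaca",
    lora_out_dir="alpaca_lora", lora_apply_dir=None,
    llama_q4_config_dir="./llama-13b-4bit/", llama_q4_model="./llama-13b-4bit.pt",
    mbatch_size=1, batch_size=2,
    epochs=3, lr=2e-4,
    cutoff_len=256,
    lora_r=8, lora_alpha=16, lora_dropout=0.05,
    val_set_size=0.2,
    gradient_checkpointing=False,
    gradient_checkpointing_ratio=1.0,
    warmup_steps=50, save_steps=50, save_total_limit=3, logging_steps=10,
    checkpoint=False, skip=False,
)
print(cfg)  # __str__ lists the "config" and "training" parameter groups, with "self." stripped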
arg_parser.py (new file)
@@ -0,0 +1,85 @@
import os
import argparse
from Finetune4bConfig import Finetune4bConfig

def parse_commandline():
    parser = argparse.ArgumentParser(
        prog=__file__.split(os.path.sep)[-1],
        description="Produce LoRA in 4bit training",
        usage="%(prog)s [config] [training]\n\nAll arguments are optional"
    )

    parser.add_argument("dataset", nargs="?",
        default="./dataset.json",
        help="Path to dataset file. Default: %(default)s"
    )

    parser_config = parser.add_argument_group("config")
    parser_training = parser.add_argument_group("training")

    # Config args group
    parser_config.add_argument("--ds_type", choices=["txt", "alpaca"], default="alpaca", required=False,
        help="Dataset structure format. Default: %(default)s"
    )
    parser_config.add_argument("--lora_out_dir", default="alpaca_lora", required=False,
        help="Directory to place new LoRA. Default: %(default)s"
    )
    parser_config.add_argument("--lora_apply_dir", default=None, required=False,
        help="Path to directory from which LoRA has to be applied before training. Default: %(default)s"
    )
    parser_config.add_argument("--llama_q4_config_dir", default="./llama-13b-4bit/", required=False,
        help="Path to the config.json, tokenizer_config.json, etc. Default: %(default)s"
    )
    parser_config.add_argument("--llama_q4_model", default="./llama-13b-4bit.pt", required=False,
        help="Path to the quantized model in huggingface format. Default: %(default)s"
    )

    # Training args group
    parser_training.add_argument("--mbatch_size", default=1, type=int, help="Micro-batch size. Default: %(default)s")
    parser_training.add_argument("--batch_size", default=2, type=int, help="Batch size. Default: %(default)s")
    parser_training.add_argument("--epochs", default=3, type=int, help="Epochs. Default: %(default)s")
    parser_training.add_argument("--lr", default=2e-4, type=float, help="Learning rate. Default: %(default)s")
    parser_training.add_argument("--cutoff_len", default=256, type=int, help="Default: %(default)s")
    parser_training.add_argument("--lora_r", default=8, type=int, help="Default: %(default)s")
    parser_training.add_argument("--lora_alpha", default=16, type=int, help="Default: %(default)s")
    parser_training.add_argument("--lora_dropout", default=0.05, type=float, help="Default: %(default)s")
    parser_training.add_argument("--grad_chckpt", action="store_true", required=False, help="Use gradient checkpoint. For 30B model. Default: %(default)s")
    parser_training.add_argument("--grad_chckpt_ratio", default=1, type=float, help="Gradient checkpoint ratio. Default: %(default)s")
    parser_training.add_argument("--val_set_size", default=0.2, type=float, help="Validation set size. Default: %(default)s")
    parser_training.add_argument("--warmup_steps", default=50, type=int, help="Default: %(default)s")
    parser_training.add_argument("--save_steps", default=50, type=int, help="Default: %(default)s")
    parser_training.add_argument("--save_total_limit", default=3, type=int, help="Default: %(default)s")
    parser_training.add_argument("--logging_steps", default=10, type=int, help="Default: %(default)s")
    parser_training.add_argument("-c", "--checkpoint", action="store_true", help="Produce checkpoint instead of LoRA. Default: %(default)s")
    parser_training.add_argument("--skip", action="store_true", help="Don't train model. Can be useful to produce checkpoint from existing LoRA. Default: %(default)s")

    return vars(parser.parse_args())


def get_config() -> Finetune4bConfig:
    args = parse_commandline()
    return Finetune4bConfig(
        dataset=args["dataset"],
        ds_type=args["ds_type"],
        lora_out_dir=args["lora_out_dir"],
        lora_apply_dir=args["lora_apply_dir"],
        llama_q4_config_dir=args["llama_q4_config_dir"],
        llama_q4_model=args["llama_q4_model"],
        mbatch_size=args["mbatch_size"],
        batch_size=args["batch_size"],
        epochs=args["epochs"],
        lr=args["lr"],
        cutoff_len=args["cutoff_len"],
        lora_r=args["lora_r"],
        lora_alpha=args["lora_alpha"],
        lora_dropout=args["lora_dropout"],
        val_set_size=args["val_set_size"],
        gradient_checkpointing=args["grad_chckpt"],
        gradient_checkpointing_ratio=args["grad_chckpt_ratio"],
        warmup_steps=args["warmup_steps"],
        save_steps=args["save_steps"],
        save_total_limit=args["save_total_limit"],
        logging_steps=args["logging_steps"],
        checkpoint=args["checkpoint"],
        skip=args["skip"]
    )
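And a sketch of the command-line path into the same config (illustrative only; the argv values here are examples, not requirements):

# Illustrative only: the same config built through the CLI parser.
# Equivalent command line: python finetune.py ./dataset.json --ds_type alpaca --batch_size 2 --grad_chckpt
import sys
from arg_parser import get_config

sys.argv = ["finetune.py", "./dataset.json", "--ds_type", "alpaca", "--batch_size", "2", "--grad_chckpt"]
cfg = get_config()
assert cfg.gradient_accumulation_steps == cfg.batch_size // cfg.mbatch_size  # 2 // 1
assert cfg.lora_dropout == 0  # zeroed by Finetune4bConfig because --grad_chckpt is set
print(cfg)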
finetune.py (292 changed lines)
@@ -1,156 +1,136 @@
-import os
-import sys
-sys.path.insert(0, './repository/transformers/src')
-sys.path.insert(0, './repository/GPTQ-for-LLaMa')
-sys.path.insert(0, './repository/peft/src')
-
-import peft
-import peft.tuners.lora
-assert peft.tuners.lora.is_gptq_available()
-
-import time
-import torch
-import transformers
-from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
-import accelerate
-from modelutils import find_layers
-from autograd_4bit import make_quant_for_4bit_autograd
-from autograd_4bit import load_llama_model_4bit_low_ram
-from datasets import load_dataset, Dataset
-import json
-from peft import prepare_model_for_int8_training, LoraConfig, get_peft_model, get_peft_model_state_dict, PeftModel
-
-
-# Parameters
-DATA_PATH = "./data.txt"
-OUTPUT_DIR = "alpaca_lora"
-lora_path_old = ''
-config_path = './llama-13b-4bit/'
-model_path = './llama-13b-4bit.pt'
-
-MICRO_BATCH_SIZE = 1
-BATCH_SIZE = 2
-GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE
-EPOCHS = 3
-LEARNING_RATE = 2e-4
-CUTOFF_LEN = 256
-LORA_R = 8
-LORA_ALPHA = 16
-LORA_DROPOUT = 0.05 # should be 0 if gradient checkpointing is on
-VAL_SET_SIZE = 0
-TARGET_MODULES = [
-    "q_proj",
-    "v_proj",
-]
-GRADIENT_CHECKPOINTING = False
-GRADIENT_CHECKPOINTING_RATIO = 1
-warmup_steps = 50
-save_steps = 50
-save_total_limit = 3
-logging_steps = 10
-
-if LORA_DROPOUT > 0 and GRADIENT_CHECKPOINTING:
-    LORA_DROPOUT = 0
-    print('Disable Dropout.')
-
-# Load Basic Model
-model, tokenizer = load_llama_model_4bit_low_ram(config_path, model_path)
-
-# Config Lora
-config = LoraConfig(
-    r=LORA_R,
-    lora_alpha=LORA_ALPHA,
-    target_modules=["q_proj", "v_proj"],
-    lora_dropout=LORA_DROPOUT,
-    bias="none",
-    task_type="CAUSAL_LM",
-)
-if lora_path_old == '':
-    model = get_peft_model(model, config)
-else:
-    model = PeftModel.from_pretrained(model, lora_path_old)
-    print(lora_path_old, 'loaded')
-
-# Scales to half
-print('Fitting 4bit scales and zeros to half')
-for n, m in model.named_modules():
-    if '4bit' in str(type(m)):
-        m.zeros = m.zeros.half()
-        m.scales = m.scales.half()
-
-# Set tokenizer
-tokenizer.pad_token_id = 0
-
-# Load Data
-with open(DATA_PATH, 'r', encoding='utf8') as file:
-    txt = file.read()
-txt = txt.replace('\r\n', '\n')
-rows = [r for r in txt.split('\n') if r != '']
-data = Dataset.from_dict({"input": rows})
-exceed_count = 0
-def tokenize(prompt):
-    # there's probably a way to do this with the tokenizer settings
-    # but again, gotta move fast
-    global exceed_count
-    prompt = prompt['input']
-    result = tokenizer(
-        prompt,
-        truncation=True,
-        max_length=CUTOFF_LEN + 1,
-        padding="max_length",
-    )
-    d = {
-        "input_ids": result["input_ids"][:-1],
-        "attention_mask": result["attention_mask"][:-1],
-    }
-    if sum(d['attention_mask']) >= CUTOFF_LEN:
-        exceed_count += 1
-    return d
-data = data.shuffle().map(lambda x: tokenize(x))
-print('Train Data: {:.2f}%'.format(exceed_count / len(data) * 100), 'outliers')
-train_data = data
-
-# Use gradient checkpointing
-if GRADIENT_CHECKPOINTING:
-    print('Applying gradient checkpointing ...')
-    from gradient_checkpointing import apply_gradient_checkpointing
-    apply_gradient_checkpointing(model, checkpoint_ratio=GRADIENT_CHECKPOINTING_RATIO)
-
-trainer = transformers.Trainer(
-    model=model,
-    train_dataset=train_data,
-    args=transformers.TrainingArguments(
-        per_device_train_batch_size=MICRO_BATCH_SIZE,
-        gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
-        warmup_steps=warmup_steps,
-        num_train_epochs=EPOCHS,
-        learning_rate=LEARNING_RATE,
-        fp16=True,
-        logging_steps=logging_steps,
-        evaluation_strategy="no",
-        save_strategy="steps",
-        eval_steps=None,
-        save_steps=save_steps,
-        output_dir=OUTPUT_DIR,
-        save_total_limit=save_total_limit,
-        load_best_model_at_end=False
-    ),
-    data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
-)
-model.config.use_cache = False
-
-# Set Model dict
-old_state_dict = model.state_dict
-model.state_dict = (
-    lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
-).__get__(model, type(model))
-
-# Run Trainer
-trainer.train()
-
-print('Train completed.')
-
-# Save Model
-model.save_pretrained(OUTPUT_DIR)
-
-print('Model Saved.')
+"""
+llama-4b trainer with support of Stanford Alpaca-like JSON datasets (short for SAD)
+Intended to use with https://github.com/johnsmith0031/alpaca_lora_4bit
+
+SAD structure:
+[
+{
+    "instruction": "Give null hypothesis",
+    "input": "6 subjects were given a drug (treatment group) and an additional 6 subjects a placebo (control group).",
+    "output": "Drug is equivalent of placebo"
+},
+{
+    "instruction": "What does RNA stand for?",
+    "input": "",
+    "output": "RNA stands for ribonucleic acid."
+}
+]
+"""
+
+import sys
+sys.path.insert(0, './repository/transformers/src')
+sys.path.insert(0, './repository/GPTQ-for-LLaMa')
+sys.path.insert(0, './repository/peft/src')
+
+import peft
+import peft.tuners.lora
+assert peft.tuners.lora.is_gptq_available()
+
+import torch
+import transformers
+from autograd_4bit import load_llama_model_4bit_low_ram
+from peft import LoraConfig, get_peft_model, get_peft_model_state_dict, PeftModel
+
+# ! Config
+from arg_parser import get_config
+import train_data
+
+ft_config = get_config()
+
+# * Show loaded parameters
+print(f"{ft_config}\n")
+
+if ft_config.gradient_checkpointing:
+    print('Disable Dropout.')
+
+# Load Basic Model
+model, tokenizer = load_llama_model_4bit_low_ram(ft_config.llama_q4_config_dir, ft_config.llama_q4_model)
+
+# Config Lora
+lora_config = LoraConfig(
+    r=ft_config.lora_r,
+    lora_alpha=ft_config.lora_alpha,
+    target_modules=["q_proj", "v_proj"],
+    lora_dropout=ft_config.lora_dropout,
+    bias="none",
+    task_type="CAUSAL_LM",
+)
+if ft_config.lora_apply_dir is None:
+    model = get_peft_model(model, lora_config)
+else:
+    model = PeftModel.from_pretrained(model, ft_config.lora_apply_dir, device_map={'': 0}, torch_dtype=torch.float32) # ! Direct copy from inference.py
+    print(ft_config.lora_apply_dir, 'loaded')
+
+# Scales to half
+print('Fitting 4bit scales and zeros to half')
+for n, m in model.named_modules():
+    if '4bit' in str(type(m)):
+        m.zeros = m.zeros.half()
+        m.scales = m.scales.half()
+
+# Set tokenizer
+tokenizer.pad_token_id = 0
+
+if not ft_config.skip:
+    # Load Data
+    data = None
+    match ft_config.ds_type:
+        case "txt" if not ft_config.skip:
+            #### LLaMA
+            data = train_data.TrainTxt(ft_config.dataset, ft_config.val_set_size, tokenizer, ft_config.cutoff_len)
+        case "alpaca" if not ft_config.skip:
+            #### Stanford Alpaca-like Data
+            data = train_data.TrainSAD(ft_config.dataset, ft_config.val_set_size, tokenizer, ft_config.cutoff_len)
+        case _:
+            raise NotImplementedError("ERROR: Unknown dataset format")
+    data.prepare_data()
+    ####
+
+    # Use gradient checkpointing
+    if ft_config.gradient_checkpointing:
+        print('Applying gradient checkpointing ...')
+        from gradient_checkpointing import apply_gradient_checkpointing
+        apply_gradient_checkpointing(model, checkpoint_ratio=ft_config.gradient_checkpointing_ratio)
+
+    trainer = transformers.Trainer(
+        model=model,
+        train_dataset=data.train_data,
+        eval_dataset=data.val_data,
+        args=transformers.TrainingArguments(
+            per_device_train_batch_size=ft_config.mbatch_size,
+            gradient_accumulation_steps=ft_config.gradient_accumulation_steps,
+            warmup_steps=ft_config.warmup_steps,
+            num_train_epochs=ft_config.epochs,
+            learning_rate=ft_config.lr,
+            fp16=True,
+            logging_steps=ft_config.logging_steps,
+            evaluation_strategy="no",
+            save_strategy="steps",
+            eval_steps=None,
+            save_steps=ft_config.save_steps,
+            output_dir=ft_config.lora_out_dir,
+            save_total_limit=ft_config.save_total_limit,
+            load_best_model_at_end=False
+        ),
+        data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
+    )
+    model.config.use_cache = False
+
+    # Set Model dict
+    old_state_dict = model.state_dict
+    model.state_dict = (
+        lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
+    ).__get__(model, type(model))
+
+    # Run Trainer
+    trainer.train()
+
+    print('Train completed.')
+
+# Save Model
+model.save_pretrained(ft_config.lora_out_dir)
+
+if ft_config.checkpoint:
+    print("Warning: Merge model + LoRA and save the whole checkpoint not implemented yet.")
+
+print('Model Saved.')
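The state_dict rebinding kept from the old script is easy to misread; here is a standalone sketch of the same descriptor trick, with a plain dict filter standing in for peft's get_peft_model_state_dict (illustrative, not part of the commit):

# Illustrative only: the mechanism behind "model.state_dict = (...).__get__(model, type(model))".
class Dummy:
    def state_dict(self):
        return {"base.weight": 1, "lora_A.weight": 2}

m = Dummy()
old_state_dict = m.state_dict              # keep the original bound method
m.state_dict = (
    lambda self, *_, **__: {k: v for k, v in old_state_dict().items() if "lora" in k}
).__get__(m, type(m))                      # rebind the lambda as a method of this instance
print(m.state_dict())                      # {'lora_A.weight': 2} -- only the adapter weights get saved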
train_data.py (new file)
@@ -0,0 +1,126 @@
from abc import ABC, abstractmethod
from typing import Dict, Any
from datasets import load_dataset, Dataset


# Abstract train data loader
class ATrainData(ABC):
    """
    """
    @abstractmethod
    def __init__(self, dataset: str, val_set_size: int, tokenizer, cutoff_len: int) -> None:
        """
        Args:
            dataset (str): Path to dataset
            val_set_size (int): Size of validation set
            tokenizer (_type_): Tokenizer
        """
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.val_set_size = val_set_size
        self.cutoff_len = cutoff_len
        self.train_data = None
        self.val_data = None

    @abstractmethod
    def tokenize(self, prompt: str) -> Dict[str, Any]:
        """Tokenization method

        Args:
            prompt (str): Prompt string from dataset

        Returns:
            Dict[str, Any]: token
        """
        pass

    @abstractmethod
    def prepare_data(self) -> None:
        """Loads dataset from file and prepares train_data property for trainer
        """
        pass


# LLaMA txt train data loader
class TrainTxt(ATrainData):
    def __init__(self, dataset: str, val_set_size: int, tokenizer, cutoff_len):
        super().__init__(dataset, val_set_size, tokenizer, cutoff_len)  # TODO: Validation size isn't used
        self.cutoff_len = cutoff_len
        self.exceed_count = 0

    def tokenize(self, prompt: str) -> Dict[str, Any]:
        # there's probably a way to do this with the tokenizer settings
        # but again, gotta move fast
        prompt = prompt['input']
        result = self.tokenizer(
            prompt,
            truncation=True,
            max_length=self.cutoff_len + 1,
            padding="max_length",
        )
        d = {
            "input_ids": result["input_ids"][:-1],
            "attention_mask": result["attention_mask"][:-1],
        }
        if sum(d['attention_mask']) >= self.cutoff_len:
            self.exceed_count += 1
        return d

    def prepare_data(self):
        with open(self.dataset, 'r', encoding='utf8') as file:
            txt = file.read()
        txt = txt.replace('\r\n', '\n')
        rows = [r for r in txt.split('\n') if r != '']
        data = Dataset.from_dict({"input": rows})
        data = data.shuffle().map(lambda x: self.tokenize(x))
        print('Train Data: {:.2f}%'.format(self.exceed_count / len(data) * 100), 'outliers')
        self.train_data = data


# Stanford Alpaca-like Data
class TrainSAD(ATrainData):
    def __init__(self, dataset: str, val_set_size: int, tokenizer, cutoff_len) -> None:
        super().__init__(dataset, val_set_size, tokenizer, cutoff_len)

    def tokenize(self, prompt: str) -> Dict[str, Any]:
        # there's probably a way to do this with the tokenizer settings
        # but again, gotta move fast
        result = self.tokenizer(
            prompt,
            truncation=True,
            max_length=self.cutoff_len + 1,
            padding="max_length",
        )
        return {
            "input_ids": result["input_ids"][:-1],
            "attention_mask": result["attention_mask"][:-1],
        }

    def prepare_data(self) -> None:
        data = load_dataset("json", data_files=self.dataset)

        if self.val_set_size > 0:
            train_val = data["train"].train_test_split(
                test_size=self.val_set_size, shuffle=True, seed=42  # ! Seed = 42 (?)
            )
            self.train_data = train_val["train"].shuffle().map(self.generate_and_tokenize_prompt)
            self.val_data = train_val["test"].shuffle().map(self.generate_and_tokenize_prompt)
        else:
            self.train_data = data["train"].shuffle().map(self.generate_and_tokenize_prompt)
            self.val_data = None

    # Auxiliary methods
    def generate_prompt(self, data_point):
        return "{0}\n\n{1}\n{2}\n\n{3}\n{4}\n\n{5}\n{6}".format(
            "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.",
            "### Instruction:",
            data_point["instruction"],
            "### Input:",
            data_point["input"],
            "### Response:",
            data_point["output"]
        )

    def generate_and_tokenize_prompt(self, data_point):
        prompt = self.generate_prompt(data_point)
        return self.tokenize(prompt)
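Finally, a rough sketch of driving the new loaders outside finetune.py (illustrative, not part of the commit; it assumes a LLaMA tokenizer can be loaded from the q4 config directory, which holds tokenizer_config.json per the --llama_q4_config_dir help text):

# Illustrative only: exercising TrainSAD on its own.
from transformers import LlamaTokenizer
import train_data

tokenizer = LlamaTokenizer.from_pretrained("./llama-13b-4bit/")  # example path, assumes tokenizer files live there
tokenizer.pad_token_id = 0

sad = train_data.TrainSAD("./dataset.json", val_set_size=0.2, tokenizer=tokenizer, cutoff_len=256)
sad.prepare_data()
print(len(sad.train_data), len(sad.val_data))  # tokenized splits ready for transformers.Trainer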