fix bug and remove bnb

John Smith 2023-04-20 09:51:57 +08:00
parent 90e628121a
commit 3b18aa1cc6
3 changed files with 1 addition and 3 deletions

@@ -249,7 +249,7 @@ def load_llama_model_4bit_low_ram_and_offload(config_path, model_path, lora_path
if lora_path is not None:
from peft import PeftModel
from monkeypatch.peft_tuners_lora_monkey_patch import Linear4bitLt
-model = PeftModel.from_pretrained(model, lora_path, device_map={'': 'cpu'}, torch_dtype=torch.float32)
+model = PeftModel.from_pretrained(model, lora_path, device_map={'': 'cpu'}, torch_dtype=torch.float32, is_trainable=True)
print(Style.BRIGHT + Fore.GREEN + '{} Lora Applied.'.format(lora_path))
model.seqlen = seqlen
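
The one-line fix above adds is_trainable=True. By default, PeftModel.from_pretrained loads the adapter in inference mode and freezes the LoRA weights, so a later fine-tuning pass would leave the adapter unchanged. A minimal sketch of the fixed call, outside this repo's 4-bit loader and with a placeholder model id and adapter path:

import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Placeholder base model and adapter path, not part of this project.
base = AutoModelForCausalLM.from_pretrained("base-model-id")

# is_trainable=True keeps the LoRA weights unfrozen so further training updates them;
# omitting it loads the adapter for inference only.
model = PeftModel.from_pretrained(
    base, "path/to/lora", torch_dtype=torch.float32, is_trainable=True
)
print(any(p.requires_grad for p in model.parameters()))  # True when loaded as trainable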

@@ -2,7 +2,6 @@ import math
import re
import torch
import warnings
-import bitsandbytes as bnb
from peft.tuners import lora
from peft.tuners.lora import is_bnb_available, Linear, Linear8bitLt, LoraLayer

@@ -1,6 +1,5 @@
torch
accelerate
-bitsandbytes
datasets
sentencepiece
safetensors