fix bug and remove bnb

commit 3b18aa1cc6 (parent 90e628121a)
@@ -249,7 +249,7 @@ def load_llama_model_4bit_low_ram_and_offload(config_path, model_path, lora_path
     if lora_path is not None:
         from peft import PeftModel
         from monkeypatch.peft_tuners_lora_monkey_patch import Linear4bitLt
-        model = PeftModel.from_pretrained(model, lora_path, device_map={'': 'cpu'}, torch_dtype=torch.float32)
+        model = PeftModel.from_pretrained(model, lora_path, device_map={'': 'cpu'}, torch_dtype=torch.float32, is_trainable=True)
         print(Style.BRIGHT + Fore.GREEN + '{} Lora Applied.'.format(lora_path))

     model.seqlen = seqlen
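The bug fix here is the added is_trainable=True: PEFT loads adapters in inference mode by default, which freezes the LoRA weights, so a resumed fine-tune would otherwise silently update nothing. A minimal sketch of what the flag changes, assuming a placeholder base model id and a hypothetical adapter path:

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained('llama-7b-hf')    # placeholder model id
model = PeftModel.from_pretrained(base, 'my-lora-adapter',    # hypothetical adapter path
                                  device_map={'': 'cpu'},
                                  torch_dtype=torch.float32,
                                  is_trainable=True)

# With is_trainable=True the LoRA parameters keep requires_grad=True,
# so a subsequent training run can actually update them:
assert any(p.requires_grad for p in model.parameters())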
@@ -2,7 +2,6 @@ import math
 import re
 import torch
 import warnings
-import bitsandbytes as bnb

 from peft.tuners import lora
 from peft.tuners.lora import is_bnb_available, Linear, Linear8bitLt, LoraLayer
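Dropping the module-level import bitsandbytes as bnb means the monkeypatch no longer hard-requires bitsandbytes just to be imported. Where bnb-specific code is still needed, peft's is_bnb_available() helper (already imported above) supports the usual guard pattern; a sketch of that pattern, not the repo's actual code:

from peft.tuners.lora import is_bnb_available

if is_bnb_available():
    # Only touch bitsandbytes-specific pieces when the library is installed.
    import bitsandbytes as bnb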
@@ -1,6 +1,5 @@
 torch
 accelerate
-bitsandbytes
 datasets
 sentencepiece
 safetensors
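With bitsandbytes removed from the dependency list, a fresh environment install (assuming this file is the project's usual requirements.txt) no longer pulls in the CUDA-linked bnb wheel:

pip install -r requirements.txt   # installs torch, accelerate, datasets, sentencepiece, safetensors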