update reference

John Smith 2023-04-13 10:35:10 +08:00
parent 5ff11b5bf2
commit 6aab31bd73
2 changed files with 3 additions and 2 deletions

Changed file 1:

@@ -248,7 +248,7 @@ def load_llama_model_4bit_low_ram_and_offload(config_path, model_path, lora_path
     if lora_path is not None:
         from peft import PeftModel
-        from peft.tuners.lora import Linear4bitLt
+        from monkeypatch.peft_tuners_lora_monkey_patch import Linear4bitLt
         model = PeftModel.from_pretrained(model, lora_path, device_map={'': 'cpu'}, torch_dtype=torch.float32)
         print(Style.BRIGHT + Fore.GREEN + '{} Lora Applied.'.format(lora_path))
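
The only substantive change in this hunk is where Linear4bitLt comes from: the offload loader now imports it from the project's own monkeypatch module instead of peft.tuners.lora. As a rough illustration of why the loader needs the class at all, a post-processing pass over the loaded model might look like the sketch below; the isinstance check and the .half() downcast are assumptions for illustration, not code from this commit.

# Illustrative sketch only: assumes Linear4bitLt is the LoRA wrapper that the
# monkeypatch substitutes for peft's stock LoRA linear on 4-bit GPTQ weights.
from autograd_4bit import Autograd4bitQuantLinear
from monkeypatch.peft_tuners_lora_monkey_patch import Linear4bitLt

def downcast_quant_layers(model):
    # Walk every submodule and downcast the quantized / LoRA-wrapped linears,
    # keeping the CPU-offloaded float buffers in a compact dtype.
    for _, module in model.named_modules():
        if isinstance(module, (Autograd4bitQuantLinear, Linear4bitLt)):
            module.half()
    return model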

Changed file 2:

@@ -3,7 +3,8 @@ import torch
 import autograd_4bit
 from autograd_4bit import load_llama_model_4bit_low_ram, Autograd4bitQuantLinear
 from peft import PeftModel
-from peft.tuners.lora import Linear4bitLt
+from monkeypatch.peft_tuners_lora_monkey_patch import replace_peft_model_with_gptq_lora_model, Linear4bitLt
+replace_peft_model_with_gptq_lora_model()
 patch_encode_func = False
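
Taken together, the two hunks imply a specific calling order in downstream scripts: install the monkey patch before building the PEFT model, so the LoRA adapter is attached to the 4-bit GPTQ layers rather than to stock peft linears. A minimal sketch of that order follows; it reuses only names that appear in this diff, and the return values and placeholder paths are assumptions.

# Minimal usage sketch; argument names, return values, and paths are assumed.
import torch
from monkeypatch.peft_tuners_lora_monkey_patch import replace_peft_model_with_gptq_lora_model
from autograd_4bit import load_llama_model_4bit_low_ram
from peft import PeftModel

# 1. Patch peft first, so its LoRA tuner wraps 4-bit GPTQ layers with Linear4bitLt.
replace_peft_model_with_gptq_lora_model()

# 2. Load the quantized base model (assumed to return model and tokenizer).
model, tokenizer = load_llama_model_4bit_low_ram('path/to/config', 'path/to/model.safetensors')

# 3. Attach the LoRA adapter; mirrors the call shown in the first changed file.
model = PeftModel.from_pretrained(model, 'path/to/lora', device_map={'': 'cpu'}, torch_dtype=torch.float32)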