fix bug
parent b5af5c00e1
commit f9c94f27cc
@@ -4,7 +4,7 @@ import torch
 import warnings
 
 from peft.tuners import lora
-from peft.tuners.lora import is_bnb_available, Linear, Linear8bitLt, LoraLayer
+from peft.tuners.lora import Linear, LoraLayer
 from peft.utils import _get_submodules, PeftType
 from torch import nn
 from transformers.pytorch_utils import Conv1D
@@ -87,12 +87,7 @@ class Linear4bitLt(Autograd4bitQuantLinear, LoraLayer):
 class GPTQLoraModel(lora.LoraModel):
     def _find_and_replace(self, adapter_name):
         lora_config = self.peft_config[adapter_name]
-        loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False)
-        if loaded_in_8bit and not is_bnb_available():
-            raise ImportError(
-                "To use Lora with 8-bit quantization, please install the `bitsandbytes` package. "
-                "You can install it with `pip install bitsandbytes`."
-            )
+        loaded_in_8bit = False
         is_target_modules_in_base_model = False
         kwargs = {
             "r": lora_config.r,
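For context, a minimal sketch of how the imports and the start of
GPTQLoraModel._find_and_replace read once this commit is applied. The
indentation and everything outside the two hunks (file name, surrounding
code, the rest of the method) are assumed from the diff context, not taken
from the page itself.

    import warnings

    from peft.tuners import lora
    from peft.tuners.lora import Linear, LoraLayer
    from peft.utils import _get_submodules, PeftType
    from torch import nn
    from transformers.pytorch_utils import Conv1D


    class GPTQLoraModel(lora.LoraModel):
        def _find_and_replace(self, adapter_name):
            lora_config = self.peft_config[adapter_name]
            # Previously getattr(self.model, "is_loaded_in_8bit", False),
            # followed by an is_bnb_available() check that raised ImportError;
            # this commit hard-codes the flag instead.
            loaded_in_8bit = False
            is_target_modules_in_base_model = False
            kwargs = {
                "r": lora_config.r,
                # ... remaining LoRA kwargs and the module replacement logic
                # continue as in the original file (not shown in this diff).
            }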