Bugfix in custom_monkey_patch for v1 models

Previously, generation would fail with:

  File "/alpaca_lora_4bit/text-generation-webui/matmul_utils_4bit.py", line 79, in _matmul4bit_v1_recons
    quant_cuda.vecquant4recons_v1(qweight, buffer, scales, zeros)
RuntimeError: expected scalar type Half but found Float

See #71
parent 5d3267d80d
commit ee7d94a1f3
@@ -24,7 +24,7 @@ def load_model_llama(*args, **kwargs):
     print('Apply auto switch and half')
     for n, m in model.named_modules():
         if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
-            if m.groupsize == -1:
+            if m.is_v1_model:
                 m.zeros = m.zeros.half()
             m.scales = m.scales.half()
             m.bias = m.bias.half()
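
For context, here is a minimal sketch of what the patched conversion pass does after the model is loaded. The helper name (half_quant_buffers) and the import path are assumptions for illustration; the attribute names (is_v1_model, zeros, scales, bias) come from the diff above, and the kernel call comes from the traceback. The actual patch applies the same conversion to Linear4bitLt LoRA layers as well.

    # Minimal sketch, not the repository's exact code. Assumes `model` is the
    # 4-bit model returned by the loader before half conversion.
    from autograd_4bit import Autograd4bitQuantLinear  # import path assumed

    def half_quant_buffers(model):
        for n, m in model.named_modules():
            if isinstance(m, Autograd4bitQuantLinear):
                if m.is_v1_model:
                    # v1 checkpoints store zero points as a float tensor; the v1
                    # reconstruction kernel, called as
                    # quant_cuda.vecquant4recons_v1(qweight, buffer, scales, zeros),
                    # requires fp16 inputs. The old `m.groupsize == -1` check
                    # apparently missed these models, leaving `zeros` in fp32 and
                    # raising "expected scalar type Half but found Float".
                    m.zeros = m.zeros.half()
                m.scales = m.scales.half()
                m.bias = m.bias.half()
        return model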