From 23446459ab392cc704e8bc00d9a7722f2300a807 Mon Sep 17 00:00:00 2001
From: Marc Sun
Date: Thu, 31 Aug 2023 18:15:13 +0000
Subject: [PATCH] add warning and better compatibility

---
 optimum/gptq/quantizer.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/optimum/gptq/quantizer.py b/optimum/gptq/quantizer.py
index 88f3f3b95f..3c6d9c0dfc 100644
--- a/optimum/gptq/quantizer.py
+++ b/optimum/gptq/quantizer.py
@@ -129,6 +129,7 @@ def __init__(
         self.pad_token_id = pad_token_id
         self.disable_exllama = disable_exllama
         self.max_input_length = max_input_length
+        self.quant_method = QuantizationMethod.GPTQ
 
         if self.bits not in [2, 3, 4, 8]:
             raise ValueError("only support quantize to [2,3,4,8] bits.")
@@ -445,6 +446,12 @@ def tmp(_, input, output):
                     "Found modules on cpu/disk. Using Exllama backend requires all the modules to be on GPU. Setting `disable_exllama=True`"
                 )
                 self.disable_exllama = True
+            elif self.desc_act:
+                logger.warning(
+                    "Using Exllama backend with act_order will reorder the weights offline, thus you will not be able to save the model with the right weights."
+                    "Setting `disable_exllama=True`. You should only use Exllama backend with act_order for inference. "
+                )
+                self.disable_exllama = True
 
         # Step 4: Pack the model at the end (Replacing the layers)
         self.pack_model(model=model, quantizers=quantizers)
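
Note (not part of the patch): below is a minimal sketch of how a caller would reach the new warning path, assuming the GPTQQuantizer constructor arguments and quantize_model() method as they exist in optimum/gptq at this commit; the model id and calibration dataset are placeholders.

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from optimum.gptq import GPTQQuantizer

    model_id = "facebook/opt-125m"  # placeholder model, any causal LM works
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # Modules must end up on GPU; otherwise the earlier "Found modules on cpu/disk"
    # branch fires before the new desc_act branch is ever reached.
    model = AutoModelForCausalLM.from_pretrained(
        model_id, torch_dtype=torch.float16, device_map="auto"
    )

    # desc_act=True (act_order) together with the Exllama kernels requested
    # (disable_exllama=False) now logs the warning added above and flips
    # disable_exllama to True before Step 4 packs the layers, so the weights
    # are packed in an order that can be saved correctly.
    quantizer = GPTQQuantizer(
        bits=4,
        dataset="c4",  # placeholder calibration dataset
        desc_act=True,
        disable_exllama=False,
    )
    quantized_model = quantizer.quantize_model(model, tokenizer)

Per the warning text, Exllama with act_order is intended for inference only: quantize and save without it, then re-enable the Exllama backend when loading the saved model.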