diff --git a/src/axolotl/utils/models.py b/src/axolotl/utils/models.py
index 8d24524a2..35005bfce 100644
--- a/src/axolotl/utils/models.py
+++ b/src/axolotl/utils/models.py
@@ -96,12 +96,6 @@ def check_model_config(cfg: DictDefault, model_config: Union[AutoConfig, DictDef
             "Please make sure to point to a GPTQ model."
         )
 
-    if not cfg.gptq and quant_config_exists and not cfg.load_in_4bit:
-        raise ValueError(
-            "model_config.quantization_config is set but `gptq` flag is not. "
-            "Please use the `gptq` flag to train quantized model or point to a non-quantized model."
-        )
-
     lora_modules_to_save = get_linear_embedding_layers(model_config.model_type)
     if (
         cfg.adapter
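
For context, here is a minimal sketch of the condition the deleted guard tested. It is illustrative only: a plain dict stands in for axolotl's `DictDefault` config object, and `quant_config_exists` is assumed to mirror whether `model_config.quantization_config` is set (both assumptions, not the project's actual wiring).

```python
# Minimal sketch of the removed guard's condition (assumptions: a plain
# dict stands in for axolotl's DictDefault cfg; quant_config_exists
# mirrors model_config.quantization_config being present).
cfg = {"gptq": False, "load_in_4bit": False}
quant_config_exists = True  # e.g. a pre-quantized checkpoint's config.json

would_have_raised = (
    not cfg["gptq"] and quant_config_exists and not cfg["load_in_4bit"]
)
# Before this hunk, check_model_config raised ValueError for this case;
# after it, validation passes through to the adapter checks below.
print("old guard would have raised:", would_have_raised)  # True
```

The practical effect of the removal is that a config pointing at an already-quantized checkpoint no longer has to set `gptq: true` (or `load_in_4bit: true`) solely to get past `check_model_config`.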