diff --git a/modules/AutoGPTQ_loader.py b/modules/AutoGPTQ_loader.py
index 8623cf8d..f33803e8 100644
--- a/modules/AutoGPTQ_loader.py
+++ b/modules/AutoGPTQ_loader.py
@@ -1,6 +1,6 @@
 from pathlib import Path
 
-from accelerate import is_xpu_available
+from accelerate.utils import is_xpu_available
 from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
 
 import modules.shared as shared
diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index cdbcf08b..fe5577e1 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -5,7 +5,7 @@ from pathlib import Path
 import accelerate
 import torch
 import transformers
-from accelerate import is_xpu_available
+from accelerate.utils import is_xpu_available
 from gptq_for_llama import llama_inference_offload
 from gptq_for_llama.modelutils import find_layers
 from gptq_for_llama.quant import make_quant
diff --git a/modules/models.py b/modules/models.py
index de160022..cbead69d 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -7,12 +7,8 @@ from pathlib import Path
 
 import torch
 import transformers
-from accelerate import (
-    infer_auto_device_map,
-    init_empty_weights,
-    is_ccl_available,
-    is_xpu_available
-)
+from accelerate import infer_auto_device_map, init_empty_weights
+from accelerate.utils import is_ccl_available, is_xpu_available
 from transformers import (
     AutoConfig,
     AutoModel,
diff --git a/requirements.txt b/requirements.txt
index d00a8b23..a8764c3f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-accelerate==0.23.*
+accelerate==0.24.*
 colorama
 datasets
 einops