Fix is_ccl_available & is_xpu_available imports

oobabooga 2023-10-26 20:26:25 -07:00
parent 778a010df8
commit 839a87bac8
4 changed files with 5 additions and 9 deletions
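The change is the same across all four files: is_ccl_available and is_xpu_available are imported from accelerate.utils instead of from the accelerate package top level, and the accelerate requirement is bumped to 0.24.*. As a minimal sketch (not part of this commit), code that must run against both the old and new pins could guard the import, assuming the top-level name still resolved under the previously pinned 0.23 line:

try:
    # Stable location, matching the new accelerate==0.24.* pin
    from accelerate.utils import is_ccl_available, is_xpu_available
except ImportError:
    # Assumed fallback for older accelerate releases that exposed
    # these helpers at the package top level
    from accelerate import is_ccl_available, is_xpu_available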


@@ -1,6 +1,6 @@
 from pathlib import Path
 
-from accelerate import is_xpu_available
+from accelerate.utils import is_xpu_available
 from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
 
 import modules.shared as shared


@@ -5,7 +5,7 @@ from pathlib import Path
 import accelerate
 import torch
 import transformers
-from accelerate import is_xpu_available
+from accelerate.utils import is_xpu_available
 from gptq_for_llama import llama_inference_offload
 from gptq_for_llama.modelutils import find_layers
 from gptq_for_llama.quant import make_quant


@@ -7,12 +7,8 @@ from pathlib import Path
 
 import torch
 import transformers
-from accelerate import (
-    infer_auto_device_map,
-    init_empty_weights,
-    is_ccl_available,
-    is_xpu_available
-)
+from accelerate import infer_auto_device_map, init_empty_weights
+from accelerate.utils import is_ccl_available, is_xpu_available
 from transformers import (
     AutoConfig,
     AutoModel,


@@ -1,4 +1,4 @@
-accelerate==0.23.*
+accelerate==0.24.*
 colorama
 datasets
 einops
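
For reference, a small usage sketch under the new import paths (assumes accelerate==0.24.* and PyTorch with optional Intel XPU support); the device and backend choices below are illustrative, not taken from the commit:

import torch
from accelerate.utils import is_ccl_available, is_xpu_available

# Pick an accelerator device, preferring Intel XPU when present
device = "xpu:0" if is_xpu_available() else ("cuda:0" if torch.cuda.is_available() else "cpu")
# oneCCL bindings enable the "ccl" distributed backend; fall back to gloo
backend = "ccl" if is_ccl_available() else "gloo"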