From 84d957ba6238741bdf9e7cfde41f20cd0b4aa023 Mon Sep 17 00:00:00 2001
From: 俞航
Date: Mon, 6 Nov 2023 07:42:45 +0800
Subject: [PATCH] [Fix] fix openai embedding_model loading as str (#4147)

---
 extensions/openai/embeddings.py | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/extensions/openai/embeddings.py b/extensions/openai/embeddings.py
index 96f44d91..d6d30721 100644
--- a/extensions/openai/embeddings.py
+++ b/extensions/openai/embeddings.py
@@ -26,23 +26,21 @@ def load_embedding_model(model: str) -> SentenceTransformer:
     initialize_embedding_params()
     global embeddings_device, embeddings_model
     try:
-        embeddings_model = 'loading...' # flag
+        print(f"\nTry embedding model: {model} on {embeddings_device}")
         # see: https://www.sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer
-        emb_model = SentenceTransformer(model, device=embeddings_device)
-        # ... emb_model.device doesn't seem to work, always cpu anyways? but specify cpu anyways to free more VRAM
-        print(f"\nLoaded embedding model: {model} on {emb_model.device} [always seems to say 'cpu', even if 'cuda'], max sequence length: {emb_model.max_seq_length}")
+        embeddings_model = SentenceTransformer(model, device=embeddings_device)
+        # ... embeddings_model.device doesn't seem to work, always cpu anyways? but specify cpu anyways to free more VRAM
+        print(f"\nLoaded embedding model: {model} on {embeddings_model.device} [always seems to say 'cpu', even if 'cuda'], max sequence length: {embeddings_model.max_seq_length}")
     except Exception as e:
         embeddings_model = None
         raise ServiceUnavailableError(f"Error: Failed to load embedding model: {model}", internal_message=repr(e))
 
-    return emb_model
-
 
 def get_embeddings_model() -> SentenceTransformer:
     initialize_embedding_params()
     global embeddings_model, st_model
     if st_model and not embeddings_model:
-        embeddings_model = load_embedding_model(st_model) # lazy load the model
+        load_embedding_model(st_model) # lazy load the model
 
     return embeddings_model
 
@@ -53,7 +51,11 @@ def get_embeddings_model_name() -> str:
 
 
 def get_embeddings(input: list) -> np.ndarray:
-    return get_embeddings_model().encode(input, convert_to_numpy=True, normalize_embeddings=True, convert_to_tensor=False, device=embeddings_device)
+    model = get_embeddings_model()
+    debug_msg(f"embedding model : {model}")
+    embedding = model.encode(input, convert_to_numpy=True, normalize_embeddings=True, convert_to_tensor=False)
+    debug_msg(f"embedding result : {embedding}") # might be too long even for debug, use at your own risk
+    return embedding
 
 
 def embeddings(input: list, encoding_format: str) -> dict:
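
Reviewer note (not part of the patch): a minimal sketch of the failure mode named in the subject line, assuming some code path reads the module-level embeddings_model global rather than the return value of load_embedding_model(). DummyModel and load_embedding_model_old are hypothetical stand-ins, not code from this repository.

    embeddings_model = None  # module-level cache, as in the extension

    class DummyModel:
        """Hypothetical stand-in for sentence_transformers.SentenceTransformer."""
        def encode(self, texts):
            return [[0.0, 0.0, 0.0] for _ in texts]

    def load_embedding_model_old(name: str):
        # Pre-patch shape: the global gets a placeholder str, and the real
        # model only exists as a local unless the caller assigns it back.
        global embeddings_model
        embeddings_model = 'loading...'  # flag: the cache now holds a str
        model = DummyModel()
        return model

    load_embedding_model_old('sentence-transformers/all-mpnet-base-v2')
    print(type(embeddings_model))     # <class 'str'> -- the cache holds the flag
    print(embeddings_model.encode())  # b'loading...' -- str.encode() masks the bug

Assigning SentenceTransformer(...) directly to the global inside load_embedding_model(), as this patch does, removes the window in which the cache holds the placeholder string.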