Refactor engines module to fetch engine details from API

Update chat.py

Signed-off-by: Daniel Salvatierra <dsalvat1@gmail.com>
This commit is contained in:
dsalvatierra 2023-11-17 20:48:38 -05:00 committed by AT
parent db70f1752a
commit 76413e1d03
2 changed files with 59 additions and 46 deletions

View File

@ -1,39 +1,35 @@
import logging
import time
from typing import Dict, List
from api_v1.settings import settings
from fastapi import APIRouter, Depends, Response, Security, status
from typing import List
from uuid import uuid4
from fastapi import APIRouter
from pydantic import BaseModel, Field
from api_v1.settings import settings
from fastapi.responses import StreamingResponse
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
### This should follow https://github.com/openai/openai-openapi/blob/master/openapi.yaml
class ChatCompletionMessage(BaseModel):
    '''A single message in a chat conversation (OpenAI chat format).'''
    role: str  # message author, e.g. "system", "user", or "assistant"
    content: str  # the text content of the message
class ChatCompletionRequest(BaseModel):
    '''Request body for POST /chat/completions.

    Fix: the diff left duplicate `model`/`messages` definitions (the old pair
    shadowed by the new); only the current definitions are kept. The old
    `messages` description was also a copy-paste of the model description.
    '''
    # Defaults to the server-configured model when the client omits one.
    model: str = Field(settings.model, description='The model to generate a completion from.')
    messages: List[ChatCompletionMessage] = Field(..., description='Messages for the chat completion.')
class ChatCompletionChoice(BaseModel):
    '''One candidate completion within a chat completion response.'''
    message: ChatCompletionMessage  # the generated chat message
    index: int  # position of this choice in the response's choices list
    # NOTE(review): the OpenAI spec allows logprobs to be null — consider
    # Optional[float]; confirm against callers before changing.
    logprobs: float
    finish_reason: str  # why generation stopped, e.g. "stop" or "length"
class ChatCompletionUsage(BaseModel):
    '''Token accounting for a chat completion (OpenAI usage object).'''
    prompt_tokens: int  # tokens consumed by the input messages
    completion_tokens: int  # tokens generated in the reply
    total_tokens: int  # conventionally prompt_tokens + completion_tokens
class ChatCompletionResponse(BaseModel):
id: str
object: str = 'text_completion'
@ -42,20 +38,38 @@ class ChatCompletionResponse(BaseModel):
choices: List[ChatCompletionChoice]
usage: ChatCompletionUsage
# Router for the OpenAI-compatible chat endpoints, mounted under /chat.
router = APIRouter(prefix="/chat", tags=["Completions Endpoints"])
@router.post("/completions", response_model=ChatCompletionResponse)
async def chat_completion(request: ChatCompletionRequest):
    '''
    Completes a GPT4All model response based on the last message in the chat.

    Fix: the diff left the old early `return ChatCompletionResponse(id='asdf', ...)`
    in place before the new logic, making everything after it unreachable, and
    duplicated the docstring line. Only the new implementation is kept.
    '''
    # Echo the last message content — placeholder until real inference is wired in.
    if request.messages:
        last_message = request.messages[-1].content
        response_content = f"Echo: {last_message}"
    else:
        response_content = "No messages received."

    # Create a chat message for the response.
    response_message = ChatCompletionMessage(role="system", content=response_content)

    # Single choice wrapping the echoed message.
    response_choice = ChatCompletionChoice(
        message=response_message,
        index=0,
        logprobs=-1.0,  # Placeholder value
        finish_reason="length",  # Placeholder value
    )

    # Assemble the OpenAI-style completion response.
    return ChatCompletionResponse(
        id=str(uuid4()),
        created=int(time.time()),
        model=request.model,
        choices=[response_choice],
        usage=ChatCompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0),  # Placeholder values
    )

View File

@ -1,40 +1,39 @@
import logging
from typing import Dict, List
from api_v1.settings import settings
from fastapi import APIRouter, Depends, Response, Security, status
import requests
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field
from typing import List, Dict
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
### This should follow https://github.com/openai/openai-openapi/blob/master/openapi.yaml
# Define the router for the engines module
router = APIRouter(prefix="/engines", tags=["Search Endpoints"])
# Define the models for the engines module
class ListEnginesResponse(BaseModel):
    '''Response model for GET /engines/ — the full catalogue of models.'''
    data: List[Dict] = Field(..., description="All available models.")
class EngineResponse(BaseModel):
    '''Response model for GET /engines/{engine_id} — details of one engine.'''
    data: List[Dict] = Field(..., description="All available models.")
# NOTE(review): duplicate of the APIRouter already defined earlier in this
# module (diff artifact of old vs. new line placement) — one copy should be removed.
router = APIRouter(prefix="/engines", tags=["Search Endpoints"])
# Define the routes for the engines module
@router.get("/", response_model=ListEnginesResponse)
async def list_engines():
    '''
    List all available GPT4All models from
    https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models2.json

    Fixes: the diff left the old `raise NotImplementedError()` and an
    unreachable `return` before the new try block — removed. Added a request
    timeout so a stalled upstream cannot hang the handler, and chained the
    HTTPException to the original error for better tracebacks.

    NOTE(review): `requests.get` is a blocking call inside an async handler —
    it will block the event loop for the duration of the request; consider an
    async HTTP client or running it in a thread pool.
    '''
    try:
        response = requests.get(
            'https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models2.json',
            timeout=10,  # fail fast instead of waiting forever on the upstream
        )
        response.raise_for_status()  # raises HTTPError for 4xx/5xx status codes
        engines = response.json()
        return ListEnginesResponse(data=engines)
    except requests.RequestException as e:
        logger.error(f"Error fetching engine list: {e}")
        raise HTTPException(status_code=500, detail="Error fetching engine list") from e
# Define the routes for the engines module
@router.get("/{engine_id}", response_model=EngineResponse)
async def retrieve_engine(engine_id: str):
    '''
    Retrieve details for a single engine by its id.

    Fixes: the diff left the old `raise NotImplementedError()` and an
    unreachable `return EngineResponse()` (which would also fail validation,
    since `data` is a required field) before the new logic — removed. The
    HTTPException is chained to the original error for better tracebacks.
    '''
    try:
        # Placeholder: replace with actual engine-detail retrieval logic.
        engine_details = {"id": engine_id, "name": "Engine Name", "description": "Engine Description"}
        return EngineResponse(data=[engine_details])
    except Exception as e:
        # Broad catch is a stopgap for the placeholder logic; narrow this once
        # the real data source (and its failure modes) is known.
        logger.error(f"Error fetching engine details: {e}")
        raise HTTPException(status_code=500, detail=f"Error fetching details for engine {engine_id}") from e