# app.py
import os
import logging

from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from huggingface_hub import InferenceClient

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize FastAPI app
app = FastAPI(
    title="LLM Chat API",
    description="API for getting chat responses from the Llama model",
    version="1.0.0"
)


class ChatRequest(BaseModel):
    text: str


class ChatResponse(BaseModel):
    response: str
    status: str
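
# Illustrative round trip for the models above (example values, not real output):
#   POST /chat  with body  {"text": "What is FastAPI?"}
#   -> {"response": "FastAPI is a modern async Python web framework.", "status": "success"}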


def llm_chat_response(text: str) -> str:
    try:
        HF_TOKEN = os.getenv("HF_TOKEN")
        logger.info("Checking HF_TOKEN...")
        if not HF_TOKEN:
            logger.error("HF_TOKEN not found in environment variables")
            raise HTTPException(status_code=500, detail="HF_TOKEN not configured")

        logger.info("Initializing InferenceClient...")
        client = InferenceClient(token=HF_TOKEN)

        # Build a chat-completions message in the multimodal format:
        # "content" is a list of typed parts rather than a plain string.
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": text + " describe in one line only"
                    }
                ]
            }
        ]
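
        # With this format the Vision-Instruct model can also accept an image
        # part alongside the text, e.g. (hypothetical URL):
        #   {"type": "image_url", "image_url": {"url": "https://example.com/cat.jpg"}}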
logger.info("Sending request to model...")
response_from_llama = client.chat.completions.create(
model="meta-llama/Llama-3.2-11B-Vision-Instruct",
messages=messages,
max_tokens=500
)
return response_from_llama.choices[0].message['content']
except Exception as e:
logger.error(f"Error in llm_chat_response: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
try:
logger.info(f"Received chat request with text: {request.text}")
response = llm_chat_response(request.text)
return ChatResponse(response=response, status="success")
except HTTPException as he:
logger.error(f"HTTP Exception in chat endpoint: {str(he)}")
raise he
except Exception as e:
logger.error(f"Unexpected error in chat endpoint: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
@app.get("/")
async def root():
return {"message": "Welcome to the LLM Chat API. Use POST /chat endpoint to get responses."}
@app.exception_handler(404)
async def not_found_handler(request, exc):
return JSONResponse(
status_code=404,
content={"error": "Endpoint not found. Please use POST /chat for queries."}
)
@app.exception_handler(405)
async def method_not_allowed_handler(request, exc):
return JSONResponse(
status_code=405,
content={"error": "Method not allowed. Please check the API documentation."}
) |
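

# Optional local entrypoint, a minimal sketch: port 7860 is an assumption
# (the default port exposed by Hugging Face Spaces); adjust for other hosts.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)

# Once the server is up, the /chat endpoint can be exercised with, e.g.:
#   curl -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Tell me about Llama models"}'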