# app.py
import os
import logging
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from huggingface_hub import InferenceClient
from typing import Optional

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize FastAPI app
app = FastAPI(
    title="LLM Chat API",
    description="API for getting chat responses from Llama model with image support",
    version="1.1.0"
)

class ChatRequest(BaseModel):
    text: str
    image_url: Optional[str] = None


class ChatResponse(BaseModel):
    response: str
    status: str
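
# Example payloads matching the models above (illustrative values only):
#   request  -> {"text": "Describe this image", "image_url": "https://example.com/photo.jpg"}
#   response -> {"response": "A short one-line description...", "status": "success"}
# image_url is optional; omit it for text-only requests.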

def llm_chat_response(text: str, image_url: Optional[str] = None) -> str:
    try:
        HF_TOKEN = os.getenv("HF_TOKEN")
        logger.info("Checking HF_TOKEN...")
        if not HF_TOKEN:
            logger.error("HF_TOKEN not found in environment variables")
            raise HTTPException(status_code=500, detail="HF_TOKEN not configured")

        logger.info("Initializing InferenceClient...")
        client = InferenceClient(
            provider="sambanova",
            api_key=HF_TOKEN
        )

        # Prepare content list for the message
        content = [
            {
                "type": "text",
                "text": text + " describe in one line only"
            }
        ]

        # Add image to content if provided
        if image_url:
            logger.info(f"Adding image URL to request: {image_url}")
            content.append({
                "type": "image_url",
                "image_url": {
                    "url": image_url
                }
            })

        messages = [
            {
                "role": "user",
                "content": content
            }
        ]

        logger.info("Sending request to model...")
        completion = client.chat.completions.create(
            model="meta-llama/Llama-3.2-11B-Vision-Instruct",
            messages=messages,
            max_tokens=500
        )
        return completion.choices[0].message['content']
    except HTTPException:
        # Propagate HTTPExceptions (e.g. missing token) without re-wrapping them
        raise
    except Exception as e:
        logger.error(f"Error in llm_chat_response: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    try:
        logger.info(f"Received chat request with text: {request.text}")
        if request.image_url:
            logger.info(f"Image URL included: {request.image_url}")
        response = llm_chat_response(request.text, request.image_url)
        return ChatResponse(response=response, status="success")
    except HTTPException as he:
        logger.error(f"HTTP Exception in chat endpoint: {str(he)}")
        raise he
    except Exception as e:
        logger.error(f"Unexpected error in chat endpoint: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/")
async def root():
    return {"message": "Welcome to the LLM Chat API with image support. Use POST /chat endpoint to get responses."}

@app.exception_handler(404)
async def not_found_handler(request, exc):
    return JSONResponse(
        status_code=404,
        content={"error": "Endpoint not found. Please use POST /chat for queries."}
    )

@app.exception_handler(405)
async def method_not_allowed_handler(request, exc):
    return JSONResponse(
        status_code=405,
        content={"error": "Method not allowed. Please check the API documentation."}
    )
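
# Example usage (a sketch, not part of this file's behavior): assuming the app is
# served locally, e.g. with `uvicorn app:app --host 0.0.0.0 --port 8000`, the /chat
# endpoint can be exercised like this (hostname, port, and image URL are placeholders):
#
#   curl -X POST http://localhost:8000/chat \
#        -H "Content-Type: application/json" \
#        -d '{"text": "What is shown here?", "image_url": "https://example.com/photo.jpg"}'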