# app.py
import os
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from huggingface_hub import InferenceClient

# Initialize FastAPI app
app = FastAPI(
    title="LLM Chat API",
    description="API for getting chat responses from a Llama model",
    version="1.0.0"
)

class ChatRequest(BaseModel):
    text: str

class ChatResponse(BaseModel):
    response: str

def llm_chat_response(text: str) -> str:
    # Fail fast if the Hugging Face token is missing; checking before the
    # try block keeps this HTTPException from being re-wrapped as a generic
    # error by the handler below.
    HF_TOKEN = os.getenv("HF_TOKEN")
    if not HF_TOKEN:
        raise HTTPException(status_code=500, detail="HF_TOKEN not configured")

    try:
        client = InferenceClient(api_key=HF_TOKEN)
        # The content field is a list of typed parts because the target
        # model is multimodal; only a text part is sent here.
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": text + " Describe in one line only."
                    }
                ]
            }
        ]

        response_from_llama = client.chat.completions.create(
            model="meta-llama/Llama-3.2-11B-Vision-Instruct",
            messages=messages,
            max_tokens=500
        )
        return response_from_llama.choices[0].message.content
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    try:
        response = llm_chat_response(request.text)
        return ChatResponse(response=response)
    except HTTPException:
        # Propagate HTTP errors (e.g., the missing-token 500) unchanged.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/")
async def root():
    return {"message": "Welcome to the LLM Chat API. Use POST /chat endpoint to get responses."}