import logging

import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

app = FastAPI()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load the model and tokenizer once at startup so requests don't pay that cost.
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Use the GPU if one is available; device=-1 keeps the pipeline on the CPU.
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)

# Instruction prepended to every user prompt.
PRE_PROMPT = "You are a helpful virtual assistant. Answer the user's question clearly and concisely."

class TextGenerationRequest(BaseModel):
    """Request body for /generate-text; the sampling fields mirror transformers' generate kwargs."""

    prompt: str
    max_new_tokens: int = 50
    temperature: float = 0.7
    top_k: int = 50
    top_p: float = 0.9
    do_sample: bool = True
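
# Example request body (values are illustrative; every field except
# "prompt" falls back to the defaults above):
# {
#   "prompt": "What is FastAPI?",
#   "max_new_tokens": 50,
#   "temperature": 0.7
# }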

@app.post("/generate-text")
async def generate_text(request: TextGenerationRequest):
    try:
        logger.info("Generating text...")

        # Prepend the fixed instruction to the user's prompt.
        combined_input = f"{PRE_PROMPT} {request.prompt}"

        outputs = pipe(
            combined_input,
            max_new_tokens=request.max_new_tokens,
            temperature=request.temperature,
            top_k=request.top_k,
            top_p=request.top_p,
            do_sample=request.do_sample,
            return_full_text=False,  # return only the completion, not the prompt
        )
        return {"generated_text": outputs[0]["generated_text"]}
    except Exception as e:
        logger.error(f"Error generating text: {e}")
        raise HTTPException(status_code=500, detail=str(e))
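
# Example call (assuming the server is listening on localhost:8000):
# curl -X POST http://localhost:8000/generate-text \
#      -H "Content-Type: application/json" \
#      -d '{"prompt": "What is FastAPI?"}'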

# Simple liveness check.
@app.get("/test")
async def root():
    return {"message": "API is running!"}