Fred808 committed on
Commit
516168a
·
verified ·
1 Parent(s): 222058c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +323 -55
app.py CHANGED
@@ -1,79 +1,347 @@
1
- from fastapi import FastAPI, File, UploadFile, HTTPException
 
 
2
  import requests
3
  import base64
4
- from pydantic import BaseModel
5
- from typing import Optional
6
 
7
- app = FastAPI()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
- # NVIDIA API endpoint and API key
10
- NVIDIA_API_URL = "https://ai.api.nvidia.com/v1/gr/meta/llama-3.2-90b-vision-instruct/chat/completions"
11
- API_KEY = "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" # Replace with your actual API key
12
 
13
- # Request model for text-based input
14
- class TextRequest(BaseModel):
15
- message: str
16
- max_tokens: Optional[int] = 512
17
- temperature: Optional[float] = 1.0
18
- top_p: Optional[float] = 1.0
19
 
20
- # Function to call the NVIDIA API
21
- def call_nvidia_api(payload: dict):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  headers = {
23
- "Authorization": f"Bearer {API_KEY}",
24
- "Accept": "application/json",
25
  }
26
- response = requests.post(NVIDIA_API_URL, headers=headers, json=payload)
27
- if response.status_code != 200:
28
- raise HTTPException(status_code=response.status_code, detail="NVIDIA API request failed")
29
- return response.json()
30
-
31
- # Endpoint for text-based input
32
- @app.post("/chat/text")
33
- async def chat_with_text(request: TextRequest):
34
- payload = {
35
- "model": "meta/llama-3.2-90b-vision-instruct",
36
- "messages": [{"role": "user", "content": request.message}],
37
- "max_tokens": request.max_tokens,
38
- "temperature": request.temperature,
39
- "top_p": request.top_p,
40
- "stream": False,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  }
42
  try:
43
- response = call_nvidia_api(payload)
44
- return {"response": response["choices"][0]["message"]["content"]}
 
 
 
45
  except Exception as e:
46
- raise HTTPException(status_code=500, detail=str(e))
 
 
47
 
48
- # Endpoint for image-based input
49
- @app.post("/chat/image")
50
- async def chat_with_image(file: UploadFile = File(...)):
51
- # Read and encode the image file to base64
52
- image_data = await file.read()
53
- base64_image = base64.b64encode(image_data).decode("utf-8")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
- # Prepare the payload for the NVIDIA API
 
 
 
 
 
 
 
 
 
56
  payload = {
57
  "model": "meta/llama-3.2-90b-vision-instruct",
58
  "messages": [
59
  {
60
  "role": "user",
61
- "content": f'What is in this image? <img src="data:image/png;base64,{base64_image}" />',
62
  }
63
  ],
64
  "max_tokens": 512,
65
- "temperature": 1.0,
66
- "top_p": 1.0,
67
- "stream": False,
68
  }
 
 
 
 
69
 
70
- try:
71
- response = call_nvidia_api(payload)
72
- return {"response": response["choices"][0]["message"]["content"]}
73
- except Exception as e:
74
- raise HTTPException(status_code=500, detail=str(e))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
- # Root endpoint
77
- @app.get("/")
78
- async def root():
79
- return {"message": "Welcome to the NVIDIA API FastAPI wrapper!"}
 
1
+ # chatbot_api.py
2
+ import os
3
+ import time
4
  import requests
5
  import base64
6
+ from datetime import datetime
7
+ from bs4 import BeautifulSoup
8
 
9
+ from fastapi import FastAPI, Request, HTTPException, BackgroundTasks, UploadFile, File
10
+ from fastapi.responses import JSONResponse, StreamingResponse
11
+
12
+ import openai
13
+
14
+ from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
15
+ from sqlalchemy.orm import sessionmaker, declarative_base
16
+ from sqlalchemy import Column, Integer, String, DateTime, Text
17
+
18
# --- Configuration & Environment Variables ---
# SECURITY FIX: the previous revision shipped live credentials (NVIDIA API
# key, Spoonacular key, Paystack key, and a database password inside
# DATABASE_URL) as hard-coded fallbacks committed to the repository.  Those
# secrets are compromised and must be rotated.  Defaults are now empty so a
# missing environment variable fails fast instead of silently using a
# leaked key.
SPOONACULAR_API_KEY = os.getenv("SPOONACULAR_API_KEY", "")
PAYSTACK_SECRET_KEY = os.getenv("PAYSTACK_SECRET_KEY", "")
DATABASE_URL = os.getenv("DATABASE_URL", "")
NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY", "")  # For NVIDIA LLM endpoints

openai.api_key = os.getenv("OPENAI_API_KEY", "")
25
+
26
+ # --- Database Setup ---
27
+ Base = declarative_base()
28
+
29
class ChatHistory(Base):
    """One logged chat message, inbound or outbound, tied to a user."""

    __tablename__ = "chat_history"

    id = Column(Integer, primary_key=True, index=True)
    user_id = Column(String, index=True)
    # Set automatically at insert time.
    timestamp = Column(DateTime, default=datetime.utcnow)
    direction = Column(String)  # 'inbound' or 'outbound'
    message = Column(Text)
36
+
37
class Order(Base):
    """A food order created through the chat flow."""

    __tablename__ = "orders"

    id = Column(Integer, primary_key=True, index=True)
    order_id = Column(String, unique=True, index=True)
    user_id = Column(String, index=True)
    dish = Column(String)
    quantity = Column(String)
    price = Column(String, default="0")  # Price as string (or use a numeric type)
    status = Column(String, default="Pending Payment")  # e.g., Pending Payment, Paid, Completed
    payment_reference = Column(String, nullable=True)
    # Set automatically at insert time.
    timestamp = Column(DateTime, default=datetime.utcnow)
48
+
49
# Create the asynchronous engine. Make sure DATABASE_URL is configured correctly.
engine = create_async_engine(DATABASE_URL, echo=True)
async_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)


async def init_db():
    """Create every table declared on Base, if it does not exist yet."""
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
56
 
57
# --- Global In-Memory Stores for Ephemeral Data ---
# Example: { user_id: { "flow": "order", "step": int, "data": dict } }
user_state = {}

# Local menu with nutritional details
menu_items = [
    {
        "name": "Jollof Rice",
        "description": "A spicy and flavorful rice dish",
        "price": 1500,
        "nutrition": "Calories: 300 kcal, Carbs: 50g, Protein: 10g, Fat: 5g",
    },
    {
        "name": "Fried Rice",
        "description": "A savory rice dish with vegetables and meat",
        "price": 1200,
        "nutrition": "Calories: 350 kcal, Carbs: 55g, Protein: 12g, Fat: 8g",
    },
    {
        "name": "Chicken Wings",
        "description": "Crispy fried chicken wings",
        "price": 2000,
        "nutrition": "Calories: 400 kcal, Carbs: 20g, Protein: 25g, Fat: 15g",
    },
    {
        "name": "Egusi Soup",
        "description": "A rich and hearty soup made with melon seeds",
        "price": 1000,
        "nutrition": "Calories: 250 kcal, Carbs: 15g, Protein: 8g, Fat: 10g",
    },
]
67
+
68
+ # --- Utility Functions ---
69
+
70
async def log_chat_to_db(user_id: str, direction: str, message: str):
    """Persist one chat message ('inbound' or 'outbound') asynchronously."""
    async with async_session() as session:
        session.add(ChatHistory(user_id=user_id, direction=direction, message=message))
        await session.commit()
76
+
77
def google_image_scrape(query: str) -> str:
    """
    Scrape Google Images for *query* and return the first http(s) image URL.

    Returns "" on any failure: network error, non-200 response, or no usable
    <img> tag.  Note: this basic scraper may break if Google changes its
    markup.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
    }
    search_url = f"https://www.google.com/search?tbm=isch&q={query}"
    try:
        response = requests.get(search_url, headers=headers, timeout=5)
    except requests.RequestException:
        # Narrowed from bare Exception: only network/HTTP failures are expected here.
        return ""
    # BUG FIX: the previous version fell off the end (returning None, not "")
    # when the response status was not 200, breaking the `-> str` contract.
    if response.status_code != 200:
        return ""
    soup = BeautifulSoup(response.text, "html.parser")
    for img in soup.find_all("img"):
        src = img.get("src")
        if src and src.startswith("http"):
            return src
    return ""
98
+
99
def create_paystack_payment_link(email: str, amount: int, reference: str) -> dict:
    """
    Initialize a Paystack transaction.

    Args:
        email: customer's email.
        amount: charge amount in kobo (NGN amount multiplied by 100).
        reference: unique order reference.

    Returns:
        Paystack's JSON response on HTTP 200, otherwise a dict with
        ``status: False`` and an explanatory ``message``.
    """
    request_headers = {
        "Authorization": f"Bearer {PAYSTACK_SECRET_KEY}",
        "Content-Type": "application/json",
    }
    body = {
        "email": email,
        "amount": amount,
        "reference": reference,
        "callback_url": "https://yourdomain.com/payment_callback"  # Replace with your callback URL.
    }
    try:
        resp = requests.post(
            "https://api.paystack.co/transaction/initialize",
            json=body,
            headers=request_headers,
            timeout=10,
        )
    except Exception as e:
        return {"status": False, "message": str(e)}
    if resp.status_code == 200:
        return resp.json()
    return {"status": False, "message": "Failed to initialize payment."}
126
+
127
+ # --- NVIDIA LLM Streaming Functions ---
128
 
129
def stream_text_completion(prompt: str):
    """
    Yield text chunks from NVIDIA's text-only model as they stream in.

    Uses the OpenAI client library pointed at NVIDIA's OpenAI-compatible
    endpoint.
    """
    from openai import OpenAI  # Using OpenAI client library

    nvidia_client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=NVIDIA_API_KEY,
    )
    stream = nvidia_client.chat.completions.create(
        model="meta/llama-3.1-405b-instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.2,
        top_p=0.7,
        max_tokens=1024,
        stream=True,
    )
    for part in stream:
        delta = part.choices[0].delta.content
        if delta is not None:
            yield delta
150
 
151
def stream_image_completion(image_b64: str):
    """
    Yield server-sent-event lines from NVIDIA's vision model for an image query.

    The base64-encoded image is embedded in an HTML <img> tag inside the
    user prompt, per NVIDIA's vision-model API convention.
    """
    request_headers = {
        "Authorization": f"Bearer {NVIDIA_API_KEY}",
        "Accept": "text/event-stream"
    }
    body = {
        "model": "meta/llama-3.2-90b-vision-instruct",
        "messages": [
            {
                "role": "user",
                "content": f'What is in this image? <img src="data:image/png;base64,{image_b64}" />'
            }
        ],
        "max_tokens": 512,
        "temperature": 1.00,
        "top_p": 1.00,
        "stream": True
    }
    invoke_url = "https://ai.api.nvidia.com/v1/gr/meta/llama-3.2-90b-vision-instruct/chat/completions"
    response = requests.post(invoke_url, headers=request_headers, json=body, stream=True)
    for raw_line in response.iter_lines():
        if raw_line:
            yield raw_line.decode("utf-8") + "\n"
178
 
179
# --- Internal Flow: Order Processing & Payment Integration ---
def process_internal_flow(user_id: str, message: str) -> str:
    """
    A simple two-step order flow:
      - Step 1: ask for the dish.
      - Step 2: ask for the quantity, then persist the order and generate a
        Paystack payment link.

    Returns the bot reply, or "" when the message does not belong to an
    internal flow (the caller then falls through to the LLM fallback).
    """
    state = user_state.get(user_id)
    if state is None:
        # Not in a flow yet: only the keyword "order" starts one.
        if "order" in message.lower():
            user_state[user_id] = {"flow": "order", "step": 1, "data": {}}
            return "Sure! What dish would you like to order?"
        return ""

    if state.get("flow") != "order":
        return ""

    step = state.get("step")
    data = state.get("data", {})

    if step == 1:
        data["dish"] = message.title()
        state["step"] = 2
        return f"You selected {data['dish']}. How many servings would you like?"

    if step == 2:
        # BUG FIX: a non-numeric quantity used to raise ValueError and bubble
        # up as a 500; re-ask the user instead of crashing.
        try:
            servings = int(message)
        except ValueError:
            return "Please enter the number of servings as a whole number (e.g. 2)."
        if servings <= 0:
            return "Please enter a quantity of at least 1."
        data["quantity"] = message
        order_id = f"ORD-{int(time.time())}"
        data["order_id"] = order_id
        # BUG FIX: pricing previously used a flat ₦1500 per serving for every
        # dish, contradicting the per-dish prices in menu_items.  Look the
        # price up by dish name; unknown dishes keep the old ₦1500 fallback.
        price_per_serving = next(
            (item["price"] for item in menu_items
             if item["name"].lower() == data["dish"].lower()),
            1500,
        )
        total_price = servings * price_per_serving
        data["price"] = str(total_price)

        # Save order details to the DB asynchronously (we are running inside
        # the event-loop thread, so create_task is safe here).
        import asyncio

        async def save_order():
            async with async_session() as session:
                session.add(Order(
                    order_id=order_id,
                    user_id=user_id,
                    dish=data["dish"],
                    quantity=data["quantity"],
                    price=str(total_price),
                    status="Pending Payment",
                ))
                await session.commit()

        asyncio.create_task(save_order())

        # Clear the in-memory state.
        del user_state[user_id]

        # Assume we have the customer's email; using a placeholder.
        email = "[email protected]"
        payment_data = create_paystack_payment_link(email, total_price * 100, order_id)
        if payment_data.get("status"):
            payment_link = payment_data["data"]["authorization_url"]
            return (f"Thank you for your order of {data['quantity']} serving(s) of {data['dish']}! "
                    f"Your Order ID is {order_id}.\nPlease complete payment here: {payment_link}")
        return (f"Your order has been placed with Order ID {order_id}, "
                f"but we could not initialize payment. Please try again later.")

    return ""
236
+
237
# --- FastAPI Setup & Endpoints ---
app = FastAPI()


@app.on_event("startup")
async def on_startup():
    """Create the database tables when the service boots."""
    await init_db()
243
+
244
+ @app.post("/chatbot")
245
+ async def chatbot_response(request: Request, background_tasks: BackgroundTasks):
246
+ """
247
+ Main chatbot endpoint.
248
+ Expects a JSON payload with:
249
+ - 'user_id'
250
+ - 'message' (text query)
251
+ - Optionally, 'is_image': true and 'image_base64': <base64 string> for image queries.
252
+ Streaming responses will be returned.
253
+ """
254
+ data = await request.json()
255
+ user_id = data.get("user_id")
256
+ user_message = data.get("message", "").strip()
257
+ is_image = data.get("is_image", False)
258
+ image_b64 = data.get("image_base64", None)
259
+
260
+ if not user_id:
261
+ raise HTTPException(status_code=400, detail="Missing user_id in payload.")
262
+
263
+ # Log inbound message if it's a text query (for image queries, you might log separately).
264
+ if user_message:
265
+ background_tasks.add_task(log_chat_to_db, user_id, "inbound", user_message)
266
+ if is_image and image_b64 is None:
267
+ raise HTTPException(status_code=400, detail="is_image is true but no image_base64 provided.")
268
+
269
+ # If an image is provided, use the image model.
270
+ if is_image and image_b64:
271
+ # Verify the image is small enough.
272
+ if len(image_b64) >= 180_000:
273
+ raise HTTPException(status_code=400, detail="Image too large. Use a smaller image or the assets API.")
274
+ # Return a streaming response from the image-based LLM.
275
+ return StreamingResponse(stream_image_completion(image_b64), media_type="text/plain")
276
+
277
+ # --- Process textual queries (menu, nutritional facts, internal flows) ---
278
+ if "menu" in user_message.lower():
279
+ menu_with_images = []
280
+ for item in menu_items:
281
+ image_url = google_image_scrape(item["name"])
282
+ menu_with_images.append({"name": item["name"], "description": item["description"], "price": item["price"], "image_url": image_url})
283
+ response_payload = {
284
+ "response": "Here’s our delicious menu:",
285
+ "menu": menu_with_images,
286
+ "follow_up": ("Would you like to see nutritional facts for any dish? "
287
+ "Just type, for example, 'Nutritional facts for Jollof Rice'.")
288
+ }
289
+ background_tasks.add_task(log_chat_to_db, user_id, "outbound", str(response_payload))
290
+ return JSONResponse(content=response_payload)
291
+
292
+ if "nutritional facts for" in user_message.lower():
293
+ dish_name = user_message.lower().replace("nutritional facts for", "").strip().title()
294
+ dish = next((item for item in menu_items if item["name"].lower() == dish_name.lower()), None)
295
+ if dish:
296
+ response_text = f"Nutritional facts for {dish['name']}:\n{dish['nutrition']}"
297
+ else:
298
+ response_text = f"Sorry, I couldn't find nutritional facts for {dish_name}."
299
+ background_tasks.add_task(log_chat_to_db, user_id, "outbound", response_text)
300
+ return JSONResponse(content={"response": response_text})
301
+
302
+ internal_response = process_internal_flow(user_id, user_message)
303
+ if internal_response:
304
+ background_tasks.add_task(log_chat_to_db, user_id, "outbound", internal_response)
305
+ return JSONResponse(content={"response": internal_response})
306
+
307
+ # --- Fallback: Use NVIDIA text LLM with streaming ---
308
+ prompt = f"User query: {user_message}\nGenerate a helpful response for a restaurant chatbot."
309
+
310
+ # Create a streaming response using the NVIDIA text model.
311
+ def stream_response():
312
+ for chunk in stream_text_completion(prompt):
313
+ yield chunk
314
+
315
+ background_tasks.add_task(log_chat_to_db, user_id, "outbound", f"LLM fallback response for prompt: {prompt}")
316
+ return StreamingResponse(stream_response(), media_type="text/plain")
317
+
318
+ @app.get("/chat_history/{user_id}")
319
+ async def get_chat_history(user_id: str):
320
+ """
321
+ Retrieve the chat history for a given user from the database.
322
+ """
323
+ async with async_session() as session:
324
+ result = await session.execute(
325
+ ChatHistory.__table__.select().where(ChatHistory.user_id == user_id)
326
+ )
327
+ history = result.fetchall()
328
+ return [dict(row) for row in history]
329
+
330
+ @app.get("/order/{order_id}")
331
+ async def get_order(order_id: str):
332
+ """
333
+ Retrieve details for a specific order from the database.
334
+ """
335
+ async with async_session() as session:
336
+ result = await session.execute(
337
+ Order.__table__.select().where(Order.order_id == order_id)
338
+ )
339
+ order = result.fetchone()
340
+ if order:
341
+ return dict(order)
342
+ else:
343
+ raise HTTPException(status_code=404, detail="Order not found.")
344
 
345
+ if __name__ == "__main__":
346
+ import uvicorn
347
+ uvicorn.run(app, host="0.0.0.0", port=8000)