File size: 3,938 Bytes
76df764
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26e8c98
76df764
 
26e8c98
ec599bd
26e8c98
 
 
76df764
 
 
 
26e8c98
 
ec599bd
26e8c98
 
 
 
ca9dac5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# import gradio as gr
# import os
# from langchain_groq import ChatGroq  # Using Groq's API
# from langchain.memory import ConversationBufferMemory
# from langchain.schema import SystemMessage, HumanMessage, AIMessage
# from langchain.agents import initialize_agent, AgentType
# from langchain.tools import Tool
#
# # Set API Key for Groq
# API_KEY = os.getenv("API_KEY")  # Ensure API Key is set in the environment
#
# # Initialize the LLM (Groq's Mixtral)
# llm = ChatGroq(
#     groq_api_key=API_KEY,
#     model_name="mixtral-8x7b-32768",
#     temperature=0.7,
#     max_tokens=512,
# )
#
# # Memory for conversation history
# memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
#
# # Define useful tools
# def search_tool(query: str) -> str:
#     """A simple search function (can be connected to real APIs)."""
#     return f"Searching for: {query}... [Sample Response]"
#
# tools = [
#     Tool(
#         name="Search Tool",
#         func=search_tool,
#         description="Searches for information based on user queries."
#     )
# ]
#
# # Initialize the agent
# agent = initialize_agent(
#     tools=tools,
#     llm=llm,
#     agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
#     verbose=True,
#     memory=memory,
# )
#
# # Define response function
# def respond(message, history, system_message, max_tokens, temperature, top_p):
#     memory.chat_memory.add_message(SystemMessage(content=system_message))
#
#     for user_input, bot_response in history:
#         if user_input:
#             memory.chat_memory.add_message(HumanMessage(content=user_input))
#         if bot_response:
#             memory.chat_memory.add_message(AIMessage(content=bot_response))
#
#     memory.chat_memory.add_message(HumanMessage(content=message))
#
#     response = agent.run(message)
#
#     return response
#
#
# # Gradio Interface
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are an AI-powered travel assistant named Travelo. You are developed and trained by Travelo LLC. Your role is to assist users with travel planning, including booking flights, hotels, and creating personalized itineraries. Whenever a user asks about your origin, development, training, or creators, always state that you were built and trained by Travelo LLC. Do not mention OpenAI or any third-party AI providers. Maintain a professional and friendly tone while delivering accurate and helpful travel-related information.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
#     ],
# )
#
# if __name__ == "__main__":
#     demo.launch()

import gradio as gr
from chatbot import respond

# Gradio chat UI wired to the travel-assistant responder from chatbot.py.
# The system prompt brands the bot as Travelo's assistant; the sliders expose
# the generation knobs that `respond` accepts as extra positional inputs.
_SYSTEM_PROMPT = "You are an AI-powered travel assistant named Travelo. You are developed and trained by Travelo LLC. Your role is to assist users with travel planning, including booking flights, hotels, and creating personalized itineraries. Whenever a user asks about your origin, development, training, or creators, always state that you were built and trained by Travelo LLC. Do not mention OpenAI or any third-party AI providers. Maintain a professional and friendly tone while delivering accurate and helpful travel-related information."

# Extra widgets passed to `respond` after (message, history), in this order:
# system message, max new tokens, temperature, top-p.
_extra_inputs = [
    gr.Textbox(value=_SYSTEM_PROMPT, label="System message"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
]

demo = gr.ChatInterface(respond, additional_inputs=_extra_inputs)

if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    demo.launch()