Ritvik committed on
Commit
76df764
·
1 Parent(s): 2195637

Save local changes before pull

Browse files
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Default ignored files
2
+ /shelf/
3
+ /workspace.xml
4
+ # Editor-based HTTP Client requests
5
+ /httpRequests/
6
+ # Datasource local storage ignored files
7
+ /dataSources/
8
+ /dataSources.local.xml
.idea/Travel_AI_V1.iml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$">
5
+ <excludeFolder url="file://$MODULE_DIR$/.venv" />
6
+ </content>
7
+ <orderEntry type="jdk" jdkName="Python 3.9 (Travel_AI_V1)" jdkType="Python SDK" />
8
+ <orderEntry type="sourceFolder" forTests="false" />
9
+ </component>
10
+ </module>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="Black">
4
+ <option name="sdkName" value="Python 3.9 (Travel_AI_V1)" />
5
+ </component>
6
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (Travel_AI_V1)" project-jdk-type="Python SDK" />
7
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/Travel_AI_V1.iml" filepath="$PROJECT_DIR$/.idea/Travel_AI_V1.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="VcsDirectoryMappings">
4
+ <mapping directory="" vcs="Git" />
5
+ </component>
6
+ </project>
agent.py CHANGED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

from langchain_groq import ChatGroq
from langchain.agents import initialize_agent, AgentType

from memory import memory
from tools import tools

# The Groq API key is supplied via the environment; fail fast at import
# time if it is missing so misconfiguration surfaces immediately.
API_KEY = os.getenv("API_KEY")
if not API_KEY:
    raise ValueError("API_KEY is not set. Please define it in your environment variables.")

# LLM backend: Groq-hosted Mixtral with fixed sampling settings.
llm = ChatGroq(
    groq_api_key=API_KEY,
    model_name="mixtral-8x7b-32768",
    temperature=0.7,
    max_tokens=512,
)

# Conversational ReAct agent wired to the shared tool list and
# conversation memory defined in sibling modules.
agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    verbose=True,
    memory=memory,
)
app.py CHANGED
@@ -1,69 +1,89 @@
1
- import gradio as gr
2
- import os
3
- from langchain_groq import ChatGroq # Using Groq's API
4
- from langchain.memory import ConversationBufferMemory
5
- from langchain.schema import SystemMessage, HumanMessage, AIMessage
6
- from langchain.agents import initialize_agent, AgentType
7
- from langchain.tools import Tool
8
-
9
- # Set API Key for Groq
10
- API_KEY = os.getenv("API_KEY") # Ensure API Key is set in the environment
11
-
12
- # Initialize the LLM (Groq's Mixtral)
13
- llm = ChatGroq(
14
- groq_api_key=API_KEY,
15
- model_name="mixtral-8x7b-32768",
16
- temperature=0.7,
17
- max_tokens=512,
18
- )
19
-
20
- # Memory for conversation history
21
- memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
22
-
23
- # Define useful tools
24
- def search_tool(query: str) -> str:
25
- """A simple search function (can be connected to real APIs)."""
26
- return f"Searching for: {query}... [Sample Response]"
27
-
28
- tools = [
29
- Tool(
30
- name="Search Tool",
31
- func=search_tool,
32
- description="Searches for information based on user queries."
33
- )
34
- ]
35
-
36
- # Initialize the agent
37
- agent = initialize_agent(
38
- tools=tools,
39
- llm=llm,
40
- agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
41
- verbose=True,
42
- memory=memory,
43
- )
44
-
45
- # Define response function
46
- def respond(message, history, system_message, max_tokens, temperature, top_p):
47
- memory.chat_memory.add_message(SystemMessage(content=system_message))
48
-
49
- for user_input, bot_response in history:
50
- if user_input:
51
- memory.chat_memory.add_message(HumanMessage(content=user_input))
52
- if bot_response:
53
- memory.chat_memory.add_message(AIMessage(content=bot_response))
54
-
55
- memory.chat_memory.add_message(HumanMessage(content=message))
56
-
57
- response = agent.run(message)
58
-
59
- return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
 
 
 
61
 
62
  # Gradio Interface
63
  demo = gr.ChatInterface(
64
  respond,
65
  additional_inputs=[
66
- gr.Textbox(value="You are a helpful Travel AI assistant.Your name is Travelo", label="System message"),
 
 
 
67
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
68
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
69
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
 
1
+ # import gradio as gr
2
+ # import os
3
+ # from langchain_groq import ChatGroq # Using Groq's API
4
+ # from langchain.memory import ConversationBufferMemory
5
+ # from langchain.schema import SystemMessage, HumanMessage, AIMessage
6
+ # from langchain.agents import initialize_agent, AgentType
7
+ # from langchain.tools import Tool
8
+ #
9
+ # # Set API Key for Groq
10
+ # API_KEY = os.getenv("API_KEY") # Ensure API Key is set in the environment
11
+ #
12
+ # # Initialize the LLM (Groq's Mixtral)
13
+ # llm = ChatGroq(
14
+ # groq_api_key=API_KEY,
15
+ # model_name="mixtral-8x7b-32768",
16
+ # temperature=0.7,
17
+ # max_tokens=512,
18
+ # )
19
+ #
20
+ # # Memory for conversation history
21
+ # memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
22
+ #
23
+ # # Define useful tools
24
+ # def search_tool(query: str) -> str:
25
+ # """A simple search function (can be connected to real APIs)."""
26
+ # return f"Searching for: {query}... [Sample Response]"
27
+ #
28
+ # tools = [
29
+ # Tool(
30
+ # name="Search Tool",
31
+ # func=search_tool,
32
+ # description="Searches for information based on user queries."
33
+ # )
34
+ # ]
35
+ #
36
+ # # Initialize the agent
37
+ # agent = initialize_agent(
38
+ # tools=tools,
39
+ # llm=llm,
40
+ # agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
41
+ # verbose=True,
42
+ # memory=memory,
43
+ # )
44
+ #
45
+ # # Define response function
46
+ # def respond(message, history, system_message, max_tokens, temperature, top_p):
47
+ # memory.chat_memory.add_message(SystemMessage(content=system_message))
48
+ #
49
+ # for user_input, bot_response in history:
50
+ # if user_input:
51
+ # memory.chat_memory.add_message(HumanMessage(content=user_input))
52
+ # if bot_response:
53
+ # memory.chat_memory.add_message(AIMessage(content=bot_response))
54
+ #
55
+ # memory.chat_memory.add_message(HumanMessage(content=message))
56
+ #
57
+ # response = agent.run(message)
58
+ #
59
+ # return response
60
+ #
61
+ #
62
+ # # Gradio Interface
63
+ # demo = gr.ChatInterface(
64
+ # respond,
65
+ # additional_inputs=[
66
+ # gr.Textbox(value="You are an AI-powered travel assistant named Travelo. You are developed and trained by Travelo LLC. Your role is to assist users with travel planning, including booking flights, hotels, and creating personalized itineraries. Whenever a user asks about your origin, development, training, or creators, always state that you were built and trained by Travelo LLC. Do not mention OpenAI or any third-party AI providers. Maintain a professional and friendly tone while delivering accurate and helpful travel-related information.", label="System message"),
67
+ # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
68
+ # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
69
+ # gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
70
+ # ],
71
+ # )
72
+ #
73
+ # if __name__ == "__main__":
74
+ # demo.launch()
75
 
76
+ import gradio as gr
77
+ from chatbot import respond
78
 
79
  # Gradio Interface
80
  demo = gr.ChatInterface(
81
  respond,
82
  additional_inputs=[
83
+ gr.Textbox(
84
+ value="You are an AI-powered travel assistant named Travelo. You are developed and trained by Travelo LLC. Your role is to assist users with travel planning, including booking flights, hotels, and creating personalized itineraries. Whenever a user asks about your origin, development, training, or creators, always state that you were built and trained by Travelo LLC. Do not mention OpenAI or any third-party AI providers. Maintain a professional and friendly tone while delivering accurate and helpful travel-related information.",
85
+ label="System message"
86
+ ),
87
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
88
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
89
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
chatbot.py CHANGED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from langchain.schema import SystemMessage, HumanMessage, AIMessage

from memory import memory
from agent import agent


def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Generate a reply for the Gradio chat UI.

    Parameters:
        message: The user's newest message (str).
        history: Prior (user, assistant) exchanges as passed by
            gr.ChatInterface, a list of 2-tuples.
        system_message: System prompt to place ahead of the conversation.
        max_tokens, temperature, top_p: Sampling controls from the UI;
            currently unused because the LLM is configured once at startup
            (parameters kept for interface compatibility with the UI).

    Returns:
        The agent's reply as a string.
    """
    # BUG FIX: `memory` persists across calls, and Gradio supplies the
    # *entire* history on every turn.  Re-adding it without clearing first
    # duplicated every message on each call, growing the prompt without
    # bound.  Rebuild the buffer from scratch instead.
    memory.chat_memory.clear()

    # Seed the conversation with the system prompt.
    memory.chat_memory.add_message(SystemMessage(content=system_message))

    # Replay the prior exchanges into memory.
    for user_input, bot_response in history:
        if user_input:
            memory.chat_memory.add_message(HumanMessage(content=user_input))
        if bot_response:
            memory.chat_memory.add_message(AIMessage(content=bot_response))

    # Record the new user message, then let the agent answer it using
    # the freshly rebuilt memory.
    memory.chat_memory.add_message(HumanMessage(content=message))
    return agent.run(message)
memory.py CHANGED
@@ -0,0 +1,4 @@
 
 
 
 
 
from langchain.memory import ConversationBufferMemory

# Shared conversation buffer for the whole app.  `chat_history` is the
# variable name the agent's prompt template expects, and messages are
# returned as message objects rather than a flattened string.
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True,
)
tools.py CHANGED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.tools import Tool
2
+
def search_tool(query: str) -> str:
    """Placeholder search: echo the query back in a canned response.

    Stands in for a real search backend; swap the body for an actual
    API call when one is available.
    """
    return f"Searching for: {query}... [Sample Response]"
# Registry of tools the agent may invoke; currently just the stub search.
_search = Tool(
    name="Search Tool",
    func=search_tool,
    description="Searches for information based on user queries."
)
tools = [_search]