Update app.py
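Inline the word-by-word output into process_message: each stage now appends its heading (Analysis, Rethinking, Final Response) and its response text to full_response one word at a time, with a 0.1 s pause per word, and the now-unused stream_words helper is removed.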
app.py CHANGED
@@ -27,8 +27,10 @@ def process_message(message, history, analysis_prompt, rethinking_prompt, refine

     gpt4o_prompt = f"{analysis_prompt}\n\nConversation history:\n{context}\n\nUser query: {message}\n\nPlease analyze this query and respond accordingly."
     gpt4o_response = get_llm_response(gpt4o_prompt, "gpt-4o-mini")
-
-
+    full_response += "Analysis:\n"
+    for word in gpt4o_response:
+        full_response += word + " "
+        time.sleep(0.1)
     yield full_response

     if "<error>" in " ".join(gpt4o_response):
@@ -36,8 +38,10 @@ def process_message(message, history, analysis_prompt, rethinking_prompt, refine

     llama_prompt = f"{rethinking_prompt}\n\nConversation history:\n{context}\n\nOriginal user query: {message}\n\nInitial response: {' '.join(gpt4o_response)}\n\nPlease review and suggest improvements or confirm if satisfactory."
     llama_response = get_llm_response(llama_prompt, "gpt-4o-mini")
-
-
+    full_response += "\n\nRethinking:\n"
+    for word in llama_response:
+        full_response += word + " "
+        time.sleep(0.1)
     yield full_response

     if "<error>" in " ".join(llama_response):
@@ -46,20 +50,15 @@ def process_message(message, history, analysis_prompt, rethinking_prompt, refine
     if "done" not in " ".join(llama_response).lower():
         final_gpt4o_prompt = f"{refinement_prompt}\n\nConversation history:\n{context}\n\nOriginal user query: {message}\n\nInitial response: {' '.join(gpt4o_response)}\n\nSuggestion: {' '.join(llama_response)}\n\nPlease provide a final response considering the suggestion."
         final_response = get_llm_response(final_gpt4o_prompt, "gpt-4o-mini")
-
-
+        full_response += "\n\nFinal Response:\n"
+        for word in final_response:
+            full_response += word + " "
+            time.sleep(0.1)
         yield full_response
     else:
         full_response += "\n\nFinal Response: The initial response is satisfactory and no further refinement is needed."
         yield full_response

-def stream_words(prefix, words):
-    response = prefix
-    for word in words:
-        response += word + " "
-        time.sleep(0.1)  # Adjust this value to control the speed of word streaming
-        yield response
-
 def respond(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
     for chunk in process_message(message, history, analysis_prompt, rethinking_prompt, refinement_prompt):
         yield chunk
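For reference, the loop this commit inlines at each of the three stages is essentially the body of the deleted stream_words helper. Below is a minimal sketch of the same pattern as a single reusable generator; the name stream_stage and its parameters are illustrative, not part of app.py, and it assumes the caller is a Gradio-style handler that re-renders the whole transcript on every yield.

import time

def stream_stage(transcript, heading, words, delay=0.1):
    # Illustrative helper: append the stage heading, then one word at a
    # time, yielding the growing transcript after each word so the UI
    # updates incrementally instead of once per stage.
    transcript += heading
    for word in words:
        transcript += word + " "
        time.sleep(delay)  # pacing between words; 0.1 s matches the diff
        yield transcript

One detail worth noting: the unchanged yield full_response lines appear to sit after each inlined loop, in which case every stage is emitted once as a block and the per-word sleep only delays that single update; yielding inside the loop, as the removed stream_words did, is what gives a true word-by-word effect.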
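get_llm_response itself is not part of this diff, but the calling code iterates over its return value word by word and rejoins it with ' '.join(...), so it presumably returns the completion pre-split into words; the "<error>" checks suggest failures come back as an error token in the same word list. A minimal sketch under those assumptions, using the OpenAI Python client (app.py's real implementation may differ):

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def get_llm_response(prompt, model):
    # Sketch only: return the completion as a list of words, or a word
    # list containing "<error>" on failure, matching the conventions
    # the calling code above relies on.
    try:
        completion = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
        return completion.choices[0].message.content.split()
    except Exception as exc:
        return f"<error> {exc}".split()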