Initial Draft
app.py (CHANGED)
```diff
@@ -137,8 +137,10 @@ def fn_generate_QnA_response(mv_user_question, mv_pdf_input_file, mv_processing_
         template=lv_template,
         input_variables=["question", "context"]
     )
-    lv_model = ChatGoogleGenerativeAI(model="gemini-pro",
-                                      temperature=0.7, top_p=0.85)
+    # lv_model = ChatGoogleGenerativeAI(model="gemini-pro",
+    #                                   temperature=0.7, top_p=0.85)
+
+    lv_model = genai.GenerativeModel('gemini-pro')
 
     lv_file_name = mv_pdf_input_file.name[:-4] + ".txt"
     lv_temp_file_path = os.path.join(os.path.join("vectordb","txt"),lv_file_name)
@@ -153,12 +155,13 @@ def fn_generate_QnA_response(mv_user_question, mv_pdf_input_file, mv_processing_
         context=lv_text_data
     )
 
-    lv_llm_response = lv_model.invoke(lv_qa_formatted_prompt)
+    # lv_llm_response = lv_model.invoke(lv_qa_formatted_prompt).content
+    lv_llm_response = lv_model.generate_content(lv_qa_formatted_prompt).text
 
     print("Step5: LLM response generated")
     fn_display_user_messages("Step5: LLM response generated","Info", mv_processing_message)
 
-    return lv_llm_response
+    return lv_llm_response
```
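The commit swaps LangChain's `ChatGoogleGenerativeAI` wrapper for a direct call into the `google.generativeai` SDK: the model is now built with `genai.GenerativeModel('gemini-pro')`, and the reply string comes from `generate_content(...).text` instead of `invoke(...).content`. A minimal sketch of the new call path, assuming a `GOOGLE_API_KEY` environment variable and using an illustrative stand-in for the `lv_qa_formatted_prompt` string that the function builds from its `PromptTemplate`:

```python
import os
import google.generativeai as genai

# Configure the SDK; assumes GOOGLE_API_KEY is set in the environment.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

# Instantiate the model exactly as the diff does.
lv_model = genai.GenerativeModel('gemini-pro')

# Hypothetical stand-in for lv_qa_formatted_prompt, which the app formats
# from a template with "question" and "context" input variables.
lv_qa_formatted_prompt = (
    "Answer the question using only the context below.\n"
    "Context: The invoice total is $42.\n"
    "Question: What is the invoice total?"
)

# generate_content() returns a response object whose .text attribute holds
# the reply string, unlike the LangChain wrapper, where .invoke(...) returns
# a message object with a .content attribute.
lv_llm_response = lv_model.generate_content(lv_qa_formatted_prompt).text
print(lv_llm_response)
```

One side effect of the migration: the `temperature=0.7` and `top_p=0.85` sampling settings from the commented-out LangChain call are dropped. If that behavior is still wanted, this SDK can take them back via a generation config, e.g. `lv_model.generate_content(lv_qa_formatted_prompt, generation_config={"temperature": 0.7, "top_p": 0.85})`.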