create files
Browse files- utils/prebuilt_chain.py +60 -0
utils/prebuilt_chain.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.chains import create_history_aware_retriever
|
2 |
+
from langchain.chains.combine_documents import create_stuff_documents_chain
|
3 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
4 |
+
|
5 |
+
def history_aware_retriever(llm, retriever):
    """Create a retriever chain that accounts for conversation history.

    If there is no ``chat_history``, the input is passed directly to the
    retriever. If there is ``chat_history``, the prompt and LLM are used to
    rewrite the latest question into a standalone search query, which is then
    passed to the retriever.

    Args:
        llm: The language model used to reformulate the question.
        retriever: The retriever used to find relevant documents.

    Returns:
        A runnable chain (from ``create_history_aware_retriever``) that takes
        ``input`` and ``chat_history`` and returns a list of documents.
    """
    # System instruction: rewrite the question so it is self-contained,
    # but never answer it here — retrieval needs a query, not an answer.
    contextualize_q_system_prompt = (
        "Given a chat history and the latest user question "
        "which might reference context in the chat history, "
        "formulate a standalone question which can be understood "
        "without the chat history. Do NOT answer the question, just "
        "reformulate it if needed and otherwise return it as is."
    )
    contextualize_q_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", contextualize_q_system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ]
    )
    # Return directly: the original bound this to a local named after the
    # function itself, shadowing it for no benefit.
    return create_history_aware_retriever(llm, retriever, contextualize_q_prompt)
35 |
+
|
36 |
+
def documents_retriever(llm):
    """Create a question-answering chain that stuffs retrieved documents into the prompt.

    Builds a ChatPromptTemplate expecting ``context`` (retrieved documents),
    ``chat_history``, and ``input``, then wraps it with
    ``create_stuff_documents_chain`` so all documents are passed to the model
    in a single prompt.

    Args:
        llm: The language model used to answer the question.

    Returns:
        A runnable chain (from ``create_stuff_documents_chain``) that takes
        ``context``, ``chat_history``, and ``input`` and returns an answer.
    """
    # Prompt fix: corrected "an helpfull assistant" -> "a helpful assistant"
    # and "Always response in" -> "Always respond in". These typos were sent
    # verbatim to the model; answers are always requested in Indonesian.
    system_prompt = (
        "You are a helpful assistant. "
        "Use the following pieces of retrieved context to answer the question. "
        "If you don't know the answer or the context is not retrieved, SAY THAT YOU DON'T KNOW!!. "
        "Always respond in Bahasa Indonesia or Indonesian Language. "
        "Context: {context}"
    )

    qa_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ]
    )
    # Stuff all retrieved documents into the single {context} slot.
    question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)

    return question_answer_chain