smvaideesh committed on
Commit fd6fdc6 · verified · 1 Parent(s): d16e30f

Update app.py

Files changed (1)
app.py +136 -136
app.py CHANGED
@@ -1,136 +1,136 @@
- import gradio as gr
- from huggingface_hub import InferenceClient
- from langchain.embeddings import HuggingFaceEmbeddings
- from langchain.vectorstores import FAISS
- from langchain.text_splitter import CharacterTextSplitter
- from langchain.document_loaders import PyPDFLoader
- import os
-
- # Load the model client
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
- # Initialize vector store
- vector_store = None
-
- # Preload and process the PDF document
- #PDF_PATH = "generalsymptoms.pdf"  # Path to the pre-defined PDF document
-
- PDF_PATH = "C:/Users/palanive/Documents/generalsymptoms.pdf"
-
- def preload_pdf():
-     global vector_store
-
-     # Load PDF and extract text
-     loader = PyPDFLoader(PDF_PATH)
-     documents = loader.load()
-
-     # Split the text into smaller chunks for retrieval
-     text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
-     docs = text_splitter.split_documents(documents)
-
-     # Compute embeddings for the chunks
-     embeddings = HuggingFaceEmbeddings()
-     vector_store = FAISS.from_documents(docs, embeddings)
-
-     print(f"PDF '{PDF_PATH}' loaded and indexed successfully.")
-
- # Response generation
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     # max_tokens,
-     # temperature,
-     # top_p,
- ):
-     global vector_store
-
-     if vector_store is None:
-         return "The PDF document is not loaded. Please check the code setup."
-
-     # Retrieve relevant chunks from the PDF
-     relevant_docs = vector_store.similarity_search(message, k=3)
-     context = "\n".join([doc.page_content for doc in relevant_docs])
-
-     # Combine system message, context, and user message
-     full_system_message = (
-         f"{system_message}\n\nContext from the document:\n{context}\n\n"
-     )
-
-     messages = [{"role": "system", "content": full_system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         # max_tokens=max_tokens,
-         stream=True,
-         # temperature=temperature,
-         # top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-         response += token
-         yield response
-
- # Gradio interface
- #demo = gr.Blocks()
-
- demo = gr.Blocks(css="""
-
- .gr-chat-container {
-     display: flex;
-     background-color: skyblue;
-     justify-content: center;
-     align-items: center;
-     height: 100vh;
-     padding: 20px;
- }
-
- .gr-chat {
-     height: 80vh;
-     width: 70vw;
-     justify-content: center;
-     align-items: center;
-     border: 1px solid #ccc;
-     padding: 10px;
-     box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.1);
-     overflow-y: auto;
- }
- """)
-
-
- with demo:
-     with gr.Row(elem_classes=["gr-chat-container"]):
-         with gr.Column(elem_classes=["gr-chat"]):
-             chatbot = gr.ChatInterface(
-                 respond,
-                 additional_inputs=[
-                     gr.Textbox(
-                         value=(
-                             "You are going to act like a medical practitioner. Hear the symptoms, "
-                             "diagnose the disease, mention the disease name as heading, and suggest tips "
-                             "to overcome the issue. Base your answers on the provided document. limit the response to 3-4 sentences. list out the response point by point"
-                         ), visible=False,
-                         label="System message",
-                     ),
-                 ],
-                 examples=[
-                     ["I am not well and feeling feverish, tired"],
-                     ["Can you guide me through quick health tips?"],
-                     ["How do I stop worrying about things I can't control?"],
-                 ],
-                 title="Diagnify 🕊️",
-             )
-
-
- if __name__ == "__main__":
-     preload_pdf()
-     demo.launch()
 
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.document_loaders import PyPDFLoader
+ import os
+
+ # Load the model client
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+ # Initialize vector store
+ vector_store = None
+
+ # Preload and process the PDF document
+ #PDF_PATH = "general symptoms.pdf"  # Path to the pre-defined PDF document
+
+ PDF_PATH = "general symptoms.pdf"
+
+ def preload_pdf():
+     global vector_store
+
+     # Load PDF and extract text
+     loader = PyPDFLoader(PDF_PATH)
+     documents = loader.load()
+
+     # Split the text into smaller chunks for retrieval
+     text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
+     docs = text_splitter.split_documents(documents)
+
+     # Compute embeddings for the chunks
+     embeddings = HuggingFaceEmbeddings()
+     vector_store = FAISS.from_documents(docs, embeddings)
+
+     print(f"PDF '{PDF_PATH}' loaded and indexed successfully.")
+
+ # Response generation
+ def respond(
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     # max_tokens,
+     # temperature,
+     # top_p,
+ ):
+     global vector_store
+
+     if vector_store is None:
+         return "The PDF document is not loaded. Please check the code setup."
+
+     # Retrieve relevant chunks from the PDF
+     relevant_docs = vector_store.similarity_search(message, k=3)
+     context = "\n".join([doc.page_content for doc in relevant_docs])
+
+     # Combine system message, context, and user message
+     full_system_message = (
+         f"{system_message}\n\nContext from the document:\n{context}\n\n"
+     )
+
+     messages = [{"role": "system", "content": full_system_message}]
+
+     for val in history:
+         if val[0]:
+             messages.append({"role": "user", "content": val[0]})
+         if val[1]:
+             messages.append({"role": "assistant", "content": val[1]})
+
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+
+     for message in client.chat_completion(
+         messages,
+         # max_tokens=max_tokens,
+         stream=True,
+         # temperature=temperature,
+         # top_p=top_p,
+     ):
+         token = message.choices[0].delta.content
+         response += token
+         yield response
+
+ # Gradio interface
+ #demo = gr.Blocks()
+
+ demo = gr.Blocks(css="""
+
+ .gr-chat-container {
+     display: flex;
+     background-color: skyblue;
+     justify-content: center;
+     align-items: center;
+     height: 100vh;
+     padding: 20px;
+ }
+
+ .gr-chat {
+     height: 80vh;
+     width: 70vw;
+     justify-content: center;
+     align-items: center;
+     border: 1px solid #ccc;
+     padding: 10px;
+     box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.1);
+     overflow-y: auto;
+ }
+ """)
+
+
+ with demo:
+     with gr.Row(elem_classes=["gr-chat-container"]):
+         with gr.Column(elem_classes=["gr-chat"]):
+             chatbot = gr.ChatInterface(
+                 respond,
+                 additional_inputs=[
+                     gr.Textbox(
+                         value=(
+                             "You are going to act like a medical practitioner. Hear the symptoms, "
+                             "diagnose the disease, mention the disease name as heading, and suggest tips "
+                             "to overcome the issue. Base your answers on the provided document. limit the response to 3-4 sentences. list out the response point by point"
+                         ), visible=False,
+                         label="System message",
+                     ),
+                 ],
+                 examples=[
+                     ["I am not well and feeling feverish, tired"],
+                     ["Can you guide me through quick health tips?"],
+                     ["How do I stop worrying about things I can't control?"],
+                 ],
+                 title="Diagnify 🕊️",
+             )
+
+
+ if __name__ == "__main__":
+     preload_pdf()
+     demo.launch()
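
The substantive change in this commit is the PDF_PATH assignment: the hard-coded Windows path C:/Users/palanive/Documents/generalsymptoms.pdf is replaced with a path relative to the Space's working directory, so preload_pdf() can find the file after deployment. Below is a minimal sketch of a slightly more defensive variant, assuming the PDF sits next to app.py; the PDF_PATH environment override and the error message are illustrative, not part of this commit.

import os

# Sketch only (not in the commit): resolve the PDF relative to this file so
# the same code runs locally and on the Space. The PDF_PATH environment
# variable is a hypothetical override for local testing.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
PDF_PATH = os.environ.get("PDF_PATH", os.path.join(BASE_DIR, "general symptoms.pdf"))

if not os.path.isfile(PDF_PATH):
    # Fail fast with a readable error instead of a deep PyPDFLoader traceback.
    raise FileNotFoundError(f"Expected the symptoms PDF at {PDF_PATH}")

Two caveats when reusing this file: on LangChain 0.1 and later, the imports above live in langchain_community (for example, from langchain_community.document_loaders import PyPDFLoader), so the pinned langchain version matters; and in the streaming loop, message.choices[0].delta.content can be None on some chunks depending on the backend, so accumulating with response += token or "" avoids a possible TypeError.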