smvaideesh commited on
Commit
12af91f
·
verified ·
1 Parent(s): 77f3f29

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -26
app.py CHANGED
@@ -1,10 +1,38 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
 
 
 
3
 
4
-
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
 
 
 
8
  def respond(
9
  message,
10
  history: list[tuple[str, str]],
@@ -13,8 +41,21 @@ def respond(
13
  temperature,
14
  top_p,
15
  ):
16
- system_message = "You are a good listener. Hear the symptoms user are reporting and try to diagnose what kind of issue user might have and suggest this might be the issue and try to give some inputs to overcome this kind of issue and some good habits to avoid such issues , suggest avoiding negative thoughts, and guide through steps to manage the health issue. Discuss what's on your mind. limit the reply with 3 to 4 sentences"
17
- messages = [{"role": "system", "content": system_message}]
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
  for val in history:
20
  if val[0]:
@@ -34,34 +75,44 @@ def respond(
34
  top_p=top_p,
35
  ):
36
  token = message.choices[0].delta.content
37
-
38
  response += token
39
  yield response
40
 
 
 
41
 
42
- demo = gr.ChatInterface(
43
- respond,
44
- additional_inputs=[
45
- gr.Textbox(value = "You are going to act like a medical practitioner, and concise and point-specific speaker. You hear the symptoms and guide tips to overcome the issue and suggest some good habits, suggest avoiding negative thoughts, and guide through steps to manage the health issue. Discuss what's on your mind, or ask me for a quick health tips.", label="System message"),
46
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
47
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
48
- gr.Slider(
49
- minimum=0.1,
50
- maximum=1.0,
51
- value=0.95,
52
- step=0.05,
53
- label="Top-p (nucleus sampling)",
54
- ),
55
- ],
56
-
57
- examples = [
58
- ["I feel stressed."],
59
- ["Can you guide me through a quick health tips?"],
60
- ["How do I stop worrying about things I can't control?"]
61
- ],
62
- title = 'Diagnify ๐Ÿ•Š๏ธ '
63
- )
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
  if __name__ == "__main__":
 
67
  demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ from langchain.embeddings import HuggingFaceEmbeddings
4
+ from langchain.vectorstores import FAISS
5
+ from langchain.text_splitter import CharacterTextSplitter
6
+ from langchain.document_loaders import PyPDFLoader
7
+ import os
8
 
9
# Inference client for the hosted Zephyr-7B chat model used to generate replies.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
11
 
12
# Vector store holding the indexed PDF chunks; populated by preload_pdf()
# and queried by respond() for retrieval-augmented answers.
vector_store = None

# Path to the pre-defined PDF document. Default to a file shipped alongside
# the app so it works after deployment, and allow overriding via the
# PDF_PATH environment variable. (The previous hard-coded Windows path
# "C:/Users/palanive/Documents/generalsymptoms.pdf" does not exist on the
# Linux hosts that serve a deployed Gradio app.)
PDF_PATH = os.environ.get("PDF_PATH", "generalsymptoms.pdf")


def preload_pdf():
    """Load PDF_PATH, split it into chunks, embed them, and build a FAISS index.

    Side effect: assigns the module-level ``vector_store`` used by
    ``respond`` for similarity search. Raises if PDF_PATH does not exist
    or cannot be parsed by PyPDFLoader.
    """
    global vector_store

    # Load the PDF and extract its text (one document per page).
    loader = PyPDFLoader(PDF_PATH)
    documents = loader.load()

    # Split the text into smaller overlapping chunks so retrieval returns
    # focused passages instead of whole pages.
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    docs = text_splitter.split_documents(documents)

    # Compute embeddings for the chunks and index them for similarity search.
    embeddings = HuggingFaceEmbeddings()
    vector_store = FAISS.from_documents(docs, embeddings)

    print(f"PDF '{PDF_PATH}' loaded and indexed successfully.")
34
+
35
+ # Response generation
36
  def respond(
37
  message,
38
  history: list[tuple[str, str]],
 
41
  temperature,
42
  top_p,
43
  ):
44
+ global vector_store
45
+
46
+ if vector_store is None:
47
+ return "The PDF document is not loaded. Please check the code setup."
48
+
49
+ # Retrieve relevant chunks from the PDF
50
+ relevant_docs = vector_store.similarity_search(message, k=3)
51
+ context = "\n".join([doc.page_content for doc in relevant_docs])
52
+
53
+ # Combine system message, context, and user message
54
+ full_system_message = (
55
+ f"{system_message}\n\nContext from the document:\n{context}\n\n"
56
+ )
57
+
58
+ messages = [{"role": "system", "content": full_system_message}]
59
 
60
  for val in history:
61
  if val[0]:
 
75
  top_p=top_p,
76
  ):
77
  token = message.choices[0].delta.content
 
78
  response += token
79
  yield response
80
 
81
# Gradio interface: a Blocks layout wrapping a streaming ChatInterface whose
# callback is respond(); the additional inputs become user-tunable controls.
demo = gr.Blocks()

with demo:
    # Page heading shown above the chat widget.
    gr.Markdown("# Health Mate ๐Ÿ•Š๏ธ (RAG-based)")

    chatbot = gr.ChatInterface(
        respond,
        # These widgets are passed to respond() as extra positional
        # arguments after (message, history), in this order.
        additional_inputs=[
            gr.Textbox(
                value=(
                    "You are going to act like a medical practitioner. Hear the symptoms, "
                    "diagnose the disease, mention the disease name as heading, and suggest tips "
                    "to overcome the issue. Base your answers on the provided document. limit the response to 3-4 sentences. list out the response point by point"
                ),
                label="System message",
            ),
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.95,
                step=0.05,
                label="Top-p (nucleus sampling)",
            ),
        ],
        # Clickable starter prompts rendered under the input box.
        examples=[
            ["I feel stressed."],
            ["Can you guide me through quick health tips?"],
            ["How do I stop worrying about things I can't control?"],
        ],
        title="Health Mate ๐Ÿ•Š๏ธ",
    )
115
 
if __name__ == "__main__":
    # Build the PDF index before serving so the first request can already
    # perform retrieval (respond() bails out if vector_store is unset).
    preload_pdf()
    demo.launch()