import gradio as gr
from huggingface_hub import InferenceClient
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import PyPDFLoader

# Client for the hosted chat model
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Vector store, populated by preload_pdf() at startup
vector_store = None

# Path to the pre-defined PDF document
PDF_PATH = "general symptoms.pdf"


def preload_pdf():
    global vector_store

    # Load the PDF and extract its text
    loader = PyPDFLoader(PDF_PATH)
    documents = loader.load()

    # Split the text into smaller chunks for retrieval
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    docs = text_splitter.split_documents(documents)

    # Compute embeddings for the chunks and index them with FAISS
    embeddings = HuggingFaceEmbeddings()
    vector_store = FAISS.from_documents(docs, embeddings)

    print(f"PDF '{PDF_PATH}' loaded and indexed successfully.")


# Response generation
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
):
    global vector_store

    # This function is a generator, so errors must be yielded, not returned,
    # for Gradio to display them.
    if vector_store is None:
        yield "The PDF document is not loaded. Please check the code setup."
        return

    # Retrieve the most relevant chunks from the PDF
    relevant_docs = vector_store.similarity_search(message, k=3)
    context = "\n".join([doc.page_content for doc in relevant_docs])

    # Combine the system message and the retrieved context
    full_system_message = (
        f"{system_message}\n\nContext from the document:\n{context}\n\n"
    )

    messages = [{"role": "system", "content": full_system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream tokens; the loop variable is named `chunk` so it does not
    # shadow the `message` argument above.
    for chunk in client.chat_completion(messages, stream=True):
        token = chunk.choices[0].delta.content
        if token:  # guard against empty/None deltas in the stream
            response += token
            yield response


# Gradio interface
demo = gr.Blocks(css="""
.gr-chat-container {
    display: flex;
    background-color: skyblue;
    justify-content: center;
    align-items: center;
    height: 100vh;
    padding: 20px;
}
.gr-chat {
    height: 80vh;
    width: 70vw;
    justify-content: center;
    align-items: center;
    border: 1px solid #ccc;
    padding: 10px;
    box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.1);
    overflow-y: auto;
}
""")

with demo:
    with gr.Row(elem_classes=["gr-chat-container"]):
        with gr.Column(elem_classes=["gr-chat"]):
            chatbot = gr.ChatInterface(
                respond,
                additional_inputs=[
                    gr.Textbox(
                        value=(
                            "You are going to act like a medical practitioner. Hear the symptoms, "
                            "diagnose the disease, mention the disease name as a heading, and suggest tips "
                            "to overcome the issue. Base your answers on the provided document. "
                            "Limit the response to 3-4 sentences and list the response point by point."
                        ),
                        visible=False,
                        label="System message",
                    ),
                ],
                examples=[
                    ["I am not well and feeling feverish, tired"],
                    ["Can you guide me through quick health tips?"],
                    ["How do I stop worrying about things I can't control?"],
                ],
                title="Diagnify 🕊️",
            )

if __name__ == "__main__":
    preload_pdf()
    demo.launch()
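# ---------------------------------------------------------------------------
# Dependency sketch (an assumption, not pinned by this app): the legacy
# `langchain.*` import paths above work on older LangChain releases; on
# newer releases the same classes live under `langchain_community.*`.
# The loaders and embeddings also pull in extra packages, roughly:
#
#   pip install gradio huggingface_hub langchain faiss-cpu pypdf sentence-transformers
#
# PyPDFLoader needs `pypdf`, HuggingFaceEmbeddings needs
# `sentence-transformers`, and FAISS.from_documents needs `faiss-cpu`
# (or `faiss-gpu`). Exact package versions are left to the deployer.
# ---------------------------------------------------------------------------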