import os
import tensorflow as tf
from keras.models import load_model
import gradio as gr
import cv2
import numpy as np
from huggingface_hub import login
from pymongo import MongoClient
from transformers import AutoModelForCausalLM, AutoTokenizer
from concurrent.futures import ThreadPoolExecutor
# Ensure the Hugging Face token is set
tok = os.environ.get('HF_Token')
if tok:
    print("Logging in to Hugging Face with provided token...")
    login(token=tok, add_to_git_credential=True)
    print("Login successful.")
else:
    print("Warning: Hugging Face token not found in environment variables.")
# MongoDB Setup (for inventory, record-keeping, etc.)
print("Setting up MongoDB connection...")
MONGO_URI = os.environ.get("MONGO_URI")
client = MongoClient(MONGO_URI)
db = client.poultry_management # Database
print("MongoDB connection established.")
# Check GPU availability for TensorFlow
print("Checking TensorFlow setup...")
print("TensorFlow version:", tf.__version__)
print("Eager execution:", tf.executing_eagerly())
print("TensorFlow GPU Available:", tf.config.list_physical_devices('GPU'))
# Use mixed precision when a supporting GPU is available
from tensorflow.keras import mixed_precision
if len(tf.config.list_physical_devices('GPU')) > 0:
    # Ensure the GPU supports mixed precision (compute capability 7.0 or higher)
    gpu_device = tf.config.list_physical_devices('GPU')[0]
    gpu_info = tf.config.experimental.get_device_details(gpu_device)
    if 'compute_capability' in gpu_info and gpu_info['compute_capability'][0] >= 7:
        # float16 compute with float32 variables for better performance on supported GPUs
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_global_policy(policy)
        print("Using mixed precision with GPU")
    else:
        print("GPU does not support mixed precision or would not benefit from it. Using default precision.")
else:
    print("Using CPU without mixed precision")
# Load TensorFlow/Keras models with GPU support if available, otherwise use CPU
try:
    # Select device based on GPU availability
    device_name = '/GPU:0' if len(tf.config.list_physical_devices('GPU')) > 0 else '/CPU:0'
    print(f"Loading models on {device_name}...")
    with tf.device(device_name):
        my_model = load_model('models/disease_model.h5', compile=False)
        print("Disease detection model loaded successfully.")
        auth_model = load_model('models/auth_model.h5', compile=False)
        print("Authentication model loaded successfully.")
    print(f"Models loaded successfully on {device_name}.")
except Exception as e:
    print(f"Error loading models: {e}")
    if 'weight_decay' in str(e):
        print("Invalid argument 'weight_decay' found. Please adjust optimizer settings.")
    my_model, auth_model = None, None
# Disease names and recommendations based on fecal analysis
name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'Newcastle Disease', 3: 'Salmonella'}
result = {0: 'Critical', 1: 'No issue', 2: 'Critical', 3: 'Critical'}
recommend = {
    0: 'Panadol',
    1: 'No medication needed',
    2: 'Paracetamol',
    3: 'Ponston'
}
class PoultryFarmBot:
    def __init__(self):
        self.db = db  # MongoDB database for future use
        print("PoultryFarmBot initialized with MongoDB connection.")

    # Image preprocessing for fecal disease detection
    def preprocess_image(self, image):
        try:
            print("Preprocessing image for disease detection...")
            # Resize the image to match the model's input size (224x224)
            image_check = cv2.resize(image, (224, 224))
            # Add a batch dimension to the image array
            image_check = np.expand_dims(image_check, axis=0)
            print("Image preprocessing successful.")
            return image_check
        except Exception as e:
            print(f"Error in image preprocessing: {e}")
            return None
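    # Note: no pixel scaling is applied above. This assumes the saved Keras
    # model was trained on raw 0-255 inputs or normalizes internally (e.g. via
    # a Rescaling layer); if it expects [0, 1] inputs, divide by 255.0 after
    # resizing.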
    # Predict disease from a fecal image
    def predict(self, image):
        print("Starting disease prediction...")
        # Preprocess the image before prediction
        image_check = self.preprocess_image(image)
        if image_check is None or my_model is None:
            print("Image preprocessing failed or model not loaded.")
            return "Image preprocessing failed or model not loaded.", None, None, None
        # Predict using the fecal disease detection model
        try:
            print("Running model prediction...")
            indx = my_model.predict(image_check).argmax()
            print(f"Prediction complete. Predicted index: {indx}")
            name = name_disease.get(indx, "Unknown disease")
            status = result.get(indx, "unknown condition")
            recom = recommend.get(indx, "no recommendation available")
            # Generate additional information about the disease using Llama 3.2
            detailed_response = self.generate_disease_response(name, status, recom)
            print("Generated detailed response using Llama 3.2.")
            return detailed_response, name, status, recom
        except Exception as e:
            print(f"Error during prediction: {e}")
            return "Error during prediction.", None, None, None
    # Generate a detailed response using Llama 3.2 for disease information and recommendations
    def generate_disease_response(self, disease_name, status, recommendation):
        print("Generating detailed disease response...")
        # Create a prompt for Llama 3.2 to generate detailed disease information
        prompt = (
            f"The disease detected is {disease_name}, classified as {status}. "
            f"Recommended action: {recommendation}. "
            f"Here is some information about {disease_name}: causes, symptoms, and treatment methods "
            "to effectively manage this condition on a poultry farm."
        )
        response = llama3_response(prompt)
        # Post-process to remove the prompt if it was echoed back in the response
        final_response = response.replace(prompt, "").strip()
        print("Detailed disease response generated.")
        return final_response
    # Diagnose disease using a fecal image
    def diagnose_disease(self, image):
        print("Diagnosing disease from provided image...")
        # Ensure the image is valid and non-empty
        if image is not None and image.size > 0:
            return self.predict(image)
        print("Invalid image provided.")
        return "Please provide an image of poultry fecal matter for disease detection.", None, None, None
# Initialize the bot instance
print("Initializing PoultryFarmBot instance...")
bot = PoultryFarmBot()
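# Illustrative usage (not executed here): load a local image with OpenCV and
# run it through the bot. The file path is a placeholder.
#
#     sample = cv2.imread("sample_feces.jpg")
#     message, name, status, recom = bot.diagnose_disease(sample)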
# Load the Llama 3.2 model and tokenizer for text generation
print("Loading Llama 3.2 model and tokenizer...")
model_name = "meta-llama/Llama-3.2-1B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
print("Llama 3.2 model and tokenizer loaded successfully.")
# Set the padding token: add a dedicated [PAD] token if none exists
if tokenizer.pad_token is None:
    print("Adding pad token to tokenizer...")
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    model.resize_token_embeddings(len(tokenizer))
    print("Pad token added and model resized.")
# Define Llama 3.2 response generation
def llama3_response(user_input):
    try:
        print("Generating response using Llama 3.2...")
        # Tokenize user input for the Llama 3.2 model
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True, max_length=500, padding=True)
        # Generate a response; max_new_tokens bounds the generated text
        # regardless of prompt length (max_length would count prompt tokens too)
        outputs = model.generate(
            inputs["input_ids"],
            max_new_tokens=150,
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.pad_token_id,  # use the padding token set above
            attention_mask=inputs["attention_mask"]
        )
        # Decode and return the response
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        print("Response generated successfully.")
        return response
    except Exception as e:
        print(f"Error generating response: {e}")
        return f"Error generating response: {str(e)}"
# Main chatbot function: handles both generative AI and disease detection
def chatbot_response(image, text):
    print("Received user input for chatbot response...")
    # If an image is provided, perform disease detection
    if image is not None:
        print("Image provided, attempting disease diagnosis...")
        diagnosis, name, status, recom = bot.diagnose_disease(image)
        if name and status and recom:
            print("Diagnosis complete, returning detailed response.")
            return diagnosis
        else:
            print("Diagnosis failed or incomplete, returning diagnostic message only.")
            return diagnosis  # return only the diagnostic message if no disease was identified
    else:
        print("No image provided, using Llama 3.2 for text response...")
        # Use Llama 3.2 to answer the user's text query
        return llama3_response(text)
# Gradio interface styling and layout with a ChatGPT-like theme
print("Setting up Gradio interface...")
with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
    gr.Markdown("# 🐔 Poultry Management Chatbot")
    gr.Markdown(
        "This chatbot can help you manage your poultry with conversational AI. "
        "Upload an image of poultry fecal matter for disease detection or just ask questions!"
    )
    with gr.Row():
        with gr.Column(scale=1):
            # Image input for poultry feces (optional)
            fecal_image = gr.Image(
                label="Upload Image of Poultry Feces (Optional)",
                type="numpy",
                elem_id="image-upload",
                show_label=True,
            )
        with gr.Column(scale=2):
            # Text input for user questions
            user_input = gr.Textbox(
                label="Type your question or chat with the assistant",
                placeholder="Ask a question about poultry management...",
                lines=3,
                elem_id="user-input",
            )
    # Output box to display chatbot responses
    output_box = gr.Textbox(
        label="Response",
        placeholder="The response will appear here...",
        interactive=False,
        lines=10,
        elem_id="output-box",
    )
    # Submit button to trigger response generation
    submit_button = gr.Button(
        "Submit",
        variant="primary",
        elem_id="submit-button"
    )
    submit_button.click(
        fn=chatbot_response,
        inputs=[fecal_image, user_input],
        outputs=[output_box]
    )
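    # Optional addition (not in the original layout): let pressing Enter in the
    # textbox trigger the same handler as the Submit button. Textbox.submit is
    # part of Gradio's standard event API.
    user_input.submit(
        fn=chatbot_response,
        inputs=[fecal_image, user_input],
        outputs=[output_box]
    )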
# Launch the Gradio interface
if __name__ == "__main__":
    print("Launching Gradio interface...")
    chatbot_interface.queue().launch(debug=True)