import os

import cv2
import gradio as gr
import numpy as np
import tensorflow as tf
from huggingface_hub import login
from keras.models import load_model
from pymongo import MongoClient
from transformers import AutoModelForCausalLM, AutoTokenizer

# Authenticate with the Hugging Face Hub when a token is available
# (HF_TOKEN is the conventional secret name on Spaces).
tok = os.environ.get("HF_TOKEN")
if tok:
    print("Logging in to Hugging Face with provided token...")
    login(token=tok, add_to_git_credential=True)
    print("Login successful.")
else:
    print("Warning: Hugging Face token not found in environment variables.")

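# On Hugging Face Spaces the token is typically stored as a repository secret;
# for local runs it can be exported beforehand (placeholder value shown):
#
#   export HF_TOKEN=hf_xxxxxxxxxxxx
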
print("Setting up MongoDB connection...")
MONGO_URI = os.environ.get("MONGO_URI")
if not MONGO_URI:
    print("Warning: MONGO_URI not set; MongoClient will fall back to localhost.")
client = MongoClient(MONGO_URI)
db = client.poultry_management
print("MongoDB connection established.")

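# A minimal sketch of persisting a diagnosis with `db`; the `diagnoses`
# collection name is an assumption, not something the original app defines:
#
#   db.diagnoses.insert_one({"disease": "Coccidiosis", "status": "Critical"})
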
print("Checking TensorFlow setup...")
print("TensorFlow version:", tf.__version__)
print("Eager execution:", tf.executing_eagerly())
print("TensorFlow GPU available:", tf.config.list_physical_devices('GPU'))

from tensorflow.keras import mixed_precision

# Enable mixed float16 precision only on GPUs with compute capability 7.0+
# (Volta and newer), where Tensor Cores make float16 genuinely faster.
if len(tf.config.list_physical_devices('GPU')) > 0:
    gpu_device = tf.config.list_physical_devices('GPU')[0]
    gpu_info = tf.config.experimental.get_device_details(gpu_device)
    if 'compute_capability' in gpu_info and gpu_info['compute_capability'][0] >= 7:
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_global_policy(policy)
        print("Using mixed precision with GPU.")
    else:
        print("GPU does not support mixed precision efficiently; using default precision.")
else:
    print("Using CPU without mixed precision.")

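# To verify which policy is active (assuming the GPU branch above ran):
#
#   print(mixed_precision.global_policy())  # e.g. <Policy "mixed_float16">
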
try:
    # Load on the GPU when one is visible, otherwise fall back to the CPU.
    device_name = '/GPU:0' if len(tf.config.list_physical_devices('GPU')) > 0 else '/CPU:0'
    print(f"Loading models on {device_name}...")
    with tf.device(device_name):
        my_model = load_model('models/disease_model.h5', compile=False)
        print("Disease detection model loaded successfully.")
        auth_model = load_model('models/auth_model.h5', compile=False)
        print("Authentication model loaded successfully.")
    print(f"Models loaded successfully on {device_name}.")
except Exception as e:
    print(f"Error loading models: {e}")
    if 'weight_decay' in str(e):
        print("The saved optimizer config contains an unsupported 'weight_decay' argument; adjust the optimizer settings and re-save the model.")
    my_model, auth_model = None, None

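# Quick sanity check after loading (assuming the disease model loaded and,
# matching the preprocessing below, expects 224x224 RGB inputs):
#
#   print(my_model.input_shape)  # e.g. (None, 224, 224, 3)
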
# Model output index -> disease name, severity, and suggested treatment.
name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'Newcastle Disease', 3: 'Salmonella'}
result = {0: 'Critical', 1: 'No issue', 2: 'Critical', 3: 'Critical'}
recommend = {
    0: 'Panadol',
    1: 'No medication needed',
    2: 'Paracetamol',
    3: 'Ponston'
}

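# Example lookup for class index 0:
#
#   name_disease[0], result[0], recommend[0]
#   # -> ('Coccidiosis', 'Critical', 'Panadol')
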

class PoultryFarmBot:
    def __init__(self):
        self.db = db
        print("PoultryFarmBot initialized with MongoDB connection.")

    def preprocess_image(self, image):
        try:
            print("Preprocessing image for disease detection...")
            # Resize to the 224x224 input size the classifier expects.
            image_check = cv2.resize(image, (224, 224))
            # Add a batch dimension: (224, 224, 3) -> (1, 224, 224, 3).
            # If the model was trained on rescaled inputs, also divide by 255.0 here.
            image_check = np.expand_dims(image_check, axis=0)
            print("Image preprocessing successful.")
            return image_check
        except Exception as e:
            print(f"Error in image preprocessing: {e}")
            return None

    def predict(self, image):
        print("Starting disease prediction...")
        image_check = self.preprocess_image(image)
        if image_check is None or my_model is None:
            print("Image preprocessing failed or model not loaded.")
            return "Image preprocessing failed or model not loaded.", None, None, None

        try:
            print("Running model prediction...")
            # argmax over the class probabilities gives the predicted index.
            indx = my_model.predict(image_check).argmax()
            print(f"Prediction complete. Predicted index: {indx}")
            name = name_disease.get(indx, "Unknown disease")
            status = result.get(indx, "unknown condition")
            recom = recommend.get(indx, "no recommendation available")

            detailed_response = self.generate_disease_response(name, status, recom)
            print("Generated detailed response with the language model.")
            return detailed_response, name, status, recom
        except Exception as e:
            print(f"Error during prediction: {e}")
            return "Error during prediction.", None, None, None

    def generate_disease_response(self, disease_name, status, recommendation):
        print("Generating detailed disease response...")
        prompt = (
            f"The disease detected is {disease_name}, classified as {status}. "
            f"Recommended action: {recommendation}. "
            f"Here is some information about {disease_name}: causes, symptoms, and treatment methods "
            "to effectively manage this condition on a poultry farm."
        )
        response = llama_response(prompt)
        # The model echoes the prompt; strip it so only the continuation remains.
        final_response = response.replace(prompt, "").strip()
        print("Detailed disease response generated.")
        return final_response

    def diagnose_disease(self, image):
        print("Diagnosing disease from provided image...")
        # Gradio supplies the image as a NumPy array; guard against empty input.
        if image is not None and image.size > 0:
            return self.predict(image)
        print("Invalid image provided.")
        return "Please provide an image of poultry fecal matter for disease detection.", None, None, None

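# A minimal usage sketch; `sample.jpg` is a hypothetical path, and cv2.imread
# returns BGR, so convert to RGB to match what Gradio would supply:
#
#   img = cv2.cvtColor(cv2.imread("sample.jpg"), cv2.COLOR_BGR2RGB)
#   text, name, status, recom = PoultryFarmBot().diagnose_disease(img)
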
print("Initializing PoultryFarmBot instance...")
bot = PoultryFarmBot()

print("Loading Llama 3.2 model and tokenizer...")
model_name = "meta-llama/Llama-3.2-1B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
print("Llama 3.2 model and tokenizer loaded successfully.")

# Llama tokenizers ship without a padding token; add one so tokenization with
# padding=True works, and resize the embedding matrix to match the new vocab.
if tokenizer.pad_token is None:
    print("Adding pad token to tokenizer...")
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    model.resize_token_embeddings(len(tokenizer))
    print("Pad token added and model resized.")

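# With a pad token set, batched tokenization works, e.g.:
#
#   tokenizer(["short", "a longer input"], padding=True, return_tensors="pt")
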

def llama_response(user_input):
    try:
        print("Generating response with Llama 3.2...")
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True, max_length=500, padding=True)
        outputs = model.generate(
            inputs["input_ids"],
            max_new_tokens=150,  # bound the generated continuation, not the total sequence length
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.pad_token_id,
            attention_mask=inputs["attention_mask"]
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        print("Response generated successfully.")
        return response
    except Exception as e:
        print(f"Error generating response: {e}")
        return f"Error generating response: {str(e)}"

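# Example call (sampling is enabled, so output varies between runs):
#
#   print(llama_response("How can I reduce ammonia levels in a broiler house?"))
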

def chatbot_response(image, text):
    print("Received user input for chatbot response...")
    if image is not None:
        print("Image provided, attempting disease diagnosis...")
        diagnosis, name, status, recom = bot.diagnose_disease(image)
        if name and status and recom:
            print("Diagnosis complete, returning detailed response.")
        else:
            print("Diagnosis failed or incomplete, returning diagnostic message only.")
        return diagnosis
    print("No image provided, using Llama 3.2 for text response...")
    return llama_response(text)

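# Text-only example; passing None for the image routes the query to the LLM:
#
#   chatbot_response(None, "What temperature should a brooder house be kept at?")
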

print("Setting up Gradio interface...")
with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
    gr.Markdown("# 🐔 Poultry Management Chatbot")
    gr.Markdown(
        "This chatbot can help you manage your poultry with conversational AI. "
        "Upload an image of poultry fecal matter for disease detection or just ask questions!"
    )

    with gr.Row():
        with gr.Column(scale=1):
            fecal_image = gr.Image(
                label="Upload Image of Poultry Feces (Optional)",
                type="numpy",
                elem_id="image-upload",
                show_label=True,
            )
        with gr.Column(scale=2):
            user_input = gr.Textbox(
                label="Type your question or chat with the assistant",
                placeholder="Ask a question about poultry management...",
                lines=3,
                elem_id="user-input",
            )

    output_box = gr.Textbox(
        label="Response",
        placeholder="The response will appear here...",
        interactive=False,
        lines=10,
        elem_id="output-box",
    )

    submit_button = gr.Button(
        "Submit",
        variant="primary",
        elem_id="submit-button"
    )
    submit_button.click(
        fn=chatbot_response,
        inputs=[fecal_image, user_input],
        outputs=[output_box]
    )


if __name__ == "__main__":
    print("Launching Gradio interface...")
    chatbot_interface.queue().launch(debug=True)