import os
import tensorflow as tf
from keras.models import load_model
import gradio as gr
import cv2
import numpy as np
from huggingface_hub import login
from pymongo import MongoClient
from transformers import AutoModelForCausalLM, AutoTokenizer
# Log in to Hugging Face if a token is provided
tok = os.environ.get('HF_Token')
if tok:
    print("Logging in to Hugging Face with provided token...")
    login(token=tok, add_to_git_credential=True)
    print("Login successful.")
else:
    print("Warning: Hugging Face token not found in environment variables.")
# MongoDB Setup (for inventory, record-keeping, etc.)
print("Setting up MongoDB connection...")
MONGO_URI = os.environ.get("MONGO_URI")
client = MongoClient(MONGO_URI)
db = client.poultry_management # Database
print("MongoDB connection established.")
# Check GPU availability for TensorFlow
print("Checking TensorFlow setup...")
print("TensorFlow version:", tf.__version__)
print("Eager execution:", tf.executing_eagerly())
print("TensorFlow GPU Available:", tf.config.list_physical_devices('GPU'))
# Enable mixed precision when a supported GPU is available
from tensorflow.keras import mixed_precision
if len(tf.config.list_physical_devices('GPU')) > 0:
    # Check that the GPU supports mixed precision (compute capability 7.0+)
    gpu_device = tf.config.list_physical_devices('GPU')[0]
    gpu_info = tf.config.experimental.get_device_details(gpu_device)
    if 'compute_capability' in gpu_info and gpu_info['compute_capability'][0] >= 7:
        # Use the float16 policy for better performance on supported GPUs
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_global_policy(policy)
        print("Using mixed precision with GPU")
    else:
        print("GPU does not support mixed precision or may not benefit from it. Using default precision.")
else:
    print("Using CPU without mixed precision")
# Load TensorFlow/Keras models with GPU support if available, otherwise use CPU
try:
    # Select device based on GPU availability
    device_name = '/GPU:0' if len(tf.config.list_physical_devices('GPU')) > 0 else '/CPU:0'
    print(f"Loading models on {device_name}...")
    with tf.device(device_name):
        my_model = load_model('models/disease_model.h5', compile=False)
        print("Disease detection model loaded successfully.")
        auth_model = load_model('models/auth_model.h5', compile=False)
        print("Authentication model loaded successfully.")
    print(f"Models loaded successfully on {device_name}.")
except Exception as e:
    print(f"Error loading models: {e}")
    if 'weight_decay' in str(e):
        print("Invalid argument 'weight_decay' found. Please adjust optimizer settings.")
    my_model, auth_model = None, None
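# Note: compile=False loads only the architecture and weights, skipping the saved
# optimizer/loss configuration; this avoids deserialization errors (such as the
# 'weight_decay' one handled above) and is sufficient for inference.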
# Disease names and recommendations based on fecal analysis
name_disease = {0: 'Coccidiosis', 1: 'Healthy', 2: 'Newcastle Disease', 3: 'Salmonella'}
result = {0: 'Critical', 1: 'No issue', 2: 'Critical', 3: 'Critical'}
recommend = {
    0: 'Panadol',
    1: 'No medication needed',
    2: 'Paracetamol',
    3: 'Ponstan'
}
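# The integer keys mirror the class indices of the disease model's output layer,
# so predict() can map an argmax index straight to a name, severity, and recommendation.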
class PoultryFarmBot:
    def __init__(self):
        self.db = db  # MongoDB database for future use
        print("PoultryFarmBot initialized with MongoDB connection.")
    # Image preprocessing for fecal disease detection
    def preprocess_image(self, image):
        try:
            print("Preprocessing image for disease detection...")
            # Resize the image to match the model input size (224x224)
            image_check = cv2.resize(image, (224, 224))
            # Add a batch dimension to the image array
            image_check = np.expand_dims(image_check, axis=0)
            print("Image preprocessing successful.")
            return image_check
        except Exception as e:
            print(f"Error in image preprocessing: {e}")
            return None
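    # Note: no pixel normalization or dtype conversion happens here; this assumes
    # the disease model was trained on raw 0-255 RGB inputs at 224x224.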
    # Predict disease from a fecal image
    def predict(self, image):
        print("Starting disease prediction...")
        # Preprocess the image before prediction
        image_check = self.preprocess_image(image)
        if image_check is None or my_model is None:
            print("Image preprocessing failed or model not loaded.")
            return "Image preprocessing failed or model not loaded.", None, None, None
        # Predict using the fecal disease detection model
        try:
            print("Running model prediction...")
            indx = my_model.predict(image_check).argmax()
            print(f"Prediction complete. Predicted index: {indx}")
            name = name_disease.get(indx, "Unknown disease")
            status = result.get(indx, "unknown condition")
            recom = recommend.get(indx, "no recommendation available")
            # Generate additional information about the disease using Llama 3.2
            detailed_response = self.generate_disease_response(name, status, recom)
            print("Generated detailed response using Llama 3.2.")
            return detailed_response, name, status, recom
        except Exception as e:
            print(f"Error during prediction: {e}")
            return "Error during prediction.", None, None, None
    # Generate a detailed response using Llama 3.2 for disease information and recommendations
    def generate_disease_response(self, disease_name, status, recommendation):
        print("Generating detailed disease response...")
        # Create a prompt for Llama 3.2 to generate detailed disease information
        prompt = (
            f"The disease detected is {disease_name}, classified as {status}. "
            f"Recommended action: {recommendation}. "
            f"Here is some information about {disease_name}: causes, symptoms, and treatment methods "
            "to effectively manage this condition on a poultry farm."
        )
        response = llama_response(prompt)
        # Post-process to remove the prompt if it is echoed back in the response
        final_response = response.replace(prompt, "").strip()
        print("Detailed disease response generated.")
        return final_response
    # Diagnose disease from a fecal image
    def diagnose_disease(self, image):
        print("Diagnosing disease from provided image...")
        # Ensure the image is valid and non-empty
        if image is not None and image.size > 0:
            return self.predict(image)
        print("Invalid image provided.")
        return "Please provide an image of poultry fecal matter for disease detection.", None, None, None
# Initialize the bot instance
print("Initializing PoultryFarmBot instance...")
bot = PoultryFarmBot()
# Load the Llama 3.2 model and tokenizer for text generation
print("Loading Llama 3.2 model and tokenizer...")
model_name = "meta-llama/Llama-3.2-1B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
print("Llama 3.2 model and tokenizer loaded successfully.")
# Add a dedicated padding token if the tokenizer does not define one
if tokenizer.pad_token is None:
    print("Adding pad token to tokenizer...")
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    model.resize_token_embeddings(len(tokenizer))
    print("Pad token added and model resized.")
# Define Llama 3.2 response generation
def llama_response(user_input):
    try:
        print("Generating response using Llama 3.2...")
        # Tokenize the user input for the Llama 3.2 model
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True, max_length=500, padding=True)
        # Generate a response; max_new_tokens bounds the generated text itself,
        # unlike max_length, which also counts the prompt tokens
        outputs = model.generate(
            inputs["input_ids"],
            max_new_tokens=150,
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.pad_token_id,  # Use the newly added padding token
            attention_mask=inputs["attention_mask"]
        )
        # Decode and return the response
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        print("Response generated successfully.")
        return response
    except Exception as e:
        print(f"Error generating response: {e}")
        return f"Error generating response: {str(e)}"
# Main chatbot function: handles both generative AI and disease detection
def chatbot_response(image, text):
    print("Received user input for chatbot response...")
    # If an image is provided, perform disease detection
    if image is not None:
        print("Image provided, attempting disease diagnosis...")
        diagnosis, name, status, recom = bot.diagnose_disease(image)
        if name and status and recom:
            print("Diagnosis complete, returning detailed response.")
            return diagnosis
        else:
            print("Diagnosis failed or incomplete, returning diagnostic message only.")
            return diagnosis  # Return only the diagnostic message if no disease was identified
    else:
        print("No image provided, using Llama 3.2 for text response...")
        # Use Llama 3.2 to answer the user's text query
        return llama_response(text)
# Gradio interface styling and layout with a ChatGPT-like theme
print("Setting up Gradio interface...")
with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", neutral_hue="slate")) as chatbot_interface:
    gr.Markdown("# 🐔 Poultry Management Chatbot")
    gr.Markdown(
        "This chatbot can help you manage your poultry with conversational AI. "
        "Upload an image of poultry fecal matter for disease detection or just ask questions!"
    )
    with gr.Row():
        with gr.Column(scale=1):
            # Image input for poultry feces (optional)
            fecal_image = gr.Image(
                label="Upload Image of Poultry Feces (Optional)",
                type="numpy",
                elem_id="image-upload",
                show_label=True,
            )
        with gr.Column(scale=2):
            # Text input for user questions
            user_input = gr.Textbox(
                label="Type your question or chat with the assistant",
                placeholder="Ask a question about poultry management...",
                lines=3,
                elem_id="user-input",
            )
    # Output box to display chatbot responses
    output_box = gr.Textbox(
        label="Response",
        placeholder="The response will appear here...",
        interactive=False,
        lines=10,
        elem_id="output-box",
    )
    # Submit button to trigger response generation
    submit_button = gr.Button(
        "Submit",
        variant="primary",
        elem_id="submit-button"
    )
    submit_button.click(
        fn=chatbot_response,
        inputs=[fecal_image, user_input],
        outputs=[output_box]
    )
# Launch the Gradio interface
if __name__ == "__main__":
    print("Launching Gradio interface...")
    chatbot_interface.queue().launch(debug=True)