Reality123b committed (verified)
Commit 61ae39b · 1 Parent(s): 64f58fd

Update app.py

Files changed (1)
  1. app.py +69 -4
app.py CHANGED
@@ -33,6 +33,8 @@ class XylariaChat:
 
         self.image_api_url = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-large"
         self.image_api_headers = {"Authorization": f"Bearer {self.hf_token}"}
+
+        self.image_gen_api_url = "https://api-inference.huggingface.co/models/hugovntr/flux-schnell-realism"
 
         self.conversation_history = []
         self.persistent_memory = []
@@ -421,6 +423,24 @@ class XylariaChat:
 
         except Exception as e:
             return f"Error processing image: {str(e)}"
+
+    def generate_image(self, prompt):
+        try:
+            payload = {"inputs": prompt}
+            response = requests.post(
+                self.image_gen_api_url,
+                headers=self.image_api_headers,
+                json=payload
+            )
+
+            if response.status_code == 200:
+                image_bytes = response.content
+                return image_bytes
+            else:
+                return f"Error generating image: {response.status_code} - {response.text}"
+
+        except Exception as e:
+            return f"Error generating image: {str(e)}"
 
     def perform_math_ocr(self, image_path):
         try:
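The new generate_image helper returns raw image bytes on success and a plain error string otherwise, so callers must branch on the return type (the /image handler added in the next hunk does exactly that). A minimal standalone sketch of that calling pattern, assuming a XylariaChat instance with a valid Hugging Face token; the prompt and output filename are illustrative:

import base64

chat = XylariaChat()  # assumes the class picks up a valid HF token, as elsewhere in app.py
result = chat.generate_image("a misty mountain lake at sunrise")

if isinstance(result, bytes):
    # success: raw bytes from the inference API; save them or embed as a data URI
    with open("generated.png", "wb") as f:
        f.write(result)
    data_uri = "data:image/png;base64," + base64.b64encode(result).decode("utf-8")
else:
    # failure: the method returns an error string instead of raising
    print(result)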
@@ -540,14 +560,59 @@ class XylariaChat:
         return prompt
 
     def create_interface(self):
+        loading_svg = """<svg width="256" height="256" viewBox="0 0 256 256" xmlns="http://www.w3.org/2000/svg">
+        <style>
+            rect {
+                animation: fillAnimation 3s ease-in-out infinite;
+            }
+            @keyframes fillAnimation {
+                0% { fill: #626262; }
+                50% { fill: #111111; }
+                100% { fill: #626262; }
+            }
+            text {
+                font-family: 'Helvetica Neue', Arial, sans-serif; /* Choose a good font */
+                font-weight: 300; /* Slightly lighter font weight */
+                text-shadow: 0px 2px 4px rgba(0, 0, 0, 0.4); /* Subtle shadow */
+            }
+        </style>
+        <rect width="256" height="256" rx="20" fill="#888888" />
+        <text x="50%" y="50%" dominant-baseline="middle" text-anchor="middle" font-size="24" fill="white" opacity="0.8">
+            <tspan>creating your image</tspan>
+            <tspan x="50%" dy="1.2em">with xylaria iris</tspan>
+        </text>
+        </svg>"""
+
         def streaming_response(message, chat_history, image_filepath, math_ocr_image_path):
+            if message.strip().lower() == "/image":
+                chat_history.append([message, ""])
+                yield "", chat_history, None, None, None
+
+                image_prompt = self.conversation_history[-2]["content"] if len(self.conversation_history) >= 2 else ""
+                if not image_prompt:
+                    image_prompt = "A realistic image"
+
+                image_bytes = self.generate_image(image_prompt)
+                if isinstance(image_bytes, bytes):
+                    base64_image = base64.b64encode(image_bytes).decode("utf-8")
+                    image_html = f'<img src="data:image/png;base64,{base64_image}" alt="Generated Image" style="max-width: 100%; max-height: 400px;">'
+                    chat_history[-1][1] = image_html
+
+                    self.conversation_history.append(ChatMessage(role="user", content=message).to_dict())
+                    self.conversation_history.append(ChatMessage(role="assistant", content=image_html).to_dict())
+
+                    yield "", chat_history, None, None, None
+                else:
+                    chat_history[-1][1] = image_bytes
+                    yield "", chat_history, None, None, None
+                return
 
             ocr_text = ""
             if math_ocr_image_path:
                 ocr_text = self.perform_math_ocr(math_ocr_image_path)
                 if ocr_text.startswith("Error"):
                     updated_history = chat_history + [[message, ocr_text]]
-                    yield "", updated_history, None, None
+                    yield "", updated_history, None, None, None
                     return
                 else:
                     message = f"Math OCR Result: {ocr_text}\n\nUser's message: {message}"
@@ -559,7 +624,7 @@ class XylariaChat:
 
             if isinstance(response_stream, str):
                 updated_history = chat_history + [[message, response_stream]]
-                yield "", updated_history, None, None
+                yield "", updated_history, None, None, None
                 return
 
             full_response = ""
@@ -572,11 +637,11 @@ class XylariaChat:
                     full_response += chunk_content
 
                     updated_history[-1][1] = full_response
-                    yield "", updated_history, None, None
+                    yield "", updated_history, None, None, None
            except Exception as e:
                print(f"Streaming error: {e}")
                updated_history[-1][1] = f"Error during response: {e}"
-                yield "", updated_history, None, None
+                yield "", updated_history, None, None, None
                return
 
            full_response = self.adjust_response_based_on_state(full_response)
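Every yield in streaming_response now emits five values instead of four, which only works if the Gradio event that consumes this generator is bound to five output components. That wiring is not part of this diff, so the following is only an assumption about what the binding might look like; all component names are illustrative:

send_btn.click(
    fn=streaming_response,
    inputs=[msg_textbox, chatbot, image_upload, math_ocr_upload],
    outputs=[msg_textbox, chatbot, image_upload, math_ocr_upload, generated_image_output],  # five outputs, matching the five yielded values
)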
 