Update app.py
Browse files
app.py
CHANGED
@@ -148,55 +148,114 @@ def test_model_availability(api_key, model_name, model_type="chat"):
|
|
148 |
)
|
149 |
return False
|
150 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
151 |
def create_base64_markdown_image(image_url):
|
152 |
try:
|
153 |
response = session.get(image_url, stream=True)
|
154 |
response.raise_for_status()
|
155 |
-
|
156 |
-
image_data = response.content
|
157 |
|
158 |
-
|
|
|
159 |
|
160 |
-
original_width, original_height = image.size
|
161 |
-
|
162 |
-
new_width = original_width // 4
|
163 |
-
new_height = original_height // 4
|
164 |
-
|
165 |
-
resized_image = image.resize((new_width, new_height), Image.LANCZOS)
|
166 |
-
|
167 |
buffered = BytesIO()
|
168 |
resized_image.save(buffered, format="PNG")
|
|
|
169 |
|
170 |
-
|
171 |
-
|
172 |
-
base64_encoded = base64.b64encode(resized_image_data).decode('utf-8')
|
173 |
-
|
174 |
-
mime_type = 'image/png'
|
175 |
-
|
176 |
-
markdown_image_link = f"![](data:{mime_type};base64,{base64_encoded})"
|
177 |
-
logging.info(f"Created base64 markdown image link.")
|
178 |
return markdown_image_link
|
179 |
-
except requests.exceptions.RequestException as e:
|
180 |
-
logging.error(f"Error downloading image: {e}")
|
181 |
-
return None
|
182 |
except Exception as e:
|
183 |
-
logging.error(f"Error
|
184 |
return None
|
185 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
186 |
def refresh_models():
|
187 |
global models
|
188 |
|
189 |
-
# 获取各类型模型列表
|
190 |
models["text"] = get_all_models(FREE_MODEL_TEST_KEY, "chat")
|
191 |
models["embedding"] = get_all_models(FREE_MODEL_TEST_KEY, "embedding")
|
192 |
models["image"] = get_all_models(FREE_MODEL_TEST_KEY, "text-to-image")
|
193 |
|
194 |
-
# 重置免费模型列表
|
195 |
models["free_text"] = []
|
196 |
models["free_embedding"] = []
|
197 |
models["free_image"] = []
|
198 |
|
199 |
-
# 处理禁用模型
|
200 |
ban_models = []
|
201 |
ban_models_str = os.environ.get("BAN_MODELS")
|
202 |
if ban_models_str:
|
@@ -208,12 +267,10 @@ def refresh_models():
|
|
208 |
except json.JSONDecodeError:
|
209 |
logging.warning("环境变量 BAN_MODELS JSON 解析失败,请检查格式。")
|
210 |
|
211 |
-
# 过滤禁用模型
|
212 |
models["text"] = [model for model in models["text"] if model not in ban_models]
|
213 |
models["embedding"] = [model for model in models["embedding"] if model not in ban_models]
|
214 |
models["image"] = [model for model in models["image"] if model not in ban_models]
|
215 |
|
216 |
-
# 使用统一的测试函数测试各类型模型
|
217 |
model_types = [
|
218 |
("text", "chat"),
|
219 |
("embedding", "embedding"),
|
@@ -240,7 +297,6 @@ def refresh_models():
|
|
240 |
except Exception as exc:
|
241 |
logging.error(f"{model_type}模型 {model} 测试生成异常: {exc}")
|
242 |
|
243 |
-
# 记录日志
|
244 |
for model_type in ["text", "embedding", "image"]:
|
245 |
logging.info(f"所有{model_type}模型列表:{models[model_type]}")
|
246 |
logging.info(f"免费{model_type}模型列表:{models[f'free_{model_type}']}")
|
@@ -248,7 +304,6 @@ def refresh_models():
|
|
248 |
|
249 |
def load_keys():
|
250 |
global key_status
|
251 |
-
# 重置key状态
|
252 |
for status in key_status:
|
253 |
key_status[status] = []
|
254 |
|
@@ -277,11 +332,9 @@ def load_keys():
|
|
277 |
futures = [executor.submit(process_key_with_logging, key) for key in unique_keys]
|
278 |
concurrent.futures.wait(futures)
|
279 |
|
280 |
-
# 记录每种状态的keys
|
281 |
for status, keys in key_status.items():
|
282 |
logging.info(f"{status.capitalize()} KEYS: {keys}")
|
283 |
|
284 |
-
# 更新全局变量
|
285 |
global invalid_keys_global, free_keys_global, unverified_keys_global, valid_keys_global
|
286 |
invalid_keys_global = key_status["invalid"]
|
287 |
free_keys_global = key_status["free"]
|
@@ -454,7 +507,6 @@ def list_models():
|
|
454 |
|
455 |
detailed_models = []
|
456 |
|
457 |
-
# 合并所有类型的模型
|
458 |
all_models = chain(
|
459 |
models["text"],
|
460 |
models["embedding"],
|
@@ -637,68 +689,7 @@ def handsome_images_generations():
|
|
637 |
response_data = {}
|
638 |
|
639 |
if "stable-diffusion" in model_name or model_name in ["black-forest-labs/FLUX.1-schnell", "Pro/black-forest-labs/FLUX.1-schnell","black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-pro"]:
|
640 |
-
siliconflow_data =
|
641 |
-
"model": model_name,
|
642 |
-
"prompt": data.get("prompt"),
|
643 |
-
|
644 |
-
}
|
645 |
-
|
646 |
-
if model_name == "black-forest-labs/FLUX.1-pro":
|
647 |
-
siliconflow_data["width"] = data.get("width", 1024)
|
648 |
-
siliconflow_data["height"] = data.get("height", 768)
|
649 |
-
siliconflow_data["prompt_upsampling"] = data.get("prompt_upsampling", False)
|
650 |
-
siliconflow_data["image_prompt"] = data.get("image_prompt")
|
651 |
-
siliconflow_data["steps"] = data.get("steps", 20)
|
652 |
-
siliconflow_data["guidance"] = data.get("guidance", 3)
|
653 |
-
siliconflow_data["safety_tolerance"] = data.get("safety_tolerance", 2)
|
654 |
-
siliconflow_data["interval"] = data.get("interval", 2)
|
655 |
-
siliconflow_data["output_format"] = data.get("output_format", "png")
|
656 |
-
seed = data.get("seed")
|
657 |
-
if isinstance(seed, int) and 0 < seed < 9999999999:
|
658 |
-
siliconflow_data["seed"] = seed
|
659 |
-
|
660 |
-
if siliconflow_data["width"] < 256 or siliconflow_data["width"] > 1440 or siliconflow_data["width"] % 32 != 0:
|
661 |
-
siliconflow_data["width"] = 1024
|
662 |
-
if siliconflow_data["height"] < 256 or siliconflow_data["height"] > 1440 or siliconflow_data["height"] % 32 != 0:
|
663 |
-
siliconflow_data["height"] = 768
|
664 |
-
|
665 |
-
if siliconflow_data["steps"] < 1 or siliconflow_data["steps"] > 50:
|
666 |
-
siliconflow_data["steps"] = 20
|
667 |
-
if siliconflow_data["guidance"] < 1.5 or siliconflow_data["guidance"] > 5:
|
668 |
-
siliconflow_data["guidance"] = 3
|
669 |
-
if siliconflow_data["safety_tolerance"] < 0 or siliconflow_data["safety_tolerance"] > 6:
|
670 |
-
siliconflow_data["safety_tolerance"] = 2
|
671 |
-
if siliconflow_data["interval"] < 1 or siliconflow_data["interval"] > 4 :
|
672 |
-
siliconflow_data["interval"] = 2
|
673 |
-
else:
|
674 |
-
siliconflow_data["image_size"] = data.get("image_size", "1024x1024")
|
675 |
-
siliconflow_data["prompt_enhancement"] = data.get("prompt_enhancement", False)
|
676 |
-
seed = data.get("seed")
|
677 |
-
if isinstance(seed, int) and 0 < seed < 9999999999:
|
678 |
-
siliconflow_data["seed"] = seed
|
679 |
-
|
680 |
-
if model_name not in ["black-forest-labs/FLUX.1-schnell", "Pro/black-forest-labs/FLUX.1-schnell"]:
|
681 |
-
siliconflow_data["batch_size"] = data.get("n", 1)
|
682 |
-
siliconflow_data["num_inference_steps"] = data.get("steps", 20)
|
683 |
-
siliconflow_data["guidance_scale"] = data.get("guidance_scale", 7.5)
|
684 |
-
siliconflow_data["negative_prompt"] = data.get("negative_prompt")
|
685 |
-
if siliconflow_data["batch_size"] < 1:
|
686 |
-
siliconflow_data["batch_size"] = 1
|
687 |
-
if siliconflow_data["batch_size"] > 4:
|
688 |
-
siliconflow_data["batch_size"] = 4
|
689 |
-
|
690 |
-
if siliconflow_data["num_inference_steps"] < 1:
|
691 |
-
siliconflow_data["num_inference_steps"] = 1
|
692 |
-
if siliconflow_data["num_inference_steps"] > 50:
|
693 |
-
siliconflow_data["num_inference_steps"] = 50
|
694 |
-
|
695 |
-
if siliconflow_data["guidance_scale"] < 0:
|
696 |
-
siliconflow_data["guidance_scale"] = 0
|
697 |
-
if siliconflow_data["guidance_scale"] > 100:
|
698 |
-
siliconflow_data["guidance_scale"] = 100
|
699 |
-
|
700 |
-
if "image_size" in siliconflow_data and siliconflow_data["image_size"] not in ["1024x1024", "512x1024", "768x512", "768x1024", "1024x576", "576x1024","960x1280", "720x1440", "720x1280"]:
|
701 |
-
siliconflow_data["image_size"] = "1024x1024"
|
702 |
|
703 |
try:
|
704 |
start_time = time.time()
|
@@ -814,94 +805,10 @@ def handsome_chat_completions():
|
|
814 |
}
|
815 |
|
816 |
if model_name in models["image"]:
|
817 |
-
|
818 |
-
|
819 |
-
|
820 |
-
|
821 |
-
if isinstance(message["content"], str):
|
822 |
-
user_content += message["content"] + " "
|
823 |
-
elif isinstance(message["content"], list):
|
824 |
-
for item in message["content"]:
|
825 |
-
if (
|
826 |
-
isinstance(item, dict) and
|
827 |
-
item.get("type") == "text"
|
828 |
-
):
|
829 |
-
user_content += (
|
830 |
-
item.get("text", "") +
|
831 |
-
" "
|
832 |
-
)
|
833 |
-
user_content = user_content.strip()
|
834 |
-
|
835 |
-
siliconflow_data = {
|
836 |
-
"model": model_name,
|
837 |
-
"prompt": user_content,
|
838 |
-
}
|
839 |
-
|
840 |
-
if model_name == "black-forest-labs/FLUX.1-pro":
|
841 |
-
siliconflow_data["width"] = data.get("width", 1024)
|
842 |
-
siliconflow_data["height"] = data.get("height", 768)
|
843 |
-
siliconflow_data["prompt_upsampling"] = data.get("prompt_upsampling", False)
|
844 |
-
siliconflow_data["image_prompt"] = data.get("image_prompt")
|
845 |
-
siliconflow_data["steps"] = data.get("steps", 20)
|
846 |
-
siliconflow_data["guidance"] = data.get("guidance", 3)
|
847 |
-
siliconflow_data["safety_tolerance"] = data.get("safety_tolerance", 2)
|
848 |
-
siliconflow_data["interval"] = data.get("interval", 2)
|
849 |
-
siliconflow_data["output_format"] = data.get("output_format", "png")
|
850 |
-
seed = data.get("seed")
|
851 |
-
if isinstance(seed, int) and 0 < seed < 9999999999:
|
852 |
-
siliconflow_data["seed"] = seed
|
853 |
-
if siliconflow_data["width"] < 256 or siliconflow_data["width"] > 1440 or siliconflow_data["width"] % 32 != 0:
|
854 |
-
siliconflow_data["width"] = 1024
|
855 |
-
if siliconflow_data["height"] < 256 or siliconflow_data["height"] > 1440 or siliconflow_data["height"] % 32 != 0:
|
856 |
-
siliconflow_data["height"] = 768
|
857 |
-
|
858 |
-
if siliconflow_data["steps"] < 1 or siliconflow_data["steps"] > 50:
|
859 |
-
siliconflow_data["steps"] = 20
|
860 |
-
if siliconflow_data["guidance"] < 1.5 or siliconflow_data["guidance"] > 5:
|
861 |
-
siliconflow_data["guidance"] = 3
|
862 |
-
if siliconflow_data["safety_tolerance"] < 0 or siliconflow_data["safety_tolerance"] > 6:
|
863 |
-
siliconflow_data["safety_tolerance"] = 2
|
864 |
-
if siliconflow_data["interval"] < 1 or siliconflow_data["interval"] > 4 :
|
865 |
-
siliconflow_data["interval"] = 2
|
866 |
-
else:
|
867 |
-
siliconflow_data["image_size"] = "1024x1024"
|
868 |
-
siliconflow_data["batch_size"] = 1
|
869 |
-
siliconflow_data["num_inference_steps"] = 20
|
870 |
-
siliconflow_data["guidance_scale"] = 7.5
|
871 |
-
siliconflow_data["prompt_enhancement"] = False
|
872 |
-
|
873 |
-
if data.get("size"):
|
874 |
-
siliconflow_data["image_size"] = data.get("size")
|
875 |
-
if data.get("n"):
|
876 |
-
siliconflow_data["batch_size"] = data.get("n")
|
877 |
-
if data.get("steps"):
|
878 |
-
siliconflow_data["num_inference_steps"] = data.get("steps")
|
879 |
-
if data.get("guidance_scale"):
|
880 |
-
siliconflow_data["guidance_scale"] = data.get("guidance_scale")
|
881 |
-
if data.get("negative_prompt"):
|
882 |
-
siliconflow_data["negative_prompt"] = data.get("negative_prompt")
|
883 |
-
if data.get("seed"):
|
884 |
-
siliconflow_data["seed"] = data.get("seed")
|
885 |
-
if data.get("prompt_enhancement"):
|
886 |
-
siliconflow_data["prompt_enhancement"] = data.get("prompt_enhancement")
|
887 |
-
|
888 |
-
if siliconflow_data["batch_size"] < 1:
|
889 |
-
siliconflow_data["batch_size"] = 1
|
890 |
-
if siliconflow_data["batch_size"] > 4:
|
891 |
-
siliconflow_data["batch_size"] = 4
|
892 |
-
|
893 |
-
if siliconflow_data["num_inference_steps"] < 1:
|
894 |
-
siliconflow_data["num_inference_steps"] = 1
|
895 |
-
if siliconflow_data["num_inference_steps"] > 50:
|
896 |
-
siliconflow_data["num_inference_steps"] = 50
|
897 |
-
|
898 |
-
if siliconflow_data["guidance_scale"] < 0:
|
899 |
-
siliconflow_data["guidance_scale"] = 0
|
900 |
-
if siliconflow_data["guidance_scale"] > 100:
|
901 |
-
siliconflow_data["guidance_scale"] = 100
|
902 |
-
|
903 |
-
if siliconflow_data["image_size"] not in ["1024x1024", "512x1024", "768x512", "768x1024", "1024x576", "576x1024", "960x1280", "720x1440", "720x1280"]:
|
904 |
-
siliconflow_data["image_size"] = "1024x1024"
|
905 |
|
906 |
try:
|
907 |
start_time = time.time()
|
@@ -909,7 +816,6 @@ def handsome_chat_completions():
|
|
909 |
IMAGE_ENDPOINT,
|
910 |
headers=headers,
|
911 |
json=siliconflow_data,
|
912 |
-
timeout=120,
|
913 |
stream=data.get("stream", False)
|
914 |
)
|
915 |
|
@@ -1110,8 +1016,7 @@ def handsome_chat_completions():
|
|
1110 |
TEST_MODEL_ENDPOINT,
|
1111 |
headers=headers,
|
1112 |
json=data,
|
1113 |
-
stream=data.get("stream", False)
|
1114 |
-
timeout=600
|
1115 |
)
|
1116 |
|
1117 |
if response.status_code == 429:
|
@@ -1184,24 +1089,7 @@ def handsome_chat_completions():
|
|
1184 |
f"行内容: {line}"
|
1185 |
)
|
1186 |
|
1187 |
-
user_content = ""
|
1188 |
-
messages = data.get("messages", [])
|
1189 |
-
for message in messages:
|
1190 |
-
if message["role"] == "user":
|
1191 |
-
if isinstance(message["content"], str):
|
1192 |
-
user_content += message["content"] + " "
|
1193 |
-
elif isinstance(message["content"], list):
|
1194 |
-
for item in message["content"]:
|
1195 |
-
if (
|
1196 |
-
isinstance(item, dict) and
|
1197 |
-
item.get("type") == "text"
|
1198 |
-
):
|
1199 |
-
user_content += (
|
1200 |
-
item.get("text", "") +
|
1201 |
-
" "
|
1202 |
-
)
|
1203 |
-
|
1204 |
-
user_content = user_content.strip()
|
1205 |
|
1206 |
user_content_replaced = user_content.replace(
|
1207 |
'\n', '\\n'
|
@@ -1254,23 +1142,7 @@ def handsome_chat_completions():
|
|
1254 |
completion_tokens = 0
|
1255 |
response_content = ""
|
1256 |
|
1257 |
-
user_content = ""
|
1258 |
-
messages = data.get("messages", [])
|
1259 |
-
for message in messages:
|
1260 |
-
if message["role"] == "user":
|
1261 |
-
if isinstance(message["content"], str):
|
1262 |
-
user_content += message["content"] + " "
|
1263 |
-
elif isinstance(message["content"], list):
|
1264 |
-
for item in message["content"]:
|
1265 |
-
if (
|
1266 |
-
isinstance(item, dict) and
|
1267 |
-
item.get("type") == "text"
|
1268 |
-
):
|
1269 |
-
user_content += (
|
1270 |
-
item.get("text", "") + " "
|
1271 |
-
)
|
1272 |
-
|
1273 |
-
user_content = user_content.strip()
|
1274 |
|
1275 |
user_content_replaced = user_content.replace(
|
1276 |
'\n', '\\n'
|
|
|
148 |
)
|
149 |
return False
|
150 |
|
151 |
+
def process_image_url(image_url, response_format=None):
    """Wrap an image URL in the response shape the client requested.

    When response_format == "b64_json" the image is downloaded and
    re-encoded as a base64 PNG, returned as {"b64_json": ...}; any
    download/decode failure falls back to the plain URL. Otherwise
    (including an empty url) a {"url": ...} dict is returned.
    """
    if not image_url:
        return {"url": ""}
    if response_format != "b64_json":
        return {"url": image_url}
    try:
        resp = session.get(image_url, stream=True)
        resp.raise_for_status()
        # Decode via PIL and re-save as PNG so the base64 payload has a
        # consistent format regardless of what the upstream host served.
        img = Image.open(resp.raw)
        buffer = io.BytesIO()
        img.save(buffer, format="PNG")
        encoded = base64.b64encode(buffer.getvalue()).decode()
        return {"b64_json": encoded}
    except Exception as e:
        logging.error(f"图片转base64失败: {e}")
        return {"url": image_url}
|
168 |
+
|
169 |
def create_base64_markdown_image(image_url):
    """Download an image, shrink it to one quarter size, and return it
    as an inline base64 PNG markdown link.

    Returns the markdown string, or None on any download/decode error.
    """
    try:
        response = session.get(image_url, stream=True)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))

        # Quarter each dimension, but never below 1px: for source images
        # smaller than 4px on a side, dim // 4 == 0 and resize() raises.
        new_size = tuple(max(1, dim // 4) for dim in image.size)
        resized_image = image.resize(new_size, Image.LANCZOS)

        buffered = BytesIO()
        resized_image.save(buffered, format="PNG")
        base64_encoded = base64.b64encode(buffered.getvalue()).decode('utf-8')

        markdown_image_link = f"![](data:image/png;base64,{base64_encoded})"
        logging.info("Created base64 markdown image link.")
        return markdown_image_link
    except Exception as e:
        logging.error(f"Error creating markdown image: {e}")
        return None
|
188 |
|
189 |
+
def extract_user_content(messages):
    """Concatenate the text content of all user-role messages.

    Handles both plain-string content and the OpenAI list-of-parts
    format (only parts with type == "text" contribute). Returns the
    space-joined text, stripped of surrounding whitespace.
    """
    pieces = []
    for msg in messages:
        if msg["role"] != "user":
            continue
        content = msg["content"]
        if isinstance(content, str):
            pieces.append(content)
        elif isinstance(content, list):
            pieces.extend(
                part.get("text", "")
                for part in content
                if isinstance(part, dict) and part.get("type") == "text"
            )
    return " ".join(pieces).strip()
|
200 |
+
|
201 |
+
def get_siliconflow_data(model_name, data):
    """Build the SiliconFlow image-generation payload for model_name.

    Numeric parameters are clamped into the ranges the API accepts, and
    an unrecognized image_size falls back to "1024x1024". FLUX.1-pro
    takes explicit width/height (multiples of 32); every other model
    takes a named image_size, and non-schnell models additionally take
    batch/steps/guidance controls. The caller's data dict is not
    modified.
    """
    def clamp(value, lo, hi):
        # Keep value inside the inclusive range [lo, hi].
        return max(lo, min(hi, value))

    payload = {
        "model": model_name,
        "prompt": data.get("prompt") or "",
    }

    seed = data.get("seed")
    has_valid_seed = isinstance(seed, int) and 0 < seed < 9999999999

    if model_name == "black-forest-labs/FLUX.1-pro":
        # Round width/height down to a multiple of 32, then clamp.
        payload["width"] = clamp((data.get("width", 1024) // 32) * 32, 256, 1440)
        payload["height"] = clamp((data.get("height", 768) // 32) * 32, 256, 1440)
        payload["prompt_upsampling"] = data.get("prompt_upsampling", False)
        payload["image_prompt"] = data.get("image_prompt")
        payload["steps"] = clamp(data.get("steps", 20), 1, 50)
        payload["guidance"] = clamp(data.get("guidance", 3), 1.5, 5)
        payload["safety_tolerance"] = clamp(data.get("safety_tolerance", 2), 0, 6)
        payload["interval"] = clamp(data.get("interval", 2), 1, 4)
        payload["output_format"] = data.get("output_format", "png")
        if has_valid_seed:
            payload["seed"] = seed
    else:
        payload["image_size"] = data.get("image_size", "1024x1024")
        payload["prompt_enhancement"] = data.get("prompt_enhancement", False)
        if has_valid_seed:
            payload["seed"] = seed
        if model_name not in ("black-forest-labs/FLUX.1-schnell",
                              "Pro/black-forest-labs/FLUX.1-schnell"):
            payload["batch_size"] = clamp(data.get("n", 1), 1, 4)
            payload["num_inference_steps"] = clamp(data.get("steps", 20), 1, 50)
            payload["guidance_scale"] = clamp(data.get("guidance_scale", 7.5), 0, 100)
            payload["negative_prompt"] = data.get("negative_prompt")

    allowed_sizes = {"1024x1024", "512x1024", "768x512", "768x1024",
                     "1024x576", "576x1024", "960x1280", "720x1440", "720x1280"}
    if "image_size" in payload and payload["image_size"] not in allowed_sizes:
        payload["image_size"] = "1024x1024"

    return payload
|
247 |
+
|
248 |
def refresh_models():
|
249 |
global models
|
250 |
|
|
|
251 |
models["text"] = get_all_models(FREE_MODEL_TEST_KEY, "chat")
|
252 |
models["embedding"] = get_all_models(FREE_MODEL_TEST_KEY, "embedding")
|
253 |
models["image"] = get_all_models(FREE_MODEL_TEST_KEY, "text-to-image")
|
254 |
|
|
|
255 |
models["free_text"] = []
|
256 |
models["free_embedding"] = []
|
257 |
models["free_image"] = []
|
258 |
|
|
|
259 |
ban_models = []
|
260 |
ban_models_str = os.environ.get("BAN_MODELS")
|
261 |
if ban_models_str:
|
|
|
267 |
except json.JSONDecodeError:
|
268 |
logging.warning("环境变量 BAN_MODELS JSON 解析失败,请检查格式。")
|
269 |
|
|
|
270 |
models["text"] = [model for model in models["text"] if model not in ban_models]
|
271 |
models["embedding"] = [model for model in models["embedding"] if model not in ban_models]
|
272 |
models["image"] = [model for model in models["image"] if model not in ban_models]
|
273 |
|
|
|
274 |
model_types = [
|
275 |
("text", "chat"),
|
276 |
("embedding", "embedding"),
|
|
|
297 |
except Exception as exc:
|
298 |
logging.error(f"{model_type}模型 {model} 测试生成异常: {exc}")
|
299 |
|
|
|
300 |
for model_type in ["text", "embedding", "image"]:
|
301 |
logging.info(f"所有{model_type}模型列表:{models[model_type]}")
|
302 |
logging.info(f"免费{model_type}模型列表:{models[f'free_{model_type}']}")
|
|
|
304 |
|
305 |
def load_keys():
|
306 |
global key_status
|
|
|
307 |
for status in key_status:
|
308 |
key_status[status] = []
|
309 |
|
|
|
332 |
futures = [executor.submit(process_key_with_logging, key) for key in unique_keys]
|
333 |
concurrent.futures.wait(futures)
|
334 |
|
|
|
335 |
for status, keys in key_status.items():
|
336 |
logging.info(f"{status.capitalize()} KEYS: {keys}")
|
337 |
|
|
|
338 |
global invalid_keys_global, free_keys_global, unverified_keys_global, valid_keys_global
|
339 |
invalid_keys_global = key_status["invalid"]
|
340 |
free_keys_global = key_status["free"]
|
|
|
507 |
|
508 |
detailed_models = []
|
509 |
|
|
|
510 |
all_models = chain(
|
511 |
models["text"],
|
512 |
models["embedding"],
|
|
|
689 |
response_data = {}
|
690 |
|
691 |
if "stable-diffusion" in model_name or model_name in ["black-forest-labs/FLUX.1-schnell", "Pro/black-forest-labs/FLUX.1-schnell","black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-pro"]:
|
692 |
+
siliconflow_data = get_siliconflow_data(model_name, data)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
693 |
|
694 |
try:
|
695 |
start_time = time.time()
|
|
|
805 |
}
|
806 |
|
807 |
if model_name in models["image"]:
|
808 |
+
if isinstance(data.get("messages"), list):
|
809 |
+
data = data.copy()
|
810 |
+
data["prompt"] = extract_user_content(data["messages"])
|
811 |
+
siliconflow_data = get_siliconflow_data(model_name, data)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
812 |
|
813 |
try:
|
814 |
start_time = time.time()
|
|
|
816 |
IMAGE_ENDPOINT,
|
817 |
headers=headers,
|
818 |
json=siliconflow_data,
|
|
|
819 |
stream=data.get("stream", False)
|
820 |
)
|
821 |
|
|
|
1016 |
TEST_MODEL_ENDPOINT,
|
1017 |
headers=headers,
|
1018 |
json=data,
|
1019 |
+
stream=data.get("stream", False)
|
|
|
1020 |
)
|
1021 |
|
1022 |
if response.status_code == 429:
|
|
|
1089 |
f"行内容: {line}"
|
1090 |
)
|
1091 |
|
1092 |
+
user_content = extract_user_content(data.get("messages", []))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1093 |
|
1094 |
user_content_replaced = user_content.replace(
|
1095 |
'\n', '\\n'
|
|
|
1142 |
completion_tokens = 0
|
1143 |
response_content = ""
|
1144 |
|
1145 |
+
user_content = extract_user_content(data.get("messages", []))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1146 |
|
1147 |
user_content_replaced = user_content.replace(
|
1148 |
'\n', '\\n'
|