Update app.py
app.py
CHANGED
@@ -365,6 +365,7 @@ def handsome_chat_completions():
                 "completion_tokens": 0,
                 "total_tokens": 0
             }
+            "model_name": model_name
         }
         return jsonify(canned_response)
     elif any(phrase in user_content_lower for phrase in phrases_to_check):
@@ -404,6 +405,7 @@ def handsome_chat_completions():
                 "completion_tokens": 0,
                 "total_tokens": 0
             }
+            "model_name": model_name
         }
         return jsonify(canned_response)
 
@@ -441,19 +443,6 @@ def handsome_chat_completions():
         def generate():
             first_chunk_time = None
             full_response_content = ""
-
-            message_data = {
-                "choices": [
-                    {
-                        "delta": {
-                            "content": "这是公益api,模型全部可用且保真,请不要对模型进行无意义的测试,请尽量不要使用高级模型解决没必要的问题。\n"
-                        },
-                        "index": 0,
-                        "finish_reason": None
-                    }
-                ]
-            }
-            # yield f"data: {json.dumps(message_data)}\n\n".encode("utf-8")
 
             for chunk in response.iter_content(chunk_size=2048):
                 if chunk:
@@ -481,12 +470,6 @@ def handsome_chat_completions():
                    try:
                        response_json = json.loads(line)
 
-                        # if "usage" in response_json:
-                        #     if "completion_tokens" in response_json["usage"]:
-                        #         completion_tokens += response_json["usage"]["completion_tokens"]
-                        #     if "prompt_tokens" in response_json["usage"]:
-                        #         prompt_tokens += response_json["usage"]["prompt_tokens"]
-
                        if "choices" in response_json:
                            for choice in response_json["choices"]:
                                if "delta" in choice and "content" in choice["delta"]:
@@ -552,7 +535,6 @@ def handsome_chat_completions():
                "choices"
            ][0]["message"]["content"]
            response_content = response_content
-            # response_content = "这是公益api,模型全部可用且保真,请不要对模型进行无意义的测试,请尽量不要使用高级模型解决没必要的问题。\n" + response_content
        except (KeyError, ValueError, IndexError) as e:
            logging.error(
                f"解析非流式响应 JSON 失败: {e}, "
@@ -614,4 +596,4 @@ if __name__ == '__main__':
     refresh_models()
     logging.info("首次刷新模型列表已手动触发执行")
 
-    app.run(debug=False,host='0.0.0.0',port=int(os.environ.get('PORT', 7860)))
+    app.run(debug=False,host='0.0.0.0',port=int(os.environ.get('PORT', 7860)))
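For context, here is a minimal sketch of the canned-response shape that the two "model_name": model_name additions in the first two hunks imply. Only canned_response, model_name, the zeroed completion_tokens/total_tokens counters, handsome_chat_completions, and jsonify come from the file; the route path, the request parsing, and the placeholder message below are assumptions for illustration, not the repo's actual handler.

# Hedged sketch of the canned JSON reply; field names other than the ones
# visible in the diff are assumptions.
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/v1/chat/completions", methods=["POST"])  # route path is an assumption
def handsome_chat_completions():
    # Echo the requested model name back in the canned payload.
    model_name = (request.get_json(silent=True) or {}).get("model", "unknown")
    canned_response = {
        "choices": [
            {"index": 0, "message": {"role": "assistant", "content": "..."}, "finish_reason": "stop"}
        ],
        "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
        "model_name": model_name,  # top-level field added by this commit
    }
    return jsonify(canned_response)

The streaming hunks drop two pieces of dead code: the commented-out notice chunk (the Chinese string roughly reads "this is a free public API; all models are available and faithful, please do not run pointless tests on them and avoid using high-end models for trivial problems") and the commented-out usage-token tally. What remains is the plain walk over choices[].delta.content. The sketch below illustrates that loop, assuming the upstream `response` is a streaming requests response carrying SSE lines of the form "data: {...}" terminated by "[DONE]"; the buffering helper is not the file's actual code.

import json

def collect_stream_content(response):
    # Accumulate streamed assistant text from an OpenAI-style delta stream.
    full_response_content = ""
    buffer = b""
    for chunk in response.iter_content(chunk_size=2048):
        if not chunk:
            continue
        buffer += chunk
        while b"\n" in buffer:
            raw_line, buffer = buffer.split(b"\n", 1)
            line = raw_line.decode("utf-8", errors="ignore").strip()
            if not line.startswith("data:"):
                continue
            payload = line[len("data:"):].strip()
            if payload == "[DONE]":
                return full_response_content
            try:
                response_json = json.loads(payload)
            except ValueError:
                continue
            for choice in response_json.get("choices", []):
                delta = choice.get("delta", {})
                if "content" in delta:
                    full_response_content += delta["content"]
    return full_response_content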