Update api/utils.py

api/utils.py  +3 -20  CHANGED
@@ -12,7 +12,6 @@ from fastapi.security import HTTPAuthorizationCredentials
 
 from api.config import APP_SECRET, BASE_URL
 from api.models import ChatRequest
-
 from api.logger import setup_logger
 
 logger = setup_logger(__name__)

@@ -60,7 +59,6 @@ async def process_streaming_response(request: ChatRequest):
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
-    # Log reduced information
     logger.info(
         f"Streaming request for model: '{request.model}', "
         f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"

@@ -101,17 +99,7 @@ async def process_streaming_response(request: ChatRequest):
             response.raise_for_status()
             async for line in response.aiter_lines():
                 timestamp = int(datetime.now().timestamp())
-
-                content = line if line.endswith("\n") else line + "\n"
-                if "https://www.blackbox.ai" in content:
-                    validate.getHid(True)
-                    content = "Hid has been refreshed; feel free to restart the conversation.\n"
-                    yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
-                    break
-                if content.startswith("$@$v=undefined-rv1$@$"):
-                    yield f"data: {json.dumps(create_chat_completion_data(content[21:], request.model, timestamp))}\n\n"
-                else:
-                    yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
+                yield f"data: {json.dumps(create_chat_completion_data(line, request.model, timestamp))}\n\n"
 
             yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
             yield "data: [DONE]\n\n"
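Note on the chunk framing this change keeps: create_chat_completion_data is defined elsewhere in api/utils.py and is not shown in these hunks, so its exact shape is an assumption, but the 'stop' finish reason and the closing "data: [DONE]" sentinel suggest it builds an OpenAI-compatible chat.completion.chunk payload roughly like this sketch.

# Sketch only, not the actual helper from this file: an assumed OpenAI-style
# streaming-chunk layout implied by the 'stop' finish reason and "data: [DONE]".
import uuid


def create_chat_completion_data(content, model, timestamp, finish_reason=None):
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }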
@@ -126,7 +114,6 @@ async def process_non_streaming_response(request: ChatRequest):
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
-    # Log reduced information
     logger.info(
         f"Non-streaming request for model: '{request.model}', "
         f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"

@@ -161,12 +148,8 @@ async def process_non_streaming_response(request: ChatRequest):
             method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
         ) as response:
             async for chunk in response.aiter_text():
-                full_response += chunk
-
-    validate.getHid(True)
-    full_response = "Hid has been refreshed; feel free to restart the conversation."
-    if full_response.startswith("$@$v=undefined-rv1$@$"):
-        full_response = full_response[21:]
+                full_response += chunk
+
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
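The second hunk ends inside the return statement, so the rest of the payload is not visible here. Assuming the handler returns the standard non-streaming chat.completion shape built from the accumulated full_response, the tail presumably continues along these lines; build_completion_payload is a hypothetical name used only for this sketch, and choices, message, and usage are assumed fields, not taken from the diff.

# Sketch under the assumptions above, not the file's actual contents.
import uuid
from datetime import datetime


def build_completion_payload(full_response, model):
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }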
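To sanity-check the simplified streaming path end to end, a small client like the one below can be pointed at the running service. The /v1/chat/completions route, port, model name, and auth handling are assumptions about how the FastAPI app is deployed; nothing in this diff fixes them, and the chunk parsing assumes the delta layout sketched earlier.

# Hypothetical smoke test for the streaming handler after this change.
# Adjust URL, model, and auth to match the actual deployment.
import json

import httpx


def stream_chat(prompt, model="blackboxai", base="http://localhost:8000"):
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "stream": True,
    }
    # If APP_SECRET-based bearer auth is enforced, add an Authorization header here.
    pieces = []
    with httpx.Client(timeout=60) as client:
        with client.stream("POST", f"{base}/v1/chat/completions", json=payload) as response:
            response.raise_for_status()
            for raw in response.iter_lines():
                if not raw.startswith("data: "):
                    continue
                data = raw[len("data: "):]
                if data == "[DONE]":
                    break
                chunk = json.loads(data)
                pieces.append(chunk["choices"][0]["delta"].get("content") or "")
    return "".join(pieces)


if __name__ == "__main__":
    print(stream_chat("Hello!"))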