"""This module contains the streaming logic for the API."""

import os
import json
import dhooks
import asyncio
import aiohttp
import starlette.requests

from rich import print
from dotenv import load_dotenv
from python_socks._errors import ProxyError

import proxies
import provider_auth
import load_balancing

from db import logs, users, stats
from helpers import network, chat, errors

load_dotenv()

DEMO_PAYLOAD = {
    'model': 'gpt-3.5-turbo',
    'messages': [
        {
            'role': 'user',
            'content': '1+1='
        }
    ]
}
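
# DEMO_PAYLOAD doubles as the request body for the demo() smoke test at the
# bottom of this file.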


async def stream(
    path: str='/v1/chat/completions',
    user: dict=None,
    payload: dict=None,
    credits_cost: int=0,
    input_tokens: int=0,
    incoming_request: starlette.requests.Request=None,
):
    is_chat = False
    is_stream = payload.get('stream', False) if payload else False

    if 'chat/completions' in path:
        is_chat = True
        model = payload['model']

    # Chat completions always have the same beginning
    if is_chat and is_stream:
        chat_id = await chat.create_chat_id()

        chunk = await chat.create_chat_chunk(
            chat_id=chat_id,
            model=model,
            content=chat.CompletionStart
        )
        yield chunk

        chunk = await chat.create_chat_chunk(
            chat_id=chat_id,
            model=model,
            content=None
        )
        yield chunk
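
        # For reference, every chunk yielded here is assumed to follow the
        # OpenAI-style SSE format, roughly:
        # data: {"id": "<chat_id>", "object": "chat.completion.chunk",
        #        "choices": [{"delta": {...}}]}\n\n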

    json_response = {
        'error': 'No JSON response could be received'
    }

    # Try to get a response from the API
    for _ in range(5):
        headers = {
            'Content-Type': 'application/json'
        }

        # Load balancing:
        # if the request is a chat completion, load balance between chat providers;
        # if it's an organic request, load balance between organic providers.
2023-08-06 00:43:36 +02:00
|
|
|
try:
|
|
|
|
if is_chat:
|
|
|
|
target_request = await load_balancing.balance_chat_request(payload)
|
|
|
|
else:
|
2023-08-12 17:49:31 +02:00
|
|
|
# "organic" means that it's not using a reverse engineered front-end, but rather ClosedAI's API directly
|
|
|
|
# churchless.tech is an example of an organic provider, because it redirects the request to ClosedAI.
|
2023-08-06 00:43:36 +02:00
|
|
|
target_request = await load_balancing.balance_organic_request({
|
|
|
|
'method': incoming_request.method,
|
|
|
|
'path': path,
|
|
|
|
'payload': payload,
|
|
|
|
'headers': headers,
|
|
|
|
'cookies': incoming_request.cookies
|
|
|
|
})

        except ValueError as exc:
            # Error while load balancing? Send a webhook to the admins.
            webhook = dhooks.Webhook(os.getenv('DISCORD_WEBHOOK__API_ISSUE'))
            webhook.send(content=f'API Issue: **`{exc}`**\nhttps://i.imgflip.com/7uv122.jpg')

            error = await errors.yield_error(
                500,
                'Sorry, the API has no working keys anymore.',
                'The admins have been messaged automatically.'
            )
            yield error
            return
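
        # The balancers are assumed to return a dict shaped roughly like this
        # (inferred from how target_request is used below):
        # {
        #     'method': 'POST',
        #     'url': 'https://...',
        #     'payload': {...},        # JSON body, if any
        #     'data': ...,             # raw body, if any
        #     'headers': {...},
        #     'cookies': {...},
        #     'provider_auth': '...',  # credential to invalidate on auth errors
        #     'module': 'twa'          # provider module name
        # }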

        # merge the default headers into the target request's headers,
        # letting provider-specific values take precedence
        target_request['headers'] = {**headers, **target_request.get('headers', {})}

        if target_request['method'] == 'GET' and not payload:
            target_request['payload'] = None

        # We haven't sent any requests yet; everything up to this point was
        # just preparation. Here, we actually process the request.
        async with aiohttp.ClientSession(connector=proxies.get_proxy().connector) as session:
            try:
                async with session.request(
                    method=target_request.get('method', 'POST'),
                    url=target_request['url'],
                    data=target_request.get('data'),
                    json=target_request.get('payload'),
                    headers=target_request.get('headers', {}),
                    cookies=target_request.get('cookies'),
                    ssl=False,
                    timeout=aiohttp.ClientTimeout(total=float(os.getenv('TRANSFER_TIMEOUT', '120'))),
                ) as response:
                    # if the answer is JSON
                    if response.content_type == 'application/json':
                        data = await response.json()
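
                        # a broken key is assumed to surface as something like
                        # {"code": "invalid_api_key", "message": "..."}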

                        # Invalidate the key if it's not working
                        if data.get('code') == 'invalid_api_key':
                            await provider_auth.invalidate_key(target_request.get('provider_auth'))
                            continue

                        if response.ok:
                            json_response = data

                    # if the answer is a stream
                    if is_stream:
                        try:
                            response.raise_for_status()
                        except Exception as exc:
                            # Rate limit? Balance again
                            if 'Too Many Requests' in str(exc):
                                continue

                        try:
                            # process the response chunks
                            async for chunk in response.content.iter_any():
                                send = False
                                chunk = f'{chunk.decode("utf8")}\n\n'

                                if is_chat and '{' in chunk:
                                    # parse the JSON
                                    data = json.loads(chunk.split('data: ')[1])
                                    chunk = chunk.replace(data['id'], chat_id)
                                    send = True
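
                                    # e.g. a provider chunk like
                                    # data: {"id": "chatcmpl-abc123", "choices": [{"delta": {"content": "2"}}]}
                                    # gets re-branded with our own chat_id before being forwarded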

                                    # create a custom chunk if we're using specific providers
                                    if target_request['module'] == 'twa' and data.get('text'):
                                        chunk = await chat.create_chat_chunk(
                                            chat_id=chat_id,
                                            model=model,
                                            content=data['text']
                                        )

                                    # don't send empty/unnecessary messages
                                    if (not data['choices'][0]['delta']) or data['choices'][0]['delta'] == {'role': 'assistant'}:
                                        send = False

                                # send the chunk
                                if send and chunk.strip():
                                    final_chunk = chunk.strip().replace('data: [DONE]', '') + '\n\n'
                                    yield final_chunk

                        except Exception as exc:
                            if 'Connection closed' in str(exc):
                                error = await errors.yield_error(
                                    500,
                                    'Sorry, there was an issue with the connection.',
                                    'Please first check if the issue is on your end. If this error repeats, please don\'t hesitate to contact the staff!'
                                )
                                yield error
                                return

                break

            except ProxyError as exc:
                print('[!] Proxy error:', exc)
                continue

    # Chat completions always have the same ending
    if is_chat and is_stream:
        chunk = await chat.create_chat_chunk(
            chat_id=chat_id,
            model=model,
            content=chat.CompletionStop
        )
        yield chunk

        yield 'data: [DONE]\n\n'

    # If the response is JSON, then we need to yield it like this
    if not is_stream and json_response:
        yield json.dumps(json_response)

    # DONE WITH REQUEST, NOW LOGGING ETC.

    if user and incoming_request:
        await logs.log_api_request(
            user=user,
            incoming_request=incoming_request,
            target_url=target_request['url']
        )

    if credits_cost and user:
        await users.update_by_id(user['_id'], {
            '$inc': {'credits': -credits_cost}
        })

    if incoming_request:
        ip_address = await network.get_ip(incoming_request)
        await stats.add_ip_address(ip_address)

    await stats.add_date()
    await stats.add_path(path)
    await stats.add_target(target_request['url'])

    if is_chat:
        await stats.add_model(model)
        await stats.add_tokens(input_tokens, model)
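

# A minimal smoke test: pipe DEMO_PAYLOAD through stream() and print the raw
# chunks. This is a sketch that assumes providers, proxies and the database
# are reachable; it is not part of the serving path.
async def demo():
    async for chunk in stream(payload=DEMO_PAYLOAD):
        print(chunk)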


if __name__ == '__main__':
    asyncio.run(demo())