import os

import requests
from dotenv import load_dotenv

import proxies
from helpers import exceptions

load_dotenv()

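# load_dotenv() above pulls configuration from a local .env file; the one
# setting read in this module is TRANSFER_TIMEOUT (request timeout in
# seconds, defaulting to 120). A minimal .env sketch:
#
#     TRANSFER_TIMEOUT=120
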
async def stream(request: dict):
    """Relay an HTTP request upstream and re-yield the streamed response."""
    headers = {
        'Content-Type': 'application/json'
    }

    # Merge any caller-supplied headers on top of the defaults.
    for k, v in request.get('headers', {}).items():
        headers[k] = v

    # Try up to three times, retrying only when the upstream rate-limits us
    # with HTTP 429; any other outcome (success or error) stops the retries.
    for _ in range(3):
        response = requests.request(
            method=request.get('method', 'POST'),
            url=request['url'],
            json=request.get('payload', {}),
            headers=headers,
            timeout=int(os.getenv('TRANSFER_TIMEOUT', '120')),
            proxies=proxies.default_proxy.urls,
            stream=True
        )

        try:
            response.raise_for_status()
        except requests.HTTPError:
            if response.status_code == 429:
                continue
        break

    # Relay the upstream body line by line, framed as SSE-style chunks.
    for chunk in response.iter_lines():
        yield f'{chunk.decode("utf8")}\n\n'


if __name__ == '__main__':
    pass
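
    # Hypothetical smoke test, sketched under assumptions: it targets the
    # OpenAI chat-completions endpoint referenced in the retry logic above,
    # and OPENAI_API_KEY plus the payload below are illustrative stand-ins,
    # not part of the service itself.
    import asyncio

    async def _demo():
        async for chunk in stream({
            'url': 'https://api.openai.com/v1/chat/completions',
            'headers': {'Authorization': f'Bearer {os.getenv("OPENAI_API_KEY")}'},
            'payload': {
                'model': 'gpt-3.5-turbo',
                'messages': [{'role': 'user', 'content': 'Say hello.'}],
                'stream': True
            }
        }):
            print(chunk, end='')

    asyncio.run(_demo())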