Mirror of https://github.com/NovaOSS/nova-api.git (synced 2024-11-25 18:53:58 +01:00)
some thingies
parent bf7a6b565a
commit 160ceb5efd
@@ -1,3 +1,4 @@
+import json
 import string
 import random
 import asyncio
@@ -26,11 +27,14 @@ def create_chat_chunk(chat_id: str, model: str, content=None) -> dict:
         'content': content
     }

-    if not isinstance(content, str):
+    if content == CompletionStart:
         delta = {
             'role': 'assistant'
         }

+    if content == CompletionStop:
+        delta = {}
+
     chunk = {
         'id': chat_id,
         'object': 'chat.completion.chunk',
@@ -40,13 +44,12 @@ def create_chat_chunk(chat_id: str, model: str, content=None) -> dict:
            {
                'delta': delta,
                'index': 0,
-                'finish_reason': None if not(isinstance(content, str)) else 'stop'
+                'finish_reason': 'stop' if content == CompletionStop else None
            }
        ],
    }
-    print(chunk)

-    return chunk
+    return f'data: {json.dumps(chunk)}\n\n'

 if __name__ == '__main__':
     demo_chat_id = asyncio.run(create_chat_id())
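The net effect of these two hunks is that create_chat_chunk now emits a ready-to-send server-sent-events line ("data: {...}\n\n") instead of a bare dict, with the delta and finish_reason keyed off the CompletionStart/CompletionStop sentinels. A self-contained sketch of that shape, assuming sentinel objects and a 'created' timestamp field that sit in the elided middle of the real function:

import json
import time

# Hypothetical sentinels standing in for chat.CompletionStart / chat.CompletionStop.
CompletionStart = object()
CompletionStop = object()

def sse_chat_chunk(chat_id: str, model: str, content=None) -> str:
    """Build one OpenAI-style chat.completion.chunk and wrap it as an SSE data line."""
    delta = {'content': content}

    if content is CompletionStart:
        delta = {'role': 'assistant'}
    if content is CompletionStop:
        delta = {}

    chunk = {
        'id': chat_id,
        'object': 'chat.completion.chunk',
        'created': int(time.time()),  # assumed field; not visible in this diff
        'model': model,
        'choices': [{
            'delta': delta,
            'index': 0,
            'finish_reason': 'stop' if content is CompletionStop else None
        }],
    }
    # SSE frames are "data: <payload>" terminated by a blank line.
    return f'data: {json.dumps(chunk)}\n\n'

print(sse_chat_chunk('chatcmpl-demo', 'gpt-3.5-turbo', CompletionStart), end='')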
@@ -2,7 +2,19 @@ import base64
 import asyncio

 async def get_ip(request) -> str:
-    return request.client.host
+    xff = None
+    if request.headers.get('x-forwarded-for'):
+        xff, *_ = request.headers['x-forwarded-for'].split(', ')
+
+    possible_ips = [
+        xff,
+        request.headers.get('cf-connecting-ip'),
+        request.client.host
+    ]
+
+    detected_ip = next((i for i in possible_ips if i), None)
+
+    return detected_ip

 async def add_proxy_auth_to_headers(username: str, password: str, headers: dict) -> dict:
     proxy_auth = base64.b64encode(f'{username}:{password}'.encode()).decode()
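The rewritten get_ip prefers the left-most X-Forwarded-For entry, then Cloudflare's CF-Connecting-IP header, and only then the raw socket peer. A minimal sketch of the same fallback order using plain dicts instead of a Starlette request (the helper name is hypothetical, not from the repo):

def pick_client_ip(headers: dict, socket_peer: str) -> str:
    xff = None
    if headers.get('x-forwarded-for'):
        # The left-most entry is the original client; proxies append their own IPs after it.
        xff, *_ = headers['x-forwarded-for'].split(', ')

    candidates = [xff, headers.get('cf-connecting-ip'), socket_peer]
    # First truthy candidate wins; fall back to the raw socket address.
    return next((ip for ip in candidates if ip), socket_peer)

assert pick_client_ip({'x-forwarded-for': '1.2.3.4, 10.0.0.1'}, '10.0.0.1') == '1.2.3.4'
assert pick_client_ip({'cf-connecting-ip': '5.6.7.8'}, '10.0.0.1') == '5.6.7.8'
assert pick_client_ip({}, '10.0.0.1') == '10.0.0.1'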
@@ -4,11 +4,11 @@ import asyncio
 import chat_providers

 provider_modules = [
-    chat_providers.twa,
+    # chat_providers.twa,
     # chat_providers.quantum,
-    # chat_providers.churchless,
-    # chat_providers.closed,
-    # chat_providers.closed4
+    chat_providers.churchless,
+    chat_providers.closed,
+    chat_providers.closed4
 ]

 def _get_module_name(module) -> str:
@@ -29,6 +29,9 @@ async def balance_chat_request(payload: dict) -> dict:

             providers_available.append(provider_module)

+    if not providers_available:
+        raise NotImplementedError('This model does not exist.')
+
     provider = random.choice(providers_available)
     target = provider.chat_completion(**payload)
     target['module'] = _get_module_name(provider)
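balance_chat_request collects the provider modules that claim to serve the requested model and then picks one at random; the new guard turns an empty candidate list into an explicit error instead of an IndexError from random.choice. A sketch of that selection step, where the MODELS attribute is an assumed convention rather than the repo's actual provider API:

import random
from types import SimpleNamespace

def pick_provider(model: str, provider_modules: list):
    # Collect every module that advertises the requested model
    # (a MODELS list is assumed here; the real attribute isn't shown in the diff).
    providers_available = [m for m in provider_modules if model in getattr(m, 'MODELS', [])]

    if not providers_available:
        raise NotImplementedError('This model does not exist.')

    # Load balancing is a uniform random pick over the candidates.
    return random.choice(providers_available)

closed = SimpleNamespace(name='closed', MODELS=['gpt-3.5-turbo', 'gpt-4'])
churchless = SimpleNamespace(name='churchless', MODELS=['gpt-3.5-turbo'])
print(pick_provider('gpt-3.5-turbo', [closed, churchless]).name)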
@@ -2,9 +2,9 @@

 import fastapi

-from fastapi.middleware.cors import CORSMiddleware
+from rich import print

 from dotenv import load_dotenv
+from fastapi.middleware.cors import CORSMiddleware

 import core
 import transfer
@@ -7,6 +7,7 @@ import asyncio
 import aiohttp
 import aiohttp_socks

+from rich import print
 from dotenv import load_dotenv

 load_dotenv()
@@ -71,6 +72,7 @@ class Proxy:

 proxies_in_files = []

+try:
     for proxy_type in ['http', 'socks4', 'socks5']:
         with open(f'secret/proxies/{proxy_type}.txt') as f:
             for line in f.readlines():
@@ -79,6 +81,8 @@ for proxy_type in ['http', 'socks4', 'socks5']:
                 line = line.split('#')[0]

                 proxies_in_files.append(f'{proxy_type}://{line.strip()}')
+except FileNotFoundError:
+    pass

 class ProxyChain:
     def __init__(self):
@@ -87,7 +91,11 @@ class ProxyChain:
         self.get_random = Proxy(url=random_proxy)
         self.connector = aiohttp_socks.ChainProxyConnector.from_urls(proxies_in_files)

+try:
     default_chain = ProxyChain()
+    random_proxy = ProxyChain().get_random
+except IndexError:
+    pass

 default_proxy = Proxy(
     proxy_type=os.getenv('PROXY_TYPE', 'http'),
@@ -97,7 +105,6 @@ default_proxy = Proxy(
     password=os.getenv('PROXY_PASS')
 )

-random_proxy = ProxyChain().get_random

 def test_httpx_workaround():
     import httpx
@@ -129,24 +136,11 @@ async def test_aiohttp_socks():

 async def streaming_aiohttp_socks():
     async with aiohttp.ClientSession(connector=default_proxy.connector) as session:
-        async with session.post(
-            'https://free.churchless.tech/v1/chat/completions',
-            json={
-                "model": "gpt-3.5-turbo",
-                "messages": [
-                    {
-                        "role": "user",
-                        "content": "Hi"
-                    }
-                ],
-                "stream": True
-            },
-            # headers={
-            #     'Authorization': 'Bearer MyDiscord'
-            # }
-        ) as response:
-            html = await response.text()
-            return html.strip()
+        async with session.get('https://httpbin.org/get', headers={
+            'Authorization': 'x'
+        }) as response:
+            json = await response.json()
+            return json

 async def text_httpx_socks():
     import httpx
@@ -163,5 +157,5 @@ if __name__ == '__main__':
     # print(test_httpx())
     # print(test_requests())
     # print(asyncio.run(test_aiohttp_socks()))
-    # print(asyncio.run(streaming_aiohttp_socks()))
-    print(asyncio.run(text_httpx_socks()))
+    print(asyncio.run(streaming_aiohttp_socks()))
+    # print(asyncio.run(text_httpx_socks()))
@@ -7,6 +7,7 @@ import starlette

 from rich import print
 from dotenv import load_dotenv
+from python_socks._errors import ProxyError

 import proxies
 import load_balancing
@@ -38,6 +39,7 @@ async def stream(
     input_tokens: int=0,
     incoming_request: starlette.requests.Request=None,
 ):
+
     payload = payload or DEMO_PAYLOAD
     is_chat = False

@@ -46,31 +48,38 @@ async def stream(
         chat_id = await chat.create_chat_id()
         model = payload['model']

-        chat_chunk = chat.create_chat_chunk(
+        yield chat.create_chat_chunk(
             chat_id=chat_id,
             model=model,
             content=chat.CompletionStart
         )
-        data = json.dumps(chat_chunk)

-        chunk = f'data: {data}'
-        yield chunk
+        yield chat.create_chat_chunk(
+            chat_id=chat_id,
+            model=model,
+            content=None
+        )

     for _ in range(5):
-        if is_chat:
-            target_request = await load_balancing.balance_chat_request(payload)
-        else:
-            target_request = await load_balancing.balance_organic_request(payload)

         headers = {
             'Content-Type': 'application/json'
         }

+        if is_chat:
+            target_request = await load_balancing.balance_chat_request(payload)
+        else:
+            target_request = await load_balancing.balance_organic_request({
+                'path': path,
+                'payload': payload,
+                'headers': headers
+            })
+
         for k, v in target_request.get('headers', {}).items():
             headers[k] = v

-        async with aiohttp.ClientSession(connector=proxies.random_proxy.connector) as session:
+        async with aiohttp.ClientSession(connector=proxies.default_proxy.connector) as session:
+            try:
                 async with session.request(
                     method=target_request.get('method', 'POST'),
                     url=target_request['url'],
@@ -85,11 +94,13 @@ async def stream(

                     timeout=aiohttp.ClientTimeout(total=float(os.getenv('TRANSFER_TIMEOUT', '120'))),
                 ) as response:

                     try:
-                        await response.raise_for_status()
+                        response.raise_for_status()
                     except Exception as exc:
+                        if 'Too Many Requests' in str(exc):
+                            print(429)
                         continue
-                        # if 'Too Many Requests' in str(exc):

                     if user and incoming_request:
                         await logs.log_api_request(
@@ -103,6 +114,29 @@ async def stream(
                             '$inc': {'credits': -credits_cost}
                         })

+                    try:
+                        async for chunk in response.content.iter_any():
+                            chunk = f'{chunk.decode("utf8")}\n\n'
+
+                            if chunk.strip():
+                                if is_chat:
+                                    if target_request['module'] == 'twa':
+                                        data = json.loads(chunk.split('data: ')[1])
+
+                                        if data.get('text'):
+                                            chunk = chat.create_chat_chunk(
+                                                chat_id=chat_id,
+                                                model=model,
+                                                content=['text']
+                                            )
+                                yield chunk
+
+                    except Exception as exc:
+                        if 'Connection closed' in str(exc):
+                            print('connection closed')
+                            continue
+
                     if not demo_mode:
                         ip_address = await network.get_ip(incoming_request)
@@ -115,26 +149,13 @@ async def stream(
                         await stats.add_model(model)
                         await stats.add_tokens(input_tokens, model)

-                        async for chunk in response.content.iter_any():
-                            chunk = f'{chunk.decode("utf8")}\n\n'
-
-                            if chunk.strip():
-                                if is_chat:
-                                    if target_request['module'] == 'twa':
-                                        data = json.loads(chunk.split('data: ')[1])
-
-                                        if data.get('text'):
-                                            chat_chunk = chat.create_chat_chunk(
-                                                chat_id=chat_id,
-                                                model=model,
-                                                content=['text']
-                                            )
-                                            data = json.dumps(chat_chunk)
-
-                                            chunk = f'data: {data}'
-
-                            yield chunk
                     break

+            except ProxyError:
+                print('proxy error')
+                continue
+
+    print(3)
     if is_chat:
         chat_chunk = chat.create_chat_chunk(
             chat_id=chat_id,
@@ -143,8 +164,7 @@ async def stream(
         )
         data = json.dumps(chat_chunk)

-        yield f'data: {data}'
-        yield 'data: [DONE]'
+        yield 'data: [DONE]\n\n'

 if __name__ == '__main__':
     asyncio.run(stream())
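With this change the stream ends with an empty-delta chunk followed by a literal 'data: [DONE]\n\n' frame, which is what OpenAI-style streaming clients use to stop reading. A toy async generator showing that overall frame sequence (field values are illustrative only, not taken from the repo):

import asyncio
import json

async def demo_stream():
    def sse(payload: dict) -> str:
        # Each SSE frame is "data: <json>" followed by a blank line.
        return f'data: {json.dumps(payload)}\n\n'

    # Opening chunk sets the assistant role, content chunks follow, then the terminator.
    yield sse({'choices': [{'delta': {'role': 'assistant'}, 'index': 0, 'finish_reason': None}]})
    for word in ['Hello', ' world']:
        yield sse({'choices': [{'delta': {'content': word}, 'index': 0, 'finish_reason': None}]})
    yield sse({'choices': [{'delta': {}, 'index': 0, 'finish_reason': 'stop'}]})
    yield 'data: [DONE]\n\n'

async def main():
    async for frame in demo_stream():
        print(frame, end='')

if __name__ == '__main__':
    asyncio.run(main())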
@@ -86,8 +86,27 @@ def test_all():
     # print(test_api())
     print(test_library())

+
+def test_api(model: str=MODEL, messages: List[dict]=None) -> dict:
+    """Tests an API api_endpoint."""
+
+    headers = {
+        'Authorization': 'Bearer ' + api_key
+    }
+
+    response = httpx.get(
+        url=f'{api_endpoint}/v1/usage',
+        headers=headers,
+        timeout=20
+    )
+    response.raise_for_status()
+
+    return response.text
+
 if __name__ == '__main__':
     # api_endpoint = 'https://api.nova-oss.com'
-    api_endpoint = 'http://localhost:2332'
+    api_endpoint = 'https://alpha-api.nova-oss.com'
     api_key = os.getenv('TEST_NOVA_KEY')
-    test_all()
+    # test_all()
+
+    print(test_api())