Added more documentation

This commit is contained in:
nsde 2023-08-12 17:49:31 +02:00
parent 23d463fad8
commit a7bd57c5a8
14 changed files with 136 additions and 55 deletions

3
.gitignore vendored
View file

@ -1,8 +1,5 @@
last_update.txt
!api/providers/__main__.py
!api/providers/helpers/utils.py
*.log.json
/logs
/log

View file

@ -63,14 +63,3 @@ async def create_user(incoming_request: fastapi.Request):
await new_user_webhook(user)
return user
if __name__ == '__main__':
# new_user_webhook({
# '_id': 'JUST_A_TEST_IGNORE_ME',
# 'auth': {
# 'discord': 123,
# 'github': 'abc'
# }
# })
pass

View file

@ -12,12 +12,16 @@ class CompletionStop:
"""End of a chat"""
async def create_chat_id() -> str:
    """Generate a random chat completion ID of the form 'chatcmpl-<32 alphanumeric chars>'."""
    alphabet = string.ascii_letters + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(32))
    return f'chatcmpl-{suffix}'
async def create_chat_chunk(chat_id: str, model: str, content=None) -> dict:
"""Creates a new chat chunk"""
content = content or {}
delta = {}

View file

@ -2,6 +2,8 @@ import json
import starlette
async def error(code: int, message: str, tip: str) -> starlette.responses.Response:
"""Returns a starlette response JSON with the given error code, message and tip."""
info = {'error': {
'code': code,
'message': message,
@ -13,6 +15,8 @@ async def error(code: int, message: str, tip: str) -> starlette.responses.Respon
return starlette.responses.Response(status_code=code, content=json.dumps(info))
async def yield_error(code: int, message: str, tip: str) -> str:
"""Returns a dumped JSON response with the given error code, message and tip."""
return json.dumps({
'code': code,
'message': message,

View file

@ -1,2 +0,0 @@
class Retry(Exception):
    """Raised to signal that the server should retry the request."""

View file

@ -2,6 +2,8 @@ import base64
import asyncio
async def get_ip(request) -> str:
"""Get the IP address of the incoming request."""
xff = None
if request.headers.get('x-forwarded-for'):
xff, *_ = request.headers['x-forwarded-for'].split(', ')
@ -17,6 +19,8 @@ async def get_ip(request) -> str:
return detected_ip
async def add_proxy_auth_to_headers(username: str, password: str, headers: dict) -> dict:
    """Attach an HTTP Basic Proxy-Authorization header to the given headers.

    Mutates and returns the same headers dict. Useful when the proxy's own
    authentication mechanism doesn't work as it should.
    """
    credentials = f'{username}:{password}'.encode()
    token = base64.b64encode(credentials).decode()
    headers['Proxy-Authorization'] = f'Basic {token}'
    return headers

View file

@ -25,13 +25,13 @@ async def count_for_messages(messages: list, model: str='gpt-3.5-turbo-0613') ->
tokens_per_name = -1 # if there's a name, the role is omitted
elif 'gpt-3.5-turbo' in model:
return num_tokens_from_messages(messages, model='gpt-3.5-turbo-0613')
return count_for_messages(messages, model='gpt-3.5-turbo-0613')
elif 'gpt-4' in model:
return num_tokens_from_messages(messages, model='gpt-4-0613')
return count_for_messages(messages, model='gpt-4-0613')
else:
raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}.
raise NotImplementedError(f"""count_for_messages() is not implemented for model {model}.
See https://github.com/openai/openai-python/blob/main/chatml.md
for information on how messages are converted to tokens.""")

View file

@ -10,7 +10,8 @@ async def _get_module_name(module) -> str:
return name
async def balance_chat_request(payload: dict) -> dict:
"""Load balance the chat completion request between chat providers."""
"""Load balance the chat completion request between chat providers.
"""
providers_available = []
@ -35,7 +36,9 @@ async def balance_chat_request(payload: dict) -> dict:
return target
async def balance_organic_request(request: dict) -> dict:
"""Load balnace to non-chat completion request between other "organic" providers which respond in the desired format already."""
"""Load balnace to non-chat completion request between other "organic" providers which respond in the desired format already.
Organic providers are used for non-chat completions, such as moderation and other paths.
"""
providers_available = []

View file

@ -1,3 +1,5 @@
"""This module contains functions for checking if a message violates the moderation policy."""
import asyncio
import aiohttp
@ -5,7 +7,16 @@ import proxies
import provider_auth
import load_balancing
async def is_policy_violated(inp) -> bool:
from typing import Union
async def is_policy_violated(inp: Union[str, list]) -> bool:
"""Check if a message violates the moderation policy.
You can either pass a list of messages consisting of dicts with "role" and "content", as used in the API parameter,
or just a simple string.
Returns True if the message violates the policy, False otherwise.
"""
text = inp
if isinstance(inp, list):

View file

@ -1,6 +1,14 @@
"""This module contains functions for authenticating with providers."""
import asyncio
async def invalidate_key(provider_and_key):
async def invalidate_key(provider_and_key: str) -> none:
"""Invalidates a key stored in the secret/ folder by storing it in the associated .invalid.txt file.
The schmea in which <provider_and_key> should be passed is:
<provider_name><key>, e.g.
closed4>sk-...
"""
if not provider_and_key:
return

View file

@ -12,8 +12,14 @@ from dotenv import load_dotenv
load_dotenv()
USE_PROXY_LIST = os.getenv('USE_PROXY_LIST', 'False').lower() == 'true'
class Proxy:
"""Represents a proxy. The type can be either http, https, socks4 or socks5."""
"""Represents a proxy. The type can be either http, https, socks4 or socks5.
You can also pass a url, which will be parsed into the other attributes.
URL format:
[type]://[username:password@]host:port
"""
def __init__(self,
url: str=None,
@ -36,7 +42,7 @@ class Proxy:
self.proxy_type = proxy_type
self.host_or_ip = host_or_ip
self.ip_address = socket.gethostbyname(self.host_or_ip) #if host_or_ip.replace('.', '').isdigit() else host_or_ip
self.ip_address = socket.gethostbyname(self.host_or_ip) # get ip address from host
self.host = self.host_or_ip
self.port = port
self.username = username
@ -54,6 +60,8 @@ class Proxy:
@property
def connector(self):
"""Returns an aiohttp_socks.ProxyConnector object. Which can be used in aiohttp.ClientSession."""
proxy_types = {
'http': aiohttp_socks.ProxyType.HTTP,
'https': aiohttp_socks.ProxyType.HTTP,
@ -65,11 +73,13 @@ class Proxy:
proxy_type=proxy_types[self.proxy_type],
host=self.ip_address,
port=self.port,
rdns=False,
rdns=False, # remote DNS
username=self.username,
password=self.password
)
# load proxies from files
proxies_in_files = []
try:
@ -84,27 +94,19 @@ try:
except FileNotFoundError:
pass
class ProxyChain:
# Proxy lists support
class ProxyLists:
    """Exposes the proxies loaded from the secret/proxies/ files.

    Attributes set in __init__:
      - get_random: a single Proxy chosen at random from the loaded list.
      - connector: an aiohttp_socks chained connector over all loaded proxy URLs.
    """

    def __init__(self):
        # random.choice raises IndexError if no proxies were loaded from files;
        # callers are expected to handle that case.
        random_proxy = random.choice(proxies_in_files)
        # one randomly chosen proxy, wrapped in a Proxy object
        self.get_random = Proxy(url=random_proxy)
        # connector that chains every loaded proxy URL in sequence
        self.connector = aiohttp_socks.ChainProxyConnector.from_urls(proxies_in_files)
try:
default_chain = ProxyChain()
random_proxy = ProxyChain().get_random
except IndexError:
pass
default_proxy = Proxy(
proxy_type=os.getenv('PROXY_TYPE', 'http'),
host_or_ip=os.getenv('PROXY_HOST', '127.0.0.1'),
port=int(os.getenv('PROXY_PORT', '8080')),
username=os.getenv('PROXY_USER'),
password=os.getenv('PROXY_PASS')
)
# ================================================================================================================================ #
# Proxy tests
# Can be useful if you want to troubleshoot your proxies
def test_httpx_workaround():
import httpx
@ -153,6 +155,22 @@ async def text_httpx_socks():
res = await client.get('https://checkip.amazonaws.com')
return res.text
# ================================================================================================================================ #
def get_proxy() -> Proxy:
    """Return a Proxy object.

    When USE_PROXY_LIST is enabled, a random proxy from the loaded proxy
    lists is returned; otherwise one is built from the PROXY_* environment
    variables (defaulting to an HTTP proxy on 127.0.0.1:8080).
    """
    if not USE_PROXY_LIST:
        return Proxy(
            proxy_type=os.getenv('PROXY_TYPE', 'http'),
            host_or_ip=os.getenv('PROXY_HOST', '127.0.0.1'),
            port=int(os.getenv('PROXY_PORT', '8080')),
            username=os.getenv('PROXY_USER'),
            password=os.getenv('PROXY_PASS')
        )
    return ProxyLists().get_random
if __name__ == '__main__':
# print(test_httpx())
# print(test_requests())

View file

@ -1,3 +1,5 @@
"""This module contains the streaming logic for the API."""
import os
import json
import dhooks
@ -33,7 +35,6 @@ async def stream(
user: dict=None,
payload: dict=None,
credits_cost: int=0,
demo_mode: bool=False,
input_tokens: int=0,
incoming_request: starlette.requests.Request=None,
):
@ -44,6 +45,7 @@ async def stream(
is_chat = True
model = payload['model']
# Chat completions always have the same beginning
if is_chat and is_stream:
chat_id = await chat.create_chat_id()
@ -66,15 +68,22 @@ async def stream(
'error': 'No JSON response could be received'
}
# Try to get a response from the API
for _ in range(5):
headers = {
'Content-Type': 'application/json'
}
# Load balancing
# If the request is a chat completion, then we need to load balance between chat providers
# If the request is an organic request, then we need to load balance between organic providers
try:
if is_chat:
target_request = await load_balancing.balance_chat_request(payload)
else:
# "organic" means that it's not using a reverse engineered front-end, but rather ClosedAI's API directly
# churchless.tech is an example of an organic provider, because it redirects the request to ClosedAI.
target_request = await load_balancing.balance_organic_request({
'method': incoming_request.method,
'path': path,
@ -83,9 +92,10 @@ async def stream(
'cookies': incoming_request.cookies
})
except ValueError as exc:
# Error load balancing? Send a webhook to the admins
webhook = dhooks.Webhook(os.getenv('DISCORD_WEBHOOK__API_ISSUE'))
webhook.send(content=f'API Issue: **`{exc}`**\nhttps://i.imgflip.com/7uv122.jpg')
error = await errors.yield_error(
500,
'Sorry, the API has no working keys anymore.',
@ -100,7 +110,9 @@ async def stream(
if target_request['method'] == 'GET' and not payload:
target_request['payload'] = None
async with aiohttp.ClientSession(connector=proxies.default_proxy.connector) as session:
# We haven't done any requests as of right now, everything until now was just preparation
# Here, we process the request
async with aiohttp.ClientSession(connector=proxies.get_proxy().connector) as session:
try:
async with session.request(
method=target_request.get('method', 'POST'),
@ -116,9 +128,11 @@ async def stream(
timeout=aiohttp.ClientTimeout(total=float(os.getenv('TRANSFER_TIMEOUT', '120'))),
) as response:
# if the answer is JSON
if response.content_type == 'application/json':
data = await response.json()
# Invalidate the key if it's not working
if data.get('code') == 'invalid_api_key':
await provider_auth.invalidate_key(target_request.get('provider_auth'))
continue
@ -126,37 +140,40 @@ async def stream(
if response.ok:
json_response = data
# if the answer is a stream
if is_stream:
try:
response.raise_for_status()
except Exception as exc:
# Rate limit? Balance again
if 'Too Many Requests' in str(exc):
continue
try:
# process the response chunks
async for chunk in response.content.iter_any():
send = False
chunk = f'{chunk.decode("utf8")}\n\n'
chunk = chunk.replace(os.getenv('MAGIC_WORD', 'novaOSScheckKeyword'), payload['model'])
chunk = chunk.replace(os.getenv('MAGIC_USER_WORD', 'novaOSSuserKeyword'), str(user['_id']))
if is_chat and '{' in chunk:
# parse the JSON
data = json.loads(chunk.split('data: ')[1])
chunk = chunk.replace(data['id'], chat_id)
send = True
# create a custom chunk if we're using specific providers
if target_request['module'] == 'twa' and data.get('text'):
chunk = await chat.create_chat_chunk(
chat_id=chat_id,
model=model,
content=['text']
)
if not data['choices'][0]['delta']:
send = False
if data['choices'][0]['delta'] == {'role': 'assistant'}:
# don't send empty/unnecessary messages
if (not data['choices'][0]['delta']) or data['choices'][0]['delta'] == {'role': 'assistant'}:
send = False
# send the chunk
if send and chunk.strip():
final_chunk = chunk.strip().replace('data: [DONE]', '') + '\n\n'
yield final_chunk
@ -174,9 +191,10 @@ async def stream(
break
except ProxyError as exc:
print('proxy error')
print('[!] Proxy error:', exc)
continue
# Chat completions always have the same ending
if is_chat and is_stream:
chunk = await chat.create_chat_chunk(
chat_id=chat_id,
@ -186,10 +204,11 @@ async def stream(
yield chunk
yield 'data: [DONE]\n\n'
# If the response is JSON, then we need to yield it like this
if not is_stream and json_response:
yield json.dumps(json_response)
# DONE =========================================================
# DONE WITH REQUEST, NOW LOGGING ETC.
if user and incoming_request:
await logs.log_api_request(

View file

@ -1,4 +1,4 @@
"""Module for transferring requests to ClosedAI API"""
"""Does quite a few checks and prepares the incoming request for the target endpoint, so it can be streamed"""
import json
import yaml

View file

@ -36,6 +36,9 @@ Set up a MongoDB database and set `MONGO_URI` to the MongoDB database connection
- `PROXY_USER` (optional)
- `PROXY_PASS` (optional)
Want to use a proxy list? See the according section!
Keep in mind to set `USE_PROXY_LIST` to `True`! Otherwise, the proxy list won't be used.
### `ACTUAL_IPS` (optional)
This is a security measure to make sure a proxy, VPN, Tor or any other IP hiding service is used by the host when accessing "Closed"AI's API.
It is a space separated list of IP addresses that are allowed to access the API.
@ -49,13 +52,36 @@ You can also just add the *beginning* of an API address, like `12.123.` (without
`CORE_API_KEY` specifies the **very secret key** which is needed to access the entire user database etc.
`TEST_NOVA_KEY` is the API key which is used in tests. It should be one with tons of credits.
## Webhooks
### Webhooks
`DISCORD_WEBHOOK__USER_CREATED` is the Discord webhook URL for when a user is created.
`DISCORD_WEBHOOK__API_ISSUE` is the Discord webhook URL for when an API issue occurs.
## Other
### Other
`KEYGEN_INFIX` can be almost any string (avoid spaces or special characters) - this string will be put in the middle of every NovaAI API key which is generated. This is useful for identifying the source of the key using e.g. RegEx.
## Proxy Lists
To use proxy lists, navigate to `api/secret/proxies/` and create the following files:
- `http.txt`
- `socks4.txt`
- `socks5.txt`
Then, paste your proxies in the following format:
```
[username:password@]host:port
```
e.g.
```
1.2.3.4:8080
user:pass@127.0.0.1:1337
```
You can use comments just like in Python.
**Important:** to use the proxy lists, you need to change the `USE_PROXY_LIST` environment variable to `True`!
## Run
> **Warning:** read the according section for production usage!