Codebase changes + a lot of commenting

This commit is contained in:
Game_Time 2023-08-13 20:12:35 +05:00
parent 60a89146da
commit 6ecc5f59ce
7 changed files with 53 additions and 9 deletions

View file

@@ -24,11 +24,24 @@ async def _get_collection(collection_name: str):
return conn['nova-core'][collection_name]
async def replacer(text: str, dict_: dict) -> str:
# This seems to exist for a very specific and dumb purpose :D
for k, v in dict_.items():
text = text.replace(k, v)
return text
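
For what it's worth, a quick usage sketch of replacer: every key in the dict is swapped for its value, in insertion order. The template and values below are made up.

import asyncio

async def replacer(text: str, dict_: dict) -> str:
    # Same logic as above: each key is replaced by its value, in insertion order.
    for k, v in dict_.items():
        text = text.replace(k, v)
    return text

async def _demo():
    out = await replacer('Hi {user}, model: {model}', {'{user}': 'Nova', '{model}': 'gpt-3.5-turbo'})
    print(out)  # Hi Nova, model: gpt-3.5-turbo

asyncio.run(_demo())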
async def log_api_request(user: dict, incoming_request, target_url: str):
"""Logs the API Request into the database.
No input prompt is logged, however data such as IP & useragent is noted.
This would be useful for security reasons. Other minor data is also collected.
Args:
user (dict): User dict object
incoming_request (_type_): Request
target_url (str): The URL the API request was targeted to.
Returns:
_type_: _description_
"""
db = await _get_collection('logs')
payload = {}
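
For illustration, a minimal sketch of the kind of document the docstring describes being built and inserted; the field names below are assumptions, not the project's actual schema.

import time

def build_log_entry(user: dict, incoming_request, target_url: str) -> dict:
    # Hypothetical field names; mirrors the docstring: no prompt, just IP,
    # user agent, target URL and a timestamp.
    # The result would then be inserted, e.g. await db.insert_one(entry).
    return {
        'user_id': user.get('_id'),
        'ip_address': incoming_request.client.host,
        'user_agent': incoming_request.headers.get('user-agent', ''),
        'target_url': target_url,
        'timestamp': time.time(),
    }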

View file

@@ -20,7 +20,14 @@ async def _get_collection(collection_name: str):
return conn['nova-core'][collection_name]
async def create(discord_id: str='') -> dict:
"""Adds a new user to the MongoDB collection."""
"""Add a user to the mongodb
Args:
discord_id (str): Defaults to ''.
Returns:
dict: The user object
"""
chars = string.ascii_letters + string.digits
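
The hunk cuts off right after the character set; as a hedged sketch (the key length and prefix are assumptions, not the real values), such a set is typically fed to the secrets module to mint the new user's API key.

import secrets
import string

def make_key(length: int = 48) -> str:
    # Same character set as above; length and 'nv-' prefix are assumed for illustration.
    chars = string.ascii_letters + string.digits
    return 'nv-' + ''.join(secrets.choice(chars) for _ in range(length))

print(make_key())  # e.g. nv-T3k9...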

View file

@@ -10,7 +10,9 @@ async def _get_module_name(module) -> str:
return name
async def balance_chat_request(payload: dict) -> dict:
"""Load balance the chat completion request between chat providers.
"""
### Load balance the chat completion request between chat providers.
Providers are sorted by streaming support and models. The target (provider.chat_completion) is returned.
"""
providers_available = []
@@ -36,7 +38,9 @@ async def balance_chat_request(payload: dict) -> dict:
return target
async def balance_organic_request(request: dict) -> dict:
"""Load balnace to non-chat completion request between other "organic" providers which respond in the desired format already.
"""
### Load balance a non-chat completion request
Balances between other "organic" providers which already respond in the desired format.
Organic providers are used for non-chat completions, such as moderation and other paths.
"""
providers_available = []
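
A rough sketch of the balancing idea both docstrings describe; the provider names and registry shape are made up, and the real code also checks streaming support before picking a target.

import random

# Hypothetical registry: provider name -> models it serves.
PROVIDERS = {
    'provider_a': ['gpt-3.5-turbo', 'gpt-4'],
    'provider_b': ['gpt-3.5-turbo'],
}

def pick_provider(payload: dict) -> str:
    # Keep only providers that serve the requested model, then pick one at random.
    available = [name for name, models in PROVIDERS.items()
                 if payload.get('model') in models]
    return random.choice(available)

print(pick_provider({'model': 'gpt-4'}))  # -> 'provider_a'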

View file

@@ -31,7 +31,9 @@ async def startup_event():
@app.get('/')
async def root():
"""Returns the root endpoint."""
"""
Returns the root endpoint.
"""
return {
'status': 'ok',

View file

@@ -63,7 +63,11 @@ class Proxy:
@property
def connector(self):
"""Returns an aiohttp_socks.ProxyConnector object. Which can be used in aiohttp.ClientSession."""
"""
### Returns a proxy connector
Returns an aiohttp_socks.ProxyConnector object.
This can be used in aiohttp.ClientSession.
"""
proxy_types = {
'http': aiohttp_socks.ProxyType.HTTP,
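
For context, a minimal sketch of how such a connector gets built and handed to aiohttp.ClientSession; the proxy address and type here are made up, the real values come from the Proxy object above.

import aiohttp
import aiohttp_socks

async def fetch(url: str) -> str:
    # Hypothetical SOCKS5 proxy address.
    connector = aiohttp_socks.ProxyConnector(
        proxy_type=aiohttp_socks.ProxyType.SOCKS5,
        host='127.0.0.1',
        port=1080,
    )
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get(url) as resp:
            return await resp.text()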

View file

@@ -38,6 +38,16 @@ async def stream(
input_tokens: int=0,
incoming_request: starlette.requests.Request=None,
):
"""Stream the completions request. Sends data in chunks
Args:
path (str, optional): URL Path. Defaults to '/v1/chat/completions'.
user (dict, optional): User object (dict) Defaults to None.
payload (dict, optional): Payload. Defaults to None.
credits_cost (int, optional): Cost of the credits of the request. Defaults to 0.
input_tokens (int, optional): Total tokens calculated with tokenizer. Defaults to 0.
incoming_request (starlette.requests.Request, optional): Incoming request. Defaults to None.
"""
is_chat = False
is_stream = payload.get('stream', False)
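
A hedged sketch of the chunked (server-sent events) pattern the docstring refers to, using Starlette's StreamingResponse; the chunk contents are placeholders rather than real provider output.

import json
import starlette.responses

async def _fake_chunks():
    # Placeholder deltas; the real function relays chunks from the chosen provider.
    for word in ['Hello', ' world']:
        chunk = {'choices': [{'delta': {'content': word}}]}
        yield f'data: {json.dumps(chunk)}\n\n'
    yield 'data: [DONE]\n\n'

def make_response() -> starlette.responses.StreamingResponse:
    return starlette.responses.StreamingResponse(_fake_chunks(), media_type='text/event-stream')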

View file

@@ -20,7 +20,11 @@ with open('config/credits.yml', encoding='utf8') as f:
credits_config = yaml.safe_load(f)
async def handle(incoming_request):
"""Transfer a streaming response from the incoming request to the target endpoint"""
"""
### Transfer a streaming response
Forwards the incoming request to the target endpoint.
Checks the method, token amount, auth and cost, along with whether the request is NSFW.
"""
path = incoming_request.url.path.replace('v1/v1/', 'v1/')
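
Finally, a rough sketch of the pre-flight checks the docstring lists; the helper, field names and error shape below are assumptions, not the project's actual API.

# Hypothetical pre-flight checks (field names and error shape assumed).
async def preflight(incoming_request, user: dict, cost: int):
    if incoming_request.method not in ('GET', 'POST'):
        return {'error': 'method not allowed'}
    if not user or user.get('status', {}).get('banned'):
        return {'error': 'invalid or banned API key'}
    if user.get('credits', 0) < cost:
        return {'error': 'not enough credits'}
    # (an NSFW/content check on the payload would slot in here as well)
    return None  # all checks passed; the request can be forwarded to the target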