Compare commits

..

2 commits

Author SHA1 Message Date
monosans 73d2cbe382
Merge ec31a268ee into 719f29fb29 2023-10-07 10:38:29 +00:00
monosans ec31a268ee
Refactor file operations 2023-10-07 13:38:18 +03:00
10 changed files with 56 additions and 56 deletions

View file

@@ -1,17 +1,21 @@
import os import os
import time import time
import asyncio
from dotenv import load_dotenv from dotenv import load_dotenv
from motor.motor_asyncio import AsyncIOMotorClient from motor.motor_asyncio import AsyncIOMotorClient
try:
from helpers import network from helpers import network
except ImportError:
pass
load_dotenv() load_dotenv()
UA_SIMPLIFY = {
'Windows NT': 'W',
'Mozilla/5.0': 'M',
'Win64; x64': '64',
'Safari/537.36': 'S',
'AppleWebKit/537.36 (KHTML, like Gecko)': 'K',
}
## MONGODB Setup ## MONGODB Setup
conn = AsyncIOMotorClient(os.environ['MONGO_URI']) conn = AsyncIOMotorClient(os.environ['MONGO_URI'])
@@ -26,7 +30,18 @@ async def replacer(text: str, dict_: dict) -> str:
return text return text
async def log_api_request(user: dict, incoming_request, target_url: str): async def log_api_request(user: dict, incoming_request, target_url: str):
"""Logs the API Request into the database.""" """Logs the API Request into the database.
No input prompt is logged, however data such as IP & useragent is noted.
This would be useful for security reasons. Other minor data is also collected.
Args:
user (dict): User dict object
incoming_request (_type_): Request
target_url (str): The URL the api request was targeted to.
Returns:
_type_: _description_
"""
db = await _get_collection('logs') db = await _get_collection('logs')
payload = {} payload = {}
@@ -38,6 +53,7 @@ async def log_api_request(user: dict, incoming_request, target_url: str):
model = payload.get('model') model = payload.get('model')
ip_address = await network.get_ip(incoming_request) ip_address = await network.get_ip(incoming_request)
useragent = await replacer(incoming_request.headers.get('User-Agent', ''), UA_SIMPLIFY)
new_log_item = { new_log_item = {
'timestamp': time.time(), 'timestamp': time.time(),
@@ -46,6 +62,7 @@ async def log_api_request(user: dict, incoming_request, target_url: str):
'user_id': str(user['_id']), 'user_id': str(user['_id']),
'security': { 'security': {
'ip': ip_address, 'ip': ip_address,
'useragent': useragent,
}, },
'details': { 'details': {
'model': model, 'model': model,
@@ -73,21 +90,5 @@ async def delete_by_user_id(user_id: str):
db = await _get_collection('logs') db = await _get_collection('logs')
return await db.delete_many({'user_id': user_id}) return await db.delete_many({'user_id': user_id})
async def get_logs_time_range(start: int, end: int):
db = await _get_collection('logs')
entries = []
async for entry in db.find({'timestamp': {'$gte': start, '$lte': end}}):
entries.append(entry)
return entries
async def main():
# how many requests in last 24 hours?
last_24_hours = time.time() - 86400
logs = await get_logs_time_range(last_24_hours, time.time())
print(f'Number of logs in last 24 hours: {len(logs)}')
if __name__ == '__main__': if __name__ == '__main__':
asyncio.run(main()) pass

View file

@@ -100,7 +100,7 @@ manager = KeyManager()
async def main(): async def main():
keys = await manager.get_possible_keys('closed') keys = await manager.get_possible_keys('closed')
print(keys) print(len(keys))
if __name__ == '__main__': if __name__ == '__main__':
asyncio.run(main()) asyncio.run(main())

View file

@@ -2,6 +2,8 @@ import os
import pytz import pytz
import asyncio import asyncio
import datetime import datetime
import json
import time
from dotenv import load_dotenv from dotenv import load_dotenv
from motor.motor_asyncio import AsyncIOMotorClient from motor.motor_asyncio import AsyncIOMotorClient
@@ -13,6 +15,13 @@ load_dotenv()
class StatsManager: class StatsManager:
""" """
### The manager for all statistics tracking ### The manager for all statistics tracking
Stats tracked:
- Dates
- IPs
- Target URLs
- Tokens
- Models
- URL Paths
""" """
def __init__(self): def __init__(self):

6
api/db/tester.py Normal file
View file

@@ -0,0 +1,6 @@
from stats import *
import asyncio
manager = StatsManager()
asyncio.run(manager.get_model_usage())

View file

@@ -1,12 +1,12 @@
from . import \ from . import \
azure \ azure, \
# closed, \ closed, \
# closed4 closed4
# closed432 # closed432
MODULES = [ MODULES = [
azure, azure,
# closed, closed,
# closed4, closed4,
# closed432, # closed432,
] ]

View file

@@ -12,7 +12,7 @@ MODELS = [
'gpt-4', 'gpt-4',
'gpt-4-32k' 'gpt-4-32k'
] ]
# MODELS = [f'{model}-azure' for model in MODELS] MODELS = [f'{model}-azure' for model in MODELS]
AZURE_API = '2023-07-01-preview' AZURE_API = '2023-07-01-preview'

View file

@@ -34,4 +34,7 @@ async def conversation_to_prompt(conversation: list) -> str:
return text return text
async def random_secret_for(name: str) -> str: async def random_secret_for(name: str) -> str:
try:
return await providerkeys.manager.get_key(name) return await providerkeys.manager.get_key(name)
except ValueError:
raise ValueError(f'Keys missing for "{name}" <no_keys>')

View file

@@ -2,7 +2,7 @@
import os import os
import json import json
import ujson import logging
import aiohttp import aiohttp
import asyncio import asyncio
import starlette import starlette
@@ -49,7 +49,9 @@ async def respond(
'Content-Type': 'application/json' 'Content-Type': 'application/json'
} }
for i in range(1): for i in range(20):
print(i)
# Load balancing: randomly selecting a suitable provider
try: try:
if is_chat: if is_chat:
target_request = await load_balancing.balance_chat_request(payload) target_request = await load_balancing.balance_chat_request(payload)
@@ -149,21 +151,13 @@ async def respond(
async for chunk in response.content.iter_any(): async for chunk in response.content.iter_any():
chunk = chunk.decode('utf8').strip() chunk = chunk.decode('utf8').strip()
if 'azure' in provider_name:
chunk = chunk.strip().replace('data: ', '')
if not chunk or 'prompt_filter_results' in chunk:
continue
yield chunk + '\n\n' yield chunk + '\n\n'
break break
except Exception as exc: except Exception as exc:
print('[!] exception', exc) print('[!] exception', exc)
# continue continue
raise exc
else: else:
yield await errors.yield_error(500, 'Sorry, our API seems to have issues connecting to our provider(s).', 'This most likely isn\'t your fault. Please try again later.') yield await errors.yield_error(500, 'Sorry, our API seems to have issues connecting to our provider(s).', 'This most likely isn\'t your fault. Please try again later.')

View file

@@ -215,10 +215,7 @@ async def demo():
else: else:
raise ConnectionError('API Server is not running.') raise ConnectionError('API Server is not running.')
for func in [ for func in [test_chat_non_stream_gpt4, test_chat_stream_gpt3]:
# test_chat_non_stream_gpt4,
test_chat_stream_gpt3
]:
print(f'[*] {func.__name__}') print(f'[*] {func.__name__}')
result = await func() result = await func()
print(f'[+] {func.__name__} - {result}') print(f'[+] {func.__name__} - {result}')

View file

@@ -1,10 +0,0 @@
--- EXPECTED ---
data: {"id":"custom-chatcmpl-nUSiapqELukaPT7vEnGcXkbvrS1fR","object":"chat.completion.chunk","created":1696716717,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}
data: {"id":"custom-chatcmpl-nUSiapqELukaPT7vEnGcXkbvrS1fR","object":"chat.completion.chunk","created":1696716717,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"content":"123"},"finish_reason":null}]}
data: {"id":"custom-chatcmpl-nUSiapqELukaPT7vEnGcXkbvrS1fR","object":"chat.completion.chunk","created":1696716717,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}
data: [DONE]