diff --git a/README.md b/README.md index bde94fd..41f4438 100644 --- a/README.md +++ b/README.md @@ -65,24 +65,31 @@ This one's code can be found in the following repository: [github.com/novaoss/no # Setup ## Requirements -- Python 3.9+ -- pip -- MongoDB database -- `uvicorn` +- newest **Python** version +- newest Python **pip** version +- **MongoDB** database +- `uvicorn` in your system package manager ## Recommended - Setup of the other infrastructure - `git` (for updates) - `screen` (for production) - Cloudflare (for security, anti-DDoS, etc.) - we fully support Cloudflare +- proxies, in case you need to protect your privacy from authorities (China, Iran, ...) ## Staging System This repository has an integrated staging system. It's a simple system that allows you to test the API server before deploying it to production. You should definitely set up two databases on MongoDB: `nova-core` and `nova-test`. Please note that `nova-core` is always used for `providerkeys`. -Put your production `.env` file in `env/.prod.env`. Your test `.env` file should be in `.env`. +Put your production env in `env/.prod.env` and modify the values from the test `.env` to your liking: +- Set `MONGO_NAME` to `nova-core`, which is your database name for the production mode. +- Set `CHECKS_ENDPOINT` to `http://localhost:2333` (or the production port you set for `nova-api`) +**Warning -** always make sure to update your production `.env` (`env/.prod.env`), too! + + +Your test `.env` file should be placed in here. Running `PUSH_TO_PRODUCTION.sh` will: - kill port `2333` (production) - remove all contents of the production directory, set to `/home/nova-prod/` (feel free to change it) @@ -130,6 +137,8 @@ Create a `.env` file, make sure not to reveal any of its contents to anyone, and ### Database Set up a MongoDB database and set `MONGO_URI` to the MongoDB database connection URI. Quotation marks are definetly recommended here! 
+Then set `MONGO_NAME` to `nova-test`, which is your database name for the tests. + ### Proxy (optional) - `PROXY_TYPE` (optional, defaults to `socks.PROXY_TYPE_HTTP`): the type of proxy - can be `http`, `https`, `socks4`, `socks5`, `4` or `5`, etc... - `PROXY_HOST`: the proxy host (host domain or IP address), without port! @@ -182,13 +191,17 @@ You can also just add the *beginning* of an API address, like `12.123.` (without ### Core Keys `CORE_API_KEY` specifies the **very secret key** for which need to access the entire user database etc. + +### Checks `NOVA_KEY` is the API key the which is used in tests. It should be one with tons of credits. +`CHECKS_ENDPOINT` is the endpoint the checks are run against (e.g. `http://localhost:2333`). ### Webhooks `DISCORD_WEBHOOK__USER_CREATED` is the Discord webhook URL for when a user is created. `DISCORD_WEBHOOK__API_ISSUE` is the Discord webhook URL for when an API issue occurs. ### Other +`MODERATION_DEBUG_KEY` can be almost any string (avoid spaces or special characters) - users can add `#` + this key to their API key (e.g. `Bearer nv-123#modkey` as the `Authorization` header) to bypass the moderation checks. This is especially useful if the moderation is too sensitive and can be disabled for certain trusted users. `KEYGEN_INFIX` can be almost any string (avoid spaces or special characters) - this string will be put in the middle of every NovaAI API key which is generated. This is useful for identifying the source of the key using e.g. RegEx. 
## Misc diff --git a/api/handler.py b/api/handler.py index cae8bd5..b4911b6 100644 --- a/api/handler.py +++ b/api/handler.py @@ -25,7 +25,7 @@ models = [model['id'] for model in models_list['data']] with open(os.path.join('config', 'config.yml'), encoding='utf8') as f: config = yaml.safe_load(f) -moderation_debug_key_key = os.getenv('MODERATION_DEBUG_KEY') +moderation_debug_key = os.getenv('MODERATION_DEBUG_KEY') async def handle(incoming_request: fastapi.Request): """Transfer a streaming response @@ -124,7 +124,7 @@ async def handle(incoming_request: fastapi.Request): policy_violation = False - if not (moderation_debug_key_key and moderation_debug_key_key in key_tags and 'gpt-3' in payload.get('model', '')): + if not (moderation_debug_key and moderation_debug_key in key_tags and 'gpt-3' in payload.get('model', '')): if '/moderations' not in path: inp = '' diff --git a/api/providers/__init__.py b/api/providers/__init__.py index 0c09508..31462d2 100644 --- a/api/providers/__init__.py +++ b/api/providers/__init__.py @@ -1,2 +1,2 @@ -from . import azure -MODULES = [azure] +from . import ails, closed, closed4 +MODULES = [ails, closed, closed4] diff --git a/api/providers/ails.py b/api/providers/ails.py index 55e53ae..0981c60 100644 --- a/api/providers/ails.py +++ b/api/providers/ails.py @@ -3,8 +3,10 @@ from .helpers import utils ORGANIC = False MODELS = [ 'gpt-3.5-turbo', - 'gpt-3.5-turbo-0301', - 'gpt-3.5-turbo-16k-0613' + 'gpt-3.5-turbo-0613', + 'gpt-4', + 'gpt-3.5-turbo-16k', + 'gpt-4-0613' ] async def chat_completion(**kwargs): diff --git a/api/providers/azure.py b/api/providers/azure.py index 4bf3716..3be038e 100644 --- a/api/providers/azure.py +++ b/api/providers/azure.py @@ -1,13 +1,14 @@ from .helpers import utils ORGANIC = False # If all OpenAI endpoints should be used for the provider. If false, only a chat completions are used for this provider. -ENDPOINT = 'https://nova-00001.openai.azure.com' # (Important: read below) The endpoint for the provider. 
+ENDPOINT = 'https://nova-00003.openai.azure.com' # (Important: read below) The endpoint for the provider. #! IMPORTANT: If this is an ORGANIC provider, this should be the endpoint for the API with anything BEFORE the "/v1". MODELS = [ 'gpt-3.5-turbo', - 'gpt-3.5-turbo-16k', - 'gpt-4', - 'gpt-4-32k' + # 'gpt-3.5-turbo-16k', + # 'gpt-3.5-turbo-instruct' + # 'gpt-4', + # 'gpt-4-32k' ] async def chat_completion(**payload): diff --git a/api/providers/helpers/utils.py b/api/providers/helpers/utils.py index e48122b..884ed4f 100644 --- a/api/providers/helpers/utils.py +++ b/api/providers/helpers/utils.py @@ -31,7 +31,8 @@ async def random_secret_for(name: str) -> str: async def azure_chat_completion(endpoint: str, provider: str, payload: dict) -> dict: key = await random_secret_for(provider) model = payload['model'] - deployment = model.replace('.', '').replace('-azure', '') + del payload['model'] + deployment = model.replace('.', '') return { 'method': 'POST', diff --git a/api/proxies.py b/api/proxies.py index ca94ebd..01dd63d 100644 --- a/api/proxies.py +++ b/api/proxies.py @@ -122,7 +122,7 @@ def get_proxy() -> Proxy: return Proxy( proxy_type=os.getenv('PROXY_TYPE', 'http'), - host_or_ip=os.getenv('PROXY_HOST', '127.0.0.1'), + host_or_ip=os.environ['PROXY_HOST'], port=int(os.getenv('PROXY_PORT', '8080')), username=os.getenv('PROXY_USER'), password=os.getenv('PROXY_PASS') diff --git a/api/responder.py b/api/responder.py index ba02af8..7b1f35e 100644 --- a/api/responder.py +++ b/api/responder.py @@ -71,7 +71,7 @@ async def respond( 'timeout': 0 } - for _ in range(5): + for _ in range(10): try: if is_chat: target_request = await load_balancing.balance_chat_request(payload) @@ -107,7 +107,12 @@ async def respond( if target_request['method'] == 'GET' and not payload: target_request['payload'] = None - async with aiohttp.ClientSession(connector=proxies.get_proxy().connector) as session: + connector = None + + if os.getenv('PROXY_HOST') or os.getenv('USE_PROXY_LIST', 
'False').lower() == 'true': + connector = proxies.get_proxy().connector + + async with aiohttp.ClientSession(connector=connector) as session: try: async with session.request( method=target_request.get('method', 'POST'), diff --git a/checks/client.py b/checks/client.py index f6fe5d1..18afbe4 100644 --- a/checks/client.py +++ b/checks/client.py @@ -72,7 +72,11 @@ async def test_chat_non_stream_gpt4() -> float: ) await _response_base_check(response) - assert '1337' in response.json()['choices'][0]['message']['content'], 'The API did not return a correct response.' + try: + assert '1337' in response.json()['choices'][0]['message']['content'], 'The API did not return a correct response.' + except json.decoder.JSONDecodeError: + return response.status_code + return time.perf_counter() - request_start async def test_chat_stream_gpt3() -> float: @@ -216,8 +220,9 @@ async def demo(): raise ConnectionError('API Server is not running.') for func in [ + test_chat_stream_gpt3, test_chat_non_stream_gpt4, - test_chat_stream_gpt3 + test_function_calling, ]: print(f'[*] {func.__name__}') result = await func()