Compare commits

..

No commits in common. "2f2e8512ebeb4f43e6c89160ec71d4e75fa2c800" and "98b5614345d4a0f2191657e6423e33d644029402" have entirely different histories.

11 changed files with 152 additions and 231 deletions

View file

@ -20,6 +20,8 @@ We aim to fix that! NovaAI provides several AI models for you to use for free.
###### *I founded FoxGPT (called NovaGPT back then).*
*Old, slow, deprecated* FoxGPT vs new NovaAI repository star count:
<a href="https://star-history.com/#NovaOSS/nova-api&FoxGPT/gpt&Date">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=NovaOSS/nova-api,FoxGPT/gpt&type=Date&theme=dark" />
@ -28,6 +30,8 @@ We aim to fix that! NovaAI provides several AI models for you to use for free.
</picture>
</a>
<img alt="'Emotional damage' meme, with a man with a worried face and the yellow caption 'emotional damage'" src="https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Findianmemetemplates.com%2Fwp-content%2Fuploads%2Femotional-damage-1024x575.jpg&f=1&nofb=1&ipt=b325721ee0a7b9e11603a9bd484c8042b82e1704e639887107c6ce3e0d9b389e&ipo=images" height=100>
## NovaOSS APIs
Our infrastructure might seem a bit confusing, but it's actually quite simple. Only the first one really matters to you if you want to access our AI API; the other ones are just for the team.

30
api/chunks.py Normal file
View file

@ -0,0 +1,30 @@
import json
from helpers import chat
async def process_chunks(
    chunks,
    is_chat: bool,
    chat_id: str,
    target_request: dict,
    model: str=None,
):
    """Process raw response chunks from a provider and yield SSE-ready chunks.

    Args:
        chunks: async iterator of raw ``bytes`` chunks from the provider.
        is_chat: whether the request is a chat completion.
        chat_id: our own chat id, substituted for the provider's id.
            Must be a ``str`` — it is passed to ``str.replace`` below
            (the original ``int`` annotation could never have worked).
        target_request: the outgoing provider request; ``'module'`` selects
            provider-specific handling.
        model: model name forwarded when rebuilding chat chunks.

    Yields:
        str: processed chunk text followed by a blank line (SSE delimiter).

    NOTE(review): when ``is_chat`` is false, ``send`` is never set, so
    non-chat chunks are silently dropped — confirm this is intended.
    """
    async for chunk in chunks:
        chunk = chunk.decode("utf8").strip()
        send = False

        if is_chat and '{' in chunk:
            # Parse the "data: {...}" SSE payload and swap in our chat id.
            data = json.loads(chunk.split('data: ')[1])
            chunk = chunk.replace(data['id'], chat_id)
            send = True

            # The 'twa' provider returns plain text instead of OpenAI-style
            # deltas, so rebuild a proper chat chunk from it.
            # BUG FIX: the original passed the literal list ['text'] as the
            # content; the provider's text lives in data['text'].
            if target_request['module'] == 'twa' and data.get('text'):
                chunk = await chat.create_chat_chunk(chat_id=chat_id, model=model, content=data['text'])

            # Don't forward empty deltas or the initial role-only delta.
            if (not data['choices'][0]['delta']) or data['choices'][0]['delta'] == {'role': 'assistant'}:
                send = False

        if send and chunk:
            yield chunk + '\n\n'

View file

@ -50,9 +50,6 @@ async def get_users(discord_id: int, incoming_request: fastapi.Request):
if not user:
return await errors.error(404, 'Discord user not found in the API database.', 'Check the `discord_id` parameter.')
# turn the ObjectId into a string
user['_id'] = str(user['_id'])
return user
async def new_user_webhook(user: dict) -> None:
@ -93,8 +90,6 @@ async def create_user(incoming_request: fastapi.Request):
user = await manager.create(discord_id)
await new_user_webhook(user)
user['_id'] = str(user['_id'])
return user
@router.put('/users')
@ -131,23 +126,24 @@ async def run_checks(incoming_request: fastapi.Request):
if auth_error:
return auth_error
results = {}
try:
chat = await checks.client.test_chat()
except Exception as exc:
print(exc)
chat = None
funcs = [
checks.client.test_chat_non_stream_gpt4,
checks.client.test_chat_stream_gpt3,
checks.client.test_function_calling,
checks.client.test_image_generation,
# checks.client.test_speech_to_text,
checks.client.test_models
]
try:
moderation = await checks.client.test_api_moderation()
except Exception:
moderation = None
for func in funcs:
try:
result = await func()
except Exception as exc:
results[func.__name__] = str(exc)
else:
results[func.__name__] = result
try:
models = await checks.client.test_models()
except Exception:
models = None
return results
return {
'chat/completions': chat,
'models': models,
'moderations': moderation,
}

View file

@ -124,14 +124,7 @@ async def handle(incoming_request: fastapi.Request):
inp = payload.get('input', payload.get('prompt', ''))
if isinstance(payload.get('messages'), list):
inp = ''
for message in payload.get('messages', []):
if message.get('role') == 'user':
inp += message.get('content', '') + '\n'
if 'functions' in payload:
inp += '\n'.join([function.get('description', '') for function in payload.get('functions', [])])
inp = '\n'.join([message['content'] for message in payload['messages']])
if inp and len(inp) > 2 and not inp.isnumeric():
policy_violation = await moderation.is_policy_violated(inp)
@ -155,7 +148,7 @@ async def handle(incoming_request: fastapi.Request):
path=path,
payload=payload,
credits_cost=cost,
input_tokens=0,
input_tokens=-1,
incoming_request=incoming_request,
),
media_type=media_type

View file

@ -2,12 +2,12 @@
import fastapi
import pydantic
import functools
from rich import print
from dotenv import load_dotenv
from json import JSONDecodeError
from bson.objectid import ObjectId
from slowapi.errors import RateLimitExceeded
from slowapi.middleware import SlowAPIMiddleware
from fastapi.middleware.cors import CORSMiddleware
@ -17,6 +17,7 @@ from helpers import network
import core
import handler
import moderation
load_dotenv()
@ -65,5 +66,17 @@ async def root():
@app.route('/v1/{path:path}', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH'])
async def v1_handler(request: fastapi.Request):
res = await handler.handle(incoming_request=request)
res = await handler.handle(request)
return res
@app.post('/moderate')
async def moderate(request: fastapi.Request):
    """Check a prompt against our own moderation model.

    Expects a JSON body like ``{"text": "..."}``. Returns the moderation
    result, or an empty string when nothing is flagged.

    BUG FIX: the original stacked ``@functools.lru_cache()`` on top of the
    route decorator. Caching an async endpoint is broken — lru_cache keys
    on the per-call ``Request`` object (so it never hits) and stores the
    coroutine itself, which can only be awaited once. Removed.
    """
    try:
        prompt = await request.json()
        prompt = prompt['text']
    except (KeyError, JSONDecodeError):
        # Malformed JSON or missing 'text' key -> 400 Bad Request.
        return fastapi.Response(status_code=400)

    result = await moderation.is_policy_violated__own_model(prompt)
    return result or ''

View file

@ -11,6 +11,7 @@ import starlette
from rich import print
from dotenv import load_dotenv
import chunks
import proxies
import provider_auth
import after_request
@ -20,6 +21,24 @@ from helpers import network, chat, errors
load_dotenv()
## Loads config which contains rate limits
with open('config/config.yml', encoding='utf8') as f:
config = yaml.safe_load(f)
## Where all rate limit requested data will be stored.
# Rate limit data is **not persistent** (It will be deleted on server stop/restart).
user_last_request_time = {}
DEMO_PAYLOAD = {
'model': 'gpt-3.5-turbo',
'messages': [
{
'role': 'user',
'content': '1+1='
}
]
}
async def respond(
path: str='/v1/chat/completions',
user: dict=None,
@ -33,22 +52,27 @@ async def respond(
"""
is_chat = False
is_stream = payload.get('stream', False)
model = None
is_stream = False
if 'chat/completions' in path:
is_chat = True
model = payload['model']
if is_chat and is_stream:
chat_id = await chat.create_chat_id()
yield await chat.create_chat_chunk(chat_id=chat_id, model=model, content=chat.CompletionStart)
yield await chat.create_chat_chunk(chat_id=chat_id, model=model, content=None)
json_response = {}
headers = {
'Content-Type': 'application/json',
'User-Agent': 'axios/0.21.1',
'User-Agent': 'null'
}
for _ in range(10):
for _ in range(5):
# Load balancing: randomly selecting a suitable provider
# If the request is a chat completion, then we need to load balance between chat providers
# If the request is an organic request, then we need to load balance between organic providers
@ -91,11 +115,10 @@ async def respond(
cookies=target_request.get('cookies'),
ssl=False,
timeout=aiohttp.ClientTimeout(
connect=0.3,
connect=0.5,
total=float(os.getenv('TRANSFER_TIMEOUT', '500'))
),
) as response:
is_stream = response.content_type == 'text/event-stream'
if response.status == 429:
continue
@ -121,27 +144,35 @@ async def respond(
if 'Too Many Requests' in str(exc):
continue
async for chunk in response.content.iter_any():
chunk = chunk.decode('utf8').strip()
yield chunk + '\n\n'
async for chunk in chunks.process_chunks(
chunks=response.content.iter_any(),
is_chat=is_chat,
chat_id=chat_id,
model=model,
target_request=target_request
):
yield chunk
break
except Exception as exc:
# print(f'[!] {type(exc)} - {exc}')
continue
if (not json_response) and is_chat:
print('[!] chat response is empty')
continue
else:
yield await errors.yield_error(500, 'Sorry, the provider is not responding. We\'re possibly getting rate-limited.', 'Please try again later.')
yield await errors.yield_error(500, 'Sorry, the API is not responding.', 'Please try again later.')
return
if is_chat and is_stream:
yield await chat.create_chat_chunk(chat_id=chat_id, model=model, content=chat.CompletionStop)
yield 'data: [DONE]\n\n'
if (not is_stream) and json_response:
yield json.dumps(json_response)
print(f'[+] {path} -> {model or ""}')
await after_request.after_request(
incoming_request=incoming_request,
target_request=target_request,
@ -152,3 +183,5 @@ async def respond(
is_chat=is_chat,
model=model,
)
print(f'[+] {path} -> {model or ""}')

View file

@ -2,7 +2,6 @@
import os
import time
import json
import httpx
import openai
import asyncio
@ -11,7 +10,6 @@ import traceback
from rich import print
from typing import List
from dotenv import load_dotenv
from pydantic import BaseModel
load_dotenv()
@ -20,7 +18,7 @@ MODEL = 'gpt-3.5-turbo'
MESSAGES = [
{
'role': 'user',
'content': 'Just respond with the number "1337", nothing else.'
'content': '1+1=',
}
]
@ -45,12 +43,12 @@ async def test_server():
else:
return time.perf_counter() - request_start
async def test_chat_non_stream_gpt4() -> float:
"""Tests non-streamed chat completions with the GPT-4 model."""
async def test_chat_non_stream(model: str=MODEL, messages: List[dict]=None) -> dict:
"""Tests an API api_endpoint."""
json_data = {
'model': 'gpt-4',
'messages': MESSAGES,
'model': model,
'messages': messages or MESSAGES,
'stream': False
}
@ -65,52 +63,10 @@ async def test_chat_non_stream_gpt4() -> float:
)
response.raise_for_status()
assert '1337' in response.json()['choices'][0]['message']['content'], 'The API did not return a correct response.'
assert '2' in response.json()['choices'][0]['message']['content'], 'The API did not return a correct response.'
return time.perf_counter() - request_start
async def test_chat_stream_gpt3() -> float:
    """Tests the text stream endpoint with the GPT-3.5-Turbo model.

    Sends a streamed chat completion request, reassembles the streamed
    delta contents, asserts the expected answer appears, and returns the
    elapsed wall-clock time in seconds.

    Raises:
        httpx.HTTPStatusError: if the API responds with an error status.
        AssertionError: if '1337' is not found in the streamed text.
    """
    json_data = {
        'model': 'gpt-3.5-turbo',
        'messages': MESSAGES,
        'stream': True,
    }

    request_start = time.perf_counter()
    async with httpx.AsyncClient() as client:
        response = await client.post(
            url=f'{api_endpoint}/chat/completions',
            headers=HEADERS,
            json=json_data,
            timeout=10,
        )
        response.raise_for_status()

        chunks = []
        resulting_text = ''

        # Each network chunk may contain several SSE events separated by
        # blank lines; parse each event's JSON payload individually.
        async for chunk in response.aiter_text():
            for subchunk in chunk.split('\n\n'):
                # NOTE(review): this rebinds the outer loop variable
                # `chunk`, and the `break` below only exits the inner
                # split loop, not the stream — confirm intended.
                chunk = subchunk.replace('data: ', '').strip()

                if chunk == '[DONE]':
                    break

                if chunk:
                    chunks.append(json.loads(chunk))

                    try:
                        # Role-only / empty deltas have no 'content' key.
                        resulting_text += json.loads(chunk)['choices'][0]['delta']['content']
                    except KeyError:
                        pass

    assert '1337' in resulting_text, 'The API did not return a correct response.'
    return time.perf_counter() - request_start
async def test_image_generation() -> float:
async def test_sdxl():
"""Tests the image generation endpoint with the SDXL model."""
json_data = {
@ -133,48 +89,6 @@ async def test_image_generation() -> float:
assert '://' in response.json()['data'][0]['url']
return time.perf_counter() - request_start
class StepByStepAIResponse(BaseModel):
    """Demo response structure for the function calling test.

    Its JSON schema (via ``.schema()``) is sent to the API as the
    'parameters' of the advertised function.
    """
    # Short title of the overall answer.
    title: str
    # Ordered list of instruction steps.
    steps: List[str]
async def test_function_calling():
    """Tests function calling functionality with newer GPT models.

    Forces the model to call ``get_answer_for_user_query`` (via the
    'function_call' field), then checks the returned arguments parse as
    JSON and contain the expected keys. Returns the elapsed time in
    seconds.

    Raises:
        httpx.HTTPStatusError: if the API responds with an error status.
        AssertionError: if the function-call arguments are incomplete.
    """
    json_data = {
        'stream': False,
        'model': 'gpt-3.5-turbo-0613',
        'messages': [
            {"role": "user", "content": "Explain how to assemble a PC"}
        ],
        'functions': [
            {
                'name': 'get_answer_for_user_query',
                'description': 'Get user answer in series of steps',
                # Pydantic model schema doubles as the function's
                # JSON-schema parameter spec.
                'parameters': StepByStepAIResponse.schema()
            }
        ],
        # Forcing this exact function guarantees a function_call reply.
        'function_call': {'name': 'get_answer_for_user_query'}
    }

    request_start = time.perf_counter()
    # assumes api_endpoint and HEADERS are module-level globals — defined
    # outside this view.
    async with httpx.AsyncClient() as client:
        response = await client.post(
            url=f'{api_endpoint}/chat/completions',
            headers=HEADERS,
            json=json_data,
            timeout=10,
        )
        response.raise_for_status()

    res = response.json()
    # The model returns its structured answer as a JSON string in
    # function_call.arguments.
    output = json.loads(res['choices'][0]['message']['function_call']['arguments'])
    print(output)

    assert output.get('title') and output.get('steps'), 'The API did not return a correct response.'
    return time.perf_counter() - request_start
async def test_models():
"""Tests the models endpoint."""
@ -208,20 +122,17 @@ async def demo():
else:
raise ConnectionError('API Server is not running.')
# print('[lightblue]Checking if function calling works...')
# print(await test_function_calling())
print('Checking non-streamed chat completions...')
print(await test_chat_non_stream())
# print('Checking non-streamed chat completions...')
# print(await test_chat_non_stream_gpt4())
# print('[lightblue]Checking if SDXL image generation works...')
# print(await test_sdxl())
# print('Checking streamed chat completions...')
# print(await test_chat_stream_gpt3())
# print('[lightblue]Checking if the moderation endpoint works...')
# print(await test_api_moderation())
# print('[lightblue]Checking if image generation works...')
# print(await test_image_generation())
# print('Checking the models endpoint...')
# print(await test_models())
print('Checking the models endpoint...')
print(await test_models())
except Exception as exc:
print('[red]Error: ' + str(exc))

BIN
image-1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 566 KiB

BIN
image.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 566 KiB

View file

@ -1,82 +0,0 @@
import os
import json
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_base = 'http://localhost:2332/v1'
openai.api_key = os.environ['NOVA_KEY']
# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit='fahrenheit'):
    """Get the current weather in a given location.

    Dummy implementation: always reports 72 degrees, sunny and windy,
    whatever the location. Returns the report as a JSON-encoded string.
    """
    report = {
        'location': location,
        'temperature': '72',
        'unit': unit,
        'forecast': ['sunny', 'windy'],
    }
    return json.dumps(report)
def run_conversation():
    """Run a two-step function-calling conversation against the API.

    Step 1 asks the model about the weather in Boston and advertises the
    ``get_current_weather`` function. If the model requests a call, the
    function is executed locally and its result fed back for a final reply.

    Returns:
        The second ChatCompletion response incorporating the function
        result, or ``None`` if the model did not request a function call.
    """
    # Step 1: send the conversation and available functions to GPT
    messages = [{'role': 'user', 'content': 'What\'s the weather like in Boston?'}]
    functions = [
        {
            'name': 'get_current_weather',
            'description': 'Get the current weather in a given location',
            'parameters': {
                'type': 'object',
                'properties': {
                    'location': {
                        'type': 'string',
                        'description': 'The city and state, e.g. San Francisco, CA',
                    },
                    'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']},
                },
                'required': ['location'],
            },
        }
    ]
    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo-0613',
        messages=messages,
        functions=functions,
        function_call='auto',  # auto is default, but we'll be explicit
    )
    response_message = response['choices'][0]['message']

    # Step 2: check if GPT wanted to call a function
    if response_message.get('function_call'):
        # Step 3: call the function
        # Note: the JSON response may not always be valid; be sure to handle errors
        available_functions = {
            'get_current_weather': get_current_weather,
        }  # only one function in this example, but you can have multiple
        function_name = response_message['function_call']['name']
        # FIX: local variable was misspelled 'fuction_to_call'.
        function_to_call = available_functions[function_name]
        function_args = json.loads(response_message['function_call']['arguments'])
        function_response = function_to_call(
            location=function_args.get('location'),
            unit=function_args.get('unit'),
        )

        # Step 4: send the info on the function call and function response to GPT
        messages.append(response_message)  # extend conversation with assistant's reply
        messages.append(
            {
                'role': 'function',
                'name': function_name,
                'content': function_response,
            }
        )  # extend conversation with function response
        second_response = openai.ChatCompletion.create(
            model='gpt-3.5-turbo-0613',
            messages=messages,
        )  # get a new response from GPT where it can see the function response
        return second_response
print(run_conversation())

23
setup.py Normal file
View file

@ -0,0 +1,23 @@
from fastapi import FastAPI
from fastapi.responses import PlainTextResponse
from fastapi.requests import Request
from fastapi.responses import Response
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.util import get_remote_address
from slowapi.errors import RateLimitExceeded
# All requests share one rate-limit bucket (constant "test" key), 5/minute.
limiter = Limiter(key_func=lambda: "test", default_limits=["5/minute"])
app = FastAPI()
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)

# Note: the route decorator must be above the limit decorator, not below it
@app.get("/home")
@limiter.limit("5/minute")
async def homepage(request: Request):
    """Rate-limited plain-text test endpoint."""
    return PlainTextResponse("test")

@app.get("/mars")
@limiter.limit("5/minute")
async def mars(request: Request, response: Response):
    """Rate-limited JSON test endpoint.

    FIX: renamed from the duplicate ``homepage`` — two handlers with the
    same name in one module shadow each other (flake8 F811). Routes are
    registered at decoration time, so behavior is unchanged.
    """
    return {"key": "value"}