# nova-api/checks/client.py
"""Tests the API."""
import os
import time
import httpx
import openai
import asyncio
import traceback

from rich import print
from typing import List
from dotenv import load_dotenv
load_dotenv()
# Model exercised by every check below.
MODEL = 'gpt-3.5-turbo'

# Minimal single-turn conversation used as the default prompt.
MESSAGES = [
    {
        'role': 'user',
        'content': '1+1=',
    }
]

# Base URL of the local API instance under test.
api_endpoint = 'http://localhost:2332'
async def test_server():
    """Check whether the API server is up and reports a healthy status.

    Returns:
        bool: True if the server's root endpoint responds with status 'ok'.

    Raises:
        ConnectionError: if the server cannot be reached at ``api_endpoint``.
    """
    try:
        # Strip a '/v1' suffix (if present) so we hit the root status endpoint.
        return httpx.get(api_endpoint.replace('/v1', '')).json()['status'] == 'ok'
    except httpx.ConnectError as exc:
        # Original message claimed "port" but interpolated the full URL;
        # "at" matches what is actually printed.
        raise ConnectionError(f'API is not running at {api_endpoint}.') from exc
async def test_api(model: str=MODEL, messages: List[dict]=None) -> dict:
    """Send a streaming chat-completion request directly to the HTTP API.

    Falls back to the module-level MESSAGES when no messages are given.
    Returns the raw response body text.
    """
    payload = {
        'model': model,
        'messages': messages or MESSAGES,
        'stream': True,
    }

    resp = httpx.post(
        url=f'{api_endpoint}/chat/completions',
        headers=HEADERS,
        json=payload,
        timeout=20
    )
    resp.raise_for_status()

    return resp.text
async def test_library():
    """Run a chat completion through the OpenAI Python client library.

    Returns the assistant message content of the first choice.
    """
    completion = openai.ChatCompletion.create(model=MODEL, messages=MESSAGES)
    print(completion)  # dump the full payload for manual inspection
    return completion['choices'][0]['message']['content']
async def test_library_moderation():
    """Exercise the moderation endpoint via the OpenAI client library.

    Returns the moderation result, or True when the endpoint rejects the
    request as invalid (treated as "endpoint responded").
    """
    try:
        return openai.Moderation.create('I wanna kill myself, I wanna kill myself; It\'s all I hear right now, it\'s all I hear right now')
    except openai.error.InvalidRequestError:
        return True
async def test_models():
    """Fetch the list of available models from the /models endpoint."""
    resp = httpx.get(
        url=f'{api_endpoint}/models',
        headers=HEADERS,
        timeout=5
    )
    resp.raise_for_status()
    return resp.json()
async def test_api_moderation() -> dict:
    """Hit the /moderations endpoint directly over HTTP.

    Returns the raw response body text.
    """
    resp = httpx.get(
        url=f'{api_endpoint}/moderations',
        headers=HEADERS,
        timeout=20
    )
    resp.raise_for_status()
    return resp.text
# ==========================================================================================
def demo():
    """Run all checks against the local API server.

    Waits up to 30 seconds for the server to come up, then exercises the
    raw HTTP endpoint, the OpenAI client library, the moderation endpoint
    and the model list. Exits with a non-zero status on any failure.
    """
    try:
        for _ in range(30):
            # BUG FIX: test_server() is a coroutine function; the bare
            # coroutine object is always truthy, so the original
            # `if test_server():` passed immediately even with the server
            # down (and the coroutine was never awaited). Run it properly.
            if asyncio.run(test_server()):
                break

            print('Waiting until API Server is started up...')
            time.sleep(1)
        else:
            raise ConnectionError('API Server is not running.')

        print('[lightblue]Running a api endpoint to see if requests can go through...')
        print(asyncio.run(test_api('gpt-3.5-turbo')))

        print('[lightblue]Checking if the API works with the python library...')
        print(asyncio.run(test_library()))

        print('[lightblue]Checking if the moderation endpoint works...')
        print(asyncio.run(test_library_moderation()))

        print('[lightblue]Checking the /v1/models endpoint...')
        print(asyncio.run(test_models()))

    except Exception as exc:
        print('[red]Error: ' + str(exc))
        traceback.print_exc()
        # POSIX truncates exit codes to 0-255, so exit(500) would surface
        # as 244; use 1 to signal failure unambiguously.
        exit(1)
# Point the OpenAI client at the local deployment and authenticate with the
# key from the environment (loaded from .env at import time above).
openai.api_base = api_endpoint
openai.api_key = os.environ['NOVA_KEY']

# Shared headers for the raw httpx requests against the API.
HEADERS = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer ' + openai.api_key
}

if __name__ == '__main__':
    demo()