Small moderation timeout improvements, added SDXL check

This commit is contained in:
nsde 2023-08-28 13:47:13 +02:00
parent 5fdc15c90f
commit c58eaa5b32
2 changed files with 22 additions and 12 deletions

View file

@@ -30,7 +30,7 @@ async def is_policy_violated(inp: Union[str, list]) -> bool:
     else:
         text = '\n'.join(inp)

-    for _ in range(3):
+    for _ in range(5):
         req = await load_balancing.balance_organic_request(
             {
                 'path': '/v1/moderations',
@@ -40,7 +40,6 @@ async def is_policy_violated(inp: Union[str, list]) -> bool:
         async with aiohttp.ClientSession(connector=proxies.get_proxy().connector) as session:
             try:
-                start = time.perf_counter()
                 async with session.request(
                     method=req.get('method', 'POST'),
                     url=req['url'],
@@ -49,7 +48,7 @@ async def is_policy_violated(inp: Union[str, list]) -> bool:
                     headers=req.get('headers'),
                     cookies=req.get('cookies'),
                     ssl=False,
-                    timeout=aiohttp.ClientTimeout(total=3),
+                    timeout=aiohttp.ClientTimeout(total=2),
                 ) as res:
                     res.raise_for_status()
                     json_response = await res.json()

View file

@@ -66,16 +66,27 @@ async def test_chat(model: str=MODEL, messages: List[dict]=None) -> dict:
     assert '2' in response.json()['choices'][0]['message']['content'], 'The API did not return a correct response.'
     return time.perf_counter() - request_start

-async def test_library_chat():
-    """Tests if the api_endpoint is working with the OpenAI Python library."""
+async def test_sdxl():
+    """Tests the image generation endpoint with the SDXL model."""
+    json_data = {
+        'prompt': 'a nice sunset with a samurai standing in the middle',
+        'n': 1,
+        'size': '1024x1024'
+    }
     request_start = time.perf_counter()
-    completion = openai.ChatCompletion.create(
-        model=MODEL,
-        messages=MESSAGES
-    )
-    assert '2' in completion.choices[0]['message']['content'], 'The API did not return a correct response.'
+    async with httpx.AsyncClient() as client:
+        response = await client.post(
+            url=f'{api_endpoint}/images/generations',
+            headers=HEADERS,
+            json=json_data,
+            timeout=10,
+        )
+    response.raise_for_status()
+    assert '://' in response.json()['data'][0]['url']
     return time.perf_counter() - request_start

 async def test_models():
async def test_models(): async def test_models():
@@ -129,8 +140,8 @@ async def demo():
     print('[lightblue]Checking if the API works...')
     print(await test_chat())

-    print('[lightblue]Checking if the API works with the Python library...')
-    print(await test_library_chat())
+    print('[lightblue]Checking if SDXL image generation works...')
+    print(await test_sdxl())

     print('[lightblue]Checking if the moderation endpoint works...')
     print(await test_api_moderation())