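"""Tests for image (vision) inputs to the OpenAI-compatible chat API.

Launches a real API server running Phi-3.5-vision-instruct and exercises
image-URL inputs, base64-encoded inputs, streaming, and the per-prompt
image limit. Run with pytest.
"""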
from typing import Dict, List

import openai
import pytest

from aphrodite.multimodal.utils import encode_image_base64, fetch_image

from ...utils import RemoteOpenAIServer

MODEL_NAME = "microsoft/Phi-3.5-vision-instruct"
MAXIMUM_IMAGES = 2

# Test different image extensions (JPG/PNG) and formats (gray/RGB/RGBA)
TEST_IMAGE_URLS = [
    "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/9/91/Venn_diagram_rgb.svg/1280px-Venn_diagram_rgb.svg.png",
    "https://upload.wikimedia.org/wikipedia/commons/0/0b/RGBA_comp.png",
]
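

# Starts an OpenAI-compatible API server in a subprocess, shared by all tests
# in this module; --limit-mm-per-prompt caps the number of images the server
# accepts in a single request.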
@pytest.fixture(scope="module")
def server():
    args = [
        "--dtype", "bfloat16", "--max-model-len", "4096", "--max-num-seqs",
        "5", "--enforce-eager", "--trust-remote-code", "--limit-mm-per-prompt",
        f"image={MAXIMUM_IMAGES}"
    ]

    with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
        yield remote_server
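

# Async OpenAI client bound to the module-scoped test server.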
@pytest.fixture(scope="module")
def client(server):
    return server.get_async_client()
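

# Session-scoped cache of base64-encoded image bytes, so each test image is
# downloaded and encoded only once across the whole test session.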
@pytest.fixture(scope="session")
def base64_encoded_image() -> Dict[str, str]:
    return {
        image_url: encode_image_base64(fetch_image(image_url))
        for image_url in TEST_IMAGE_URLS
    }


@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("image_url", TEST_IMAGE_URLS)
async def test_single_chat_session_image(client: openai.AsyncOpenAI,
                                         model_name: str, image_url: str):
    messages = [{
        "role": "user",
        "content": [
            {
                "type": "image_url",
                "image_url": {
                    "url": image_url
                }
            },
            {
                "type": "text",
                "text": "What's in this image?"
            },
        ],
    }]

    # test single completion
    chat_completion = await client.chat.completions.create(model=model_name,
                                                           messages=messages,
                                                           max_tokens=10,
                                                           logprobs=True,
                                                           top_logprobs=5)
    assert len(chat_completion.choices) == 1

    choice = chat_completion.choices[0]
    assert choice.finish_reason == "length"
    assert chat_completion.usage == openai.types.CompletionUsage(
        completion_tokens=10, prompt_tokens=772, total_tokens=782)

    message = choice.message
    assert message.content is not None and len(message.content) >= 10
    assert message.role == "assistant"
    messages.append({"role": "assistant", "content": message.content})

    # test multi-turn dialogue
    messages.append({"role": "user", "content": "express your result in json"})
    chat_completion = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
    )
    message = chat_completion.choices[0].message
    assert message.content is not None and len(message.content) >= 0
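

# Same flow as above, but the image is sent inline as a base64 data URL
# rather than a remote URL. Note the data URL always declares image/jpeg,
# even for the PNG test images; the server is expected to detect the actual
# format from the decoded bytes.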
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("image_url", TEST_IMAGE_URLS)
async def test_single_chat_session_image_base64encoded(
        client: openai.AsyncOpenAI, model_name: str, image_url: str,
        base64_encoded_image: Dict[str, str]):
    messages = [{
        "role": "user",
        "content": [
            {
                "type": "image_url",
                "image_url": {
                    "url":
                    f"data:image/jpeg;base64,{base64_encoded_image[image_url]}"
                }
            },
            {
                "type": "text",
                "text": "What's in this image?"
            },
        ],
    }]

    # test single completion
    chat_completion = await client.chat.completions.create(model=model_name,
                                                           messages=messages,
                                                           max_tokens=10,
                                                           logprobs=True,
                                                           top_logprobs=5)
    assert len(chat_completion.choices) == 1

    choice = chat_completion.choices[0]
    assert choice.finish_reason == "length"
    assert chat_completion.usage == openai.types.CompletionUsage(
        completion_tokens=10, prompt_tokens=772, total_tokens=782)

    message = choice.message
    assert message.content is not None and len(message.content) >= 10
    assert message.role == "assistant"
    messages.append({"role": "assistant", "content": message.content})

    # test multi-turn dialogue
    messages.append({"role": "user", "content": "express your result in json"})
    chat_completion = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
    )
    message = chat_completion.choices[0].message
    assert message.content is not None and len(message.content) >= 0
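

# Verifies that streaming a vision request yields the same text as the
# equivalent non-streaming request (both greedy at temperature 0.0), and
# that the finish reason arrives exactly once, on the final chunk.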
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("image_url", TEST_IMAGE_URLS)
async def test_chat_streaming_image(client: openai.AsyncOpenAI,
                                    model_name: str, image_url: str):
    messages = [{
        "role": "user",
        "content": [
            {
                "type": "image_url",
                "image_url": {
                    "url": image_url
                }
            },
            {
                "type": "text",
                "text": "What's in this image?"
            },
        ],
    }]

    # test single completion
    chat_completion = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
        temperature=0.0,
    )
    output = chat_completion.choices[0].message.content
    stop_reason = chat_completion.choices[0].finish_reason

    # test streaming
    stream = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=10,
        temperature=0.0,
        stream=True,
    )
    chunks: List[str] = []
    finish_reason_count = 0
    async for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.role:
            assert delta.role == "assistant"
        if delta.content:
            chunks.append(delta.content)
        if chunk.choices[0].finish_reason is not None:
            finish_reason_count += 1
    # the finish reason should only be returned on the last chunk
    assert finish_reason_count == 1
    assert chunk.choices[0].finish_reason == stop_reason
    assert delta.content
    assert "".join(chunks) == output
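

# Requests with up to MAXIMUM_IMAGES images should succeed; anything beyond
# the --limit-mm-per-prompt cap should be rejected with a 400 error, and the
# server should keep serving requests afterwards.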
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize(
    "image_urls",
    [TEST_IMAGE_URLS[:i] for i in range(2, len(TEST_IMAGE_URLS))])
async def test_multi_image_input(client: openai.AsyncOpenAI, model_name: str,
                                 image_urls: List[str]):
    messages = [{
        "role": "user",
        "content": [
            *({
                "type": "image_url",
                "image_url": {
                    "url": image_url
                }
            } for image_url in image_urls),
            {
                "type": "text",
                "text": "What's in this image?"
            },
        ],
    }]

    if len(image_urls) > MAXIMUM_IMAGES:
        # over-limit multi-image input should be rejected
        with pytest.raises(openai.BadRequestError):
            await client.chat.completions.create(
                model=model_name,
                messages=messages,
                max_tokens=10,
                temperature=0.0,
            )

        # the server should still work afterwards
        completion = await client.completions.create(
            model=model_name,
            prompt=[0, 0, 0, 0, 0],  # prompt given as raw token IDs
            max_tokens=5,
            temperature=0.0,
        )
        completion_text = completion.choices[0].text
        assert completion_text is not None and len(completion_text) >= 0
    else:
        chat_completion = await client.chat.completions.create(
            model=model_name,
            messages=messages,
            max_tokens=10,
            temperature=0.0,
        )
        message = chat_completion.choices[0].message
        assert message.content is not None and len(message.content) >= 0