# conftest.py
import contextlib
import gc
import json
import os
import sys
import tempfile
from collections import UserList
from enum import Enum
from typing import (Any, Callable, Dict, List, Optional, Tuple, TypedDict,
                    TypeVar, Union)

import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from huggingface_hub import snapshot_download
from loguru import logger
from PIL import Image
from transformers import (AutoModelForCausalLM, AutoTokenizer, BatchEncoding,
                          BatchFeature)

from aphrodite import LLM, SamplingParams
from aphrodite.assets.image import ImageAsset
from aphrodite.common.config import TokenizerPoolConfig
from aphrodite.common.outputs import RequestOutput
from aphrodite.common.sequence import SampleLogprobs
from aphrodite.common.utils import (STR_DTYPE_TO_TORCH_DTYPE,
                                    cuda_device_count_stateless, identity,
                                    is_cpu)
from aphrodite.connections import global_http_connection
from aphrodite.distributed import (destroy_distributed_environment,
                                   destroy_model_parallel)
from aphrodite.inputs import (ExplicitEncoderDecoderPrompt, TextPrompt,
                              to_enc_dec_tuple_list, zip_enc_dec_prompts)

_TEST_DIR = os.path.dirname(__file__)
_TEST_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "example.txt")]
_LONG_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "summary.txt")]

PromptImageInput = Union[List[Image.Image], List[List[Image.Image]]]
PromptAudioInput = Union[List[Tuple[np.ndarray, int]],
                         List[List[Tuple[np.ndarray, int]]]]


def _read_prompts(filename: str) -> List[str]:
    with open(filename, "r") as f:
        prompts = f.readlines()
    return prompts


class _ImageAssetPrompts(TypedDict):
    stop_sign: str
    cherry_blossom: str


if sys.version_info < (3, 9):
    # UserList cannot be subscripted
    class _ImageAssetsBase(UserList):
        pass
else:
    class _ImageAssetsBase(UserList[ImageAsset]):
        pass


class _ImageAssets(_ImageAssetsBase):

    def __init__(self) -> None:
        super().__init__([
            ImageAsset("stop_sign"),
            ImageAsset("cherry_blossom"),
        ])

    def prompts(self, prompts: _ImageAssetPrompts) -> List[str]:
        """
        Convenience method to define the prompt for each test image.
        The order of the returned prompts matches the order of the
        assets when iterating through this object.
        """
        return [prompts["stop_sign"], prompts["cherry_blossom"]]


IMAGE_ASSETS = _ImageAssets()
"""Singleton instance of :class:`_ImageAssets`."""
@pytest.fixture(autouse=True)
def init_test_http_connection():
    # pytest_asyncio may use a different event loop per test
    # so we need to make sure the async client is created anew
    global_http_connection.reuse_client = False


def cleanup():
    destroy_model_parallel()
    destroy_distributed_environment()
    with contextlib.suppress(AssertionError):
        torch.distributed.destroy_process_group()
    gc.collect()
    if not is_cpu():
        torch.cuda.empty_cache()


@pytest.fixture()
def should_do_global_cleanup_after_test(request) -> bool:
    """Allow subdirectories to skip global cleanup by overriding this fixture.
    This can provide a ~10x speedup for non-GPU unit tests since they don't
    need to initialize torch.
    """
    if request.node.get_closest_marker("skip_global_cleanup"):
        return False
    return True
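
# Example (illustrative sketch): a test can opt out of global cleanup by
# attaching the marker checked above:
#
#     @pytest.mark.skip_global_cleanup
#     def test_pure_python_helper():
#         ...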
@pytest.fixture(autouse=True)
def cleanup_fixture(should_do_global_cleanup_after_test: bool):
    yield
    if should_do_global_cleanup_after_test:
        cleanup()


@pytest.fixture
def example_prompts() -> List[str]:
    prompts = []
    for filename in _TEST_PROMPTS:
        prompts += _read_prompts(filename)
    return prompts


class DecoderPromptType(Enum):
    """For encoder/decoder models only."""
    CUSTOM = 1
    NONE = 2
    EMPTY_STR = 3


@pytest.fixture
def example_encoder_decoder_prompts(
) -> Dict[DecoderPromptType, List[ExplicitEncoderDecoderPrompt]]:
    """
    Returns a dictionary that maps each decoder prompt type to a list of
    encoder/decoder prompts. Within each list, the i-th entry pairs the
    i-th encoder prompt with the corresponding decoder prompt: the custom
    decoder prompts are the encoder prompts in reverse order, while the
    other variants pair every encoder prompt with an empty string or
    ``None``.
    """
    encoder_prompts = []
    for filename in _TEST_PROMPTS:
        encoder_prompts += _read_prompts(filename)

    custom_decoder_prompts = encoder_prompts[::-1]
    empty_str_decoder_prompts = [""] * len(encoder_prompts)
    none_decoder_prompts = [None] * len(encoder_prompts)

    return {
        DecoderPromptType.NONE:
        zip_enc_dec_prompts(encoder_prompts, none_decoder_prompts),
        DecoderPromptType.EMPTY_STR:
        zip_enc_dec_prompts(encoder_prompts, empty_str_decoder_prompts),
        DecoderPromptType.CUSTOM:
        zip_enc_dec_prompts(encoder_prompts, custom_decoder_prompts),
    }
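
# Conceptual shape of the fixture's return value (entries are
# ExplicitEncoderDecoderPrompt objects; shown here as (enc, dec) pairs
# for illustration only):
#
#     DecoderPromptType.NONE      -> (enc[i], None) for each i
#     DecoderPromptType.EMPTY_STR -> (enc[i], "")   for each i
#     DecoderPromptType.CUSTOM    -> (enc[i], enc[len(enc) - 1 - i])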
@pytest.fixture
def example_long_prompts() -> List[str]:
    prompts = []
    for filename in _LONG_PROMPTS:
        prompts += _read_prompts(filename)
    return prompts


@pytest.fixture(scope="session")
def image_assets() -> _ImageAssets:
    return IMAGE_ASSETS


_T = TypeVar("_T", nn.Module, torch.Tensor, BatchEncoding, BatchFeature)


class HfRunner:

    def wrap_device(self, input: _T) -> _T:
        if not is_cpu():
            return input.to("cuda")
        else:
            return input.to("cpu")

    def __init__(
        self,
        model_name: str,
        dtype: str = "half",
        *,
        model_kwargs: Optional[Dict[str, Any]] = None,
        is_embedding_model: bool = False,
        auto_cls=AutoModelForCausalLM,
        postprocess_inputs: Callable[[BatchEncoding],
                                     BatchEncoding] = identity,
    ) -> None:
        torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[dtype]

        self.model_name = model_name

        if is_embedding_model:
            # Lazy init required for AMD CI
            from sentence_transformers import SentenceTransformer
            self.model = self.wrap_device(
                SentenceTransformer(
                    model_name,
                    device="cpu",
                ).to(dtype=torch_dtype))
        else:
            model_kwargs = model_kwargs if model_kwargs is not None else {}
            self.model = self.wrap_device(
                auto_cls.from_pretrained(
                    model_name,
                    torch_dtype=torch_dtype,
                    trust_remote_code=True,
                    **model_kwargs,
                ))

        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            torch_dtype=torch_dtype,
            trust_remote_code=True,
        )

        try:
            # don't put this import at the top level
            # it will call torch.cuda.device_count()
            from transformers import AutoProcessor  # noqa: F401
            self.processor = AutoProcessor.from_pretrained(
                model_name,
                torch_dtype=torch_dtype,
                trust_remote_code=True,
            )
        except Exception as exc:
            # loguru formats positional args with str.format, so use {}
            # placeholders rather than %-style ones
            logger.warning(
                "Unable to auto-load HuggingFace processor for model ({}). "
                "Using tokenizer instead. Reason: {}", model_name, exc)
            self.processor = self.tokenizer

        self.postprocess_inputs = postprocess_inputs

    def generate(
        self,
        prompts: List[str],
        images: Optional[List[Image.Image]] = None,
        **kwargs: Any,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        if images:
            assert len(prompts) == len(images)

        outputs: List[Tuple[List[List[int]], List[str]]] = []
        for i, prompt in enumerate(prompts):
            processor_kwargs: Dict[str, Any] = {
                "text": prompt,
                "return_tensors": "pt",
            }
            if images is not None and images[i] is not None:
                processor_kwargs["images"] = images[i]

            inputs = self.processor(**processor_kwargs)
            inputs = self.postprocess_inputs(inputs)

            output_ids = self.model.generate(
                **self.wrap_device(inputs),
                use_cache=True,
                **kwargs,
            )
            output_str = self.processor.batch_decode(
                output_ids,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=False,
            )
            output_ids = output_ids.cpu().tolist()
            outputs.append((output_ids, output_str))

        return outputs

    def generate_greedy(
        self,
        prompts: List[str],
        max_tokens: int,
        images: Optional[List[Image.Image]] = None,
        **kwargs: Any,
    ) -> List[Tuple[List[int], str]]:
        outputs = self.generate(prompts,
                                do_sample=False,
                                max_new_tokens=max_tokens,
                                images=images,
                                **kwargs)

        return [(output_ids[0], output_str[0])
                for output_ids, output_str in outputs]

    def generate_beam_search(
        self,
        prompts: List[str],
        beam_width: int,
        max_tokens: int,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        outputs = self.generate(prompts,
                                do_sample=False,
                                max_new_tokens=max_tokens,
                                num_beams=beam_width,
                                num_return_sequences=beam_width)
        for i in range(len(outputs)):
            output_ids, output_str = outputs[i]
            for j in range(len(output_ids)):
                output_ids[j] = [
                    x for x in output_ids[j]
                    if x != self.tokenizer.pad_token_id
                ]
            outputs[i] = (output_ids, output_str)
        return outputs

    def generate_greedy_logprobs(
        self,
        prompts: List[str],
        max_tokens: int,
        images: Optional[List[Image.Image]] = None,
        **kwargs: Any,
    ) -> List[List[torch.Tensor]]:
        all_logprobs: List[List[torch.Tensor]] = []
        for i, prompt in enumerate(prompts):
            processor_kwargs: Dict[str, Any] = {
                "text": prompt,
                "return_tensors": "pt",
            }
            if images is not None and images[i] is not None:
                processor_kwargs["images"] = images[i]

            inputs = self.processor(**processor_kwargs)
            inputs = self.postprocess_inputs(inputs)

            output = self.model.generate(
                **self.wrap_device(inputs),
                use_cache=True,
                do_sample=False,
                max_new_tokens=max_tokens,
                output_hidden_states=True,
                return_dict_in_generate=True,
                **kwargs,
            )
            seq_logprobs: List[torch.Tensor] = []
            for hidden_states in output.hidden_states:
                last_hidden_states = hidden_states[-1][0]
                logits = torch.matmul(
                    last_hidden_states,
                    self.model.get_output_embeddings().weight.t(),
                )
                if self.model.get_output_embeddings().bias is not None:
                    logits += self.model.get_output_embeddings(
                    ).bias.unsqueeze(0)
                logprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
                seq_logprobs.append(logprobs)
            all_logprobs.append(seq_logprobs)
        return all_logprobs

    def _hidden_states_to_logprobs(
        self,
        hidden_states,
        num_logprobs,
    ) -> Tuple[List[Dict[int, float]], int]:
        seq_logprobs: List[torch.Tensor] = []
        output_len = len(hidden_states)
        for _, hidden_state in enumerate(hidden_states):
            last_hidden_states = hidden_state[-1][0]
            logits = torch.matmul(
                last_hidden_states,
                self.model.get_output_embeddings().weight.t(),
            )
            if getattr(self.model.get_output_embeddings(), "bias",
                       None) is not None:
                logits += self.model.get_output_embeddings().bias.unsqueeze(0)
            logprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
            seq_logprobs.append(logprobs)

        # convert to dict
        seq_logprobs_lst: List[Dict[int, float]] = []
        for tok_idx, tok_logprobs in enumerate(seq_logprobs):
            # drop prompt logprobs
            if tok_idx == 0:
                tok_logprobs = tok_logprobs[-1, :].reshape(1, -1)
            topk = tok_logprobs.topk(num_logprobs)

            tok_logprobs_dct = {}
            for token_id, logprob in zip(topk.indices[0], topk.values[0]):
                tok_logprobs_dct[token_id.item()] = logprob.item()

            seq_logprobs_lst.append(tok_logprobs_dct)

        return (
            seq_logprobs_lst,
            output_len,
        )

    def generate_greedy_logprobs_limit(
        self,
        prompts: List[str],
        max_tokens: int,
        num_logprobs: int,
        images: Optional[List[Image.Image]] = None,
        audios: Optional[List[Tuple[np.ndarray, int]]] = None,
        **kwargs: Any,
    ) -> List[Tuple[List[int], str, List[Dict[int, float]]]]:
        all_logprobs: List[List[Dict[int, float]]] = []
        all_output_ids: List[List[int]] = []
        all_output_strs: List[str] = []

        for i, prompt in enumerate(prompts):
            processor_kwargs: Dict[str, Any] = {
                "text": prompt,
                "return_tensors": "pt",
            }
            if images is not None and images[i] is not None:
                processor_kwargs["images"] = images[i]
            if audios is not None:
                audio, sr = audios[i]
                processor_kwargs["audio"] = audio
                processor_kwargs["sampling_rate"] = sr

            inputs = self.processor(**processor_kwargs)
            inputs = self.postprocess_inputs(inputs)

            output = self.model.generate(
                **self.wrap_device(inputs),
                use_cache=True,
                do_sample=False,
                max_new_tokens=max_tokens,
                output_hidden_states=True,
                return_dict_in_generate=True,
                **kwargs,
            )

            (
                seq_logprobs_lst,
                output_len,
            ) = self._hidden_states_to_logprobs(output.hidden_states,
                                                num_logprobs)

            all_logprobs.append(seq_logprobs_lst)
            seq_ids = output.sequences[0]
            output_len = len(seq_logprobs_lst)
            output_ids = seq_ids[-output_len:]
            all_output_ids.append(output_ids.tolist())
            all_output_strs.append(self.tokenizer.decode(output_ids))

        outputs = zip(all_output_ids, all_output_strs, all_logprobs)
        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def generate_encoder_decoder_greedy_logprobs_limit(
        self,
        encoder_decoder_prompts: List[ExplicitEncoderDecoderPrompt[str, str]],
        max_tokens: int,
        num_logprobs: int,
        **kwargs: Any,
    ) -> List[Tuple[List[int], str, List[Dict[int, float]]]]:
        """
        Greedy logprobs generation for HuggingFace encoder/decoder models
        """
        all_logprobs: List[List[Dict[int, float]]] = []
        all_output_ids: List[List[int]] = []
        all_output_strs: List[str] = []

        for (encoder_prompt, decoder_prompt
             ) in to_enc_dec_tuple_list(encoder_decoder_prompts):
            encoder_input_ids = self.wrap_device(
                self.tokenizer(encoder_prompt, return_tensors="pt").input_ids)
            decoder_input_ids = (
                None if decoder_prompt is None else self.wrap_device(
                    self.tokenizer(decoder_prompt,
                                   return_tensors="pt").input_ids))

            output = self.model.generate(
                encoder_input_ids,
                decoder_input_ids=decoder_input_ids,
                use_cache=True,
                do_sample=False,
                max_new_tokens=max_tokens,
                output_hidden_states=True,
                return_dict_in_generate=True,
                **kwargs,
            )

            (
                seq_logprobs_lst,
                output_len,
            ) = self._hidden_states_to_logprobs(output.decoder_hidden_states,
                                                num_logprobs)

            all_logprobs.append(seq_logprobs_lst)
            seq_ids = output.sequences[0]
            output_ids = seq_ids[-output_len:]
            all_output_ids.append(output_ids.tolist())
            all_output_strs.append(self.tokenizer.decode(output_ids))

        outputs = zip(all_output_ids, all_output_strs, all_logprobs)
        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def encode(self, prompts: List[str]) -> List[List[torch.Tensor]]:
        return self.model.encode(prompts)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
        cleanup()


@pytest.fixture(scope="session")
def hf_runner():
    return HfRunner
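
# Example usage (illustrative sketch; the model name is hypothetical):
#
#     def test_hf_greedy(hf_runner, example_prompts):
#         with hf_runner("facebook/opt-125m", dtype="half") as hf_model:
#             outputs = hf_model.generate_greedy(example_prompts,
#                                                max_tokens=32)
#
# Using the runner as a context manager ensures the model is deleted and
# cleanup() runs before the next test allocates GPU memory.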
class AphroditeRunner:

    def __init__(
        self,
        model_name: str,
        tokenizer_name: Optional[str] = None,
        # Use smaller max model length, otherwise bigger model cannot run due
        # to kv cache size limit.
        max_model_len: int = 1024,
        dtype: str = "half",
        disable_log_stats: bool = True,
        tensor_parallel_size: int = 1,
        block_size: int = 16,
        enable_chunked_prefill: bool = False,
        swap_space: int = 4,
        enforce_eager: Optional[bool] = False,
        **kwargs,
    ) -> None:
        self.model = LLM(
            model=model_name,
            tokenizer=tokenizer_name,
            trust_remote_code=True,
            dtype=dtype,
            swap_space=swap_space,
            enforce_eager=enforce_eager,
            disable_log_stats=disable_log_stats,
            tensor_parallel_size=tensor_parallel_size,
            max_model_len=max_model_len,
            block_size=block_size,
            enable_chunked_prefill=enable_chunked_prefill,
            **kwargs,
        )

    def generate(
        self,
        prompts: List[str],
        sampling_params: SamplingParams,
        images: Optional[PromptImageInput] = None,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        if images is not None:
            assert len(prompts) == len(images)

        inputs = [TextPrompt(prompt=prompt) for prompt in prompts]
        if images is not None:
            for i, image in enumerate(images):
                inputs[i]["multi_modal_data"] = {"image": image}

        req_outputs = self.model.generate(inputs,
                                          sampling_params=sampling_params)

        outputs: List[Tuple[List[List[int]], List[str]]] = []
        for req_output in req_outputs:
            prompt_str = req_output.prompt
            prompt_ids = req_output.prompt_token_ids
            req_sample_output_ids: List[List[int]] = []
            req_sample_output_strs: List[str] = []
            for sample in req_output.outputs:
                output_str = sample.text
                output_ids = list(sample.token_ids)
                req_sample_output_ids.append(prompt_ids + output_ids)
                req_sample_output_strs.append(prompt_str + output_str)
            outputs.append((req_sample_output_ids, req_sample_output_strs))
        return outputs

    def _final_steps_generate_w_logprobs(
        self,
        req_outputs: List[RequestOutput],
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        outputs: List[Tuple[List[int], str, Optional[SampleLogprobs]]] = []
        for req_output in req_outputs:
            for sample in req_output.outputs:
                output_str = sample.text
                output_ids = list(sample.token_ids)
                output_logprobs = sample.logprobs
            outputs.append((output_ids, output_str, output_logprobs))
        return outputs

    def generate_w_logprobs(
        self,
        prompts: List[str],
        sampling_params: SamplingParams,
        images: Optional[PromptImageInput] = None,
        audios: Optional[PromptAudioInput] = None,
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        assert sampling_params.logprobs is not None

        if images is not None:
            assert len(prompts) == len(images)

        inputs = [TextPrompt(prompt=prompt) for prompt in prompts]
        if images is not None:
            for i, image in enumerate(images):
                inputs[i]["multi_modal_data"] = {"image": image}

        if audios is not None:
            for i, audio in enumerate(audios):
                inputs[i]["multi_modal_data"] = {"audio": audio}

        req_outputs = self.model.generate(inputs,
                                          sampling_params=sampling_params)
        return self._final_steps_generate_w_logprobs(req_outputs)

    def generate_encoder_decoder_w_logprobs(
        self,
        encoder_decoder_prompts: List[ExplicitEncoderDecoderPrompt[str, str]],
        sampling_params: SamplingParams,
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        """
        Logprobs generation for Aphrodite encoder/decoder models
        """
        assert sampling_params.logprobs is not None
        req_outputs = self.model.generate(encoder_decoder_prompts,
                                          sampling_params=sampling_params)
        return self._final_steps_generate_w_logprobs(req_outputs)

    def generate_greedy(
        self,
        prompts: List[str],
        max_tokens: int,
        images: Optional[List[Image.Image]] = None,
    ) -> List[Tuple[List[int], str]]:
        greedy_params = SamplingParams(temperature=0.0, max_tokens=max_tokens)
        outputs = self.generate(prompts, greedy_params, images=images)
        return [(output_ids[0], output_str[0])
                for output_ids, output_str in outputs]

    def generate_greedy_logprobs(
        self,
        prompts: List[str],
        max_tokens: int,
        num_logprobs: int,
        images: Optional[PromptImageInput] = None,
        audios: Optional[PromptAudioInput] = None,
        stop_token_ids: Optional[List[int]] = None,
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        greedy_logprobs_params = SamplingParams(temperature=0.0,
                                                max_tokens=max_tokens,
                                                logprobs=num_logprobs,
                                                stop_token_ids=stop_token_ids)
        outputs = self.generate_w_logprobs(prompts,
                                           greedy_logprobs_params,
                                           images=images,
                                           audios=audios)

        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def generate_encoder_decoder_greedy_logprobs(
        self,
        encoder_decoder_prompts: List[ExplicitEncoderDecoderPrompt[str, str]],
        max_tokens: int,
        num_logprobs: int,
    ) -> List[Tuple[List[int], str, Optional[SampleLogprobs]]]:
        """
        Greedy logprobs generation for Aphrodite encoder/decoder models
        """
        greedy_logprobs_params = SamplingParams(temperature=0.0,
                                                use_beam_search=False,
                                                max_tokens=max_tokens,
                                                logprobs=num_logprobs)
        outputs = self.generate_encoder_decoder_w_logprobs(
            encoder_decoder_prompts, greedy_logprobs_params)

        return [(output_ids, output_str, output_logprobs)
                for output_ids, output_str, output_logprobs in outputs]

    def generate_beam_search(
        self,
        prompts: List[str],
        beam_width: int,
        max_tokens: int,
    ) -> List[Tuple[List[List[int]], List[str]]]:
        beam_search_params = SamplingParams(n=beam_width,
                                            use_beam_search=True,
                                            temperature=0.0,
                                            max_tokens=max_tokens)
        outputs = self.generate(prompts, beam_search_params)
        return outputs

    def encode(self, prompts: List[str]) -> List[List[float]]:
        req_outputs = self.model.encode(prompts)
        outputs = []
        for req_output in req_outputs:
            embedding = req_output.outputs.embedding
            outputs.append(embedding)
        return outputs

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
        cleanup()


@pytest.fixture(scope="session")
def aphrodite_runner():
    return AphroditeRunner
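
# Example usage (illustrative sketch; the model name is hypothetical):
#
#     def test_aphrodite_greedy(aphrodite_runner, example_prompts):
#         with aphrodite_runner("facebook/opt-125m") as aphrodite_model:
#             outputs = aphrodite_model.generate_greedy(example_prompts,
#                                                       max_tokens=32)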
def get_tokenizer_pool_config(tokenizer_group_type):
    if tokenizer_group_type is None:
        return None
    if tokenizer_group_type == "ray":
        return TokenizerPoolConfig(pool_size=1,
                                   pool_type="ray",
                                   extra_config={})
    if isinstance(tokenizer_group_type, type):
        return TokenizerPoolConfig(pool_size=1,
                                   pool_type=tokenizer_group_type,
                                   extra_config={})
    raise ValueError(f"Unknown tokenizer_group_type: {tokenizer_group_type}")
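
# The helper above accepts None (no pool), the string "ray", or a custom
# pool class. Illustrative sketch of the "ray" case:
#
#     config = get_tokenizer_pool_config("ray")
#     # -> TokenizerPoolConfig(pool_size=1, pool_type="ray", extra_config={})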
@pytest.fixture()
def temporary_enable_log_propagate():
    import logging
    logger = logging.getLogger("aphrodite")
    logger.propagate = True
    yield
    logger.propagate = False


@pytest.fixture()
def caplog_aphrodite(temporary_enable_log_propagate, caplog):
    # To capture aphrodite log, we should enable propagate=True temporarily
    # because caplog depends on logs propagated to the root logger.
    yield caplog


@pytest.fixture(scope="session")
def num_gpus_available():
    """Get number of GPUs without initializing the CUDA context
    in current process."""
    return cuda_device_count_stateless()


temp_dir = tempfile.gettempdir()
_dummy_path = os.path.join(temp_dir, "dummy_opt")


@pytest.fixture
def dummy_opt_path():
    json_path = os.path.join(_dummy_path, "config.json")
    if not os.path.exists(_dummy_path):
        snapshot_download(repo_id="facebook/opt-125m",
                          local_dir=_dummy_path,
                          ignore_patterns=[
                              "*.bin", "*.bin.index.json", "*.pt", "*.h5",
                              "*.msgpack"
                          ])
        assert os.path.exists(json_path)
        with open(json_path, "r") as f:
            config = json.load(f)
        config["architectures"] = ["MyOPTForCausalLM"]
        with open(json_path, "w") as f:
            json.dump(config, f)
    return _dummy_path