utils.py

import asyncio
import enum
import gc
import os
import socket
import subprocess
import uuid
from collections import OrderedDict, defaultdict
from functools import lru_cache, partial
from platform import uname
from typing import (Any, AsyncIterator, Awaitable, Callable, Dict, Generic,
                    Hashable, List, Optional, Tuple, TypeVar, Union)

import psutil
import torch
from loguru import logger
from packaging.version import Version, parse

T = TypeVar("T")

STR_DTYPE_TO_TORCH_DTYPE = {
    "half": torch.half,
    "bfloat16": torch.bfloat16,
    "float": torch.float,
    "fp8": torch.uint8,
}


class Device(enum.Enum):
    GPU = enum.auto()
    CPU = enum.auto()


class Counter:

    def __init__(self, start: int = 0) -> None:
        self.counter = start

    def __next__(self) -> int:
        i = self.counter
        self.counter += 1
        return i

    def reset(self) -> None:
        self.counter = 0


class LRUCache(Generic[T]):

    def __init__(self, capacity: int):
        self.cache = OrderedDict[Hashable, T]()
        self.capacity = capacity

    def __contains__(self, key: Hashable) -> bool:
        return key in self.cache

    def __len__(self) -> int:
        return len(self.cache)

    def __getitem__(self, key: Hashable) -> T:
        return self.get(key)

    def __setitem__(self, key: Hashable, value: T) -> None:
        self.put(key, value)

    def __delitem__(self, key: Hashable) -> None:
        self.pop(key)

    def touch(self, key: Hashable) -> None:
        self.cache.move_to_end(key)

    def get(self,
            key: Hashable,
            default_value: Optional[T] = None) -> Optional[T]:
        if key in self.cache:
            value = self.cache[key]
            self.cache.move_to_end(key)
        else:
            value = default_value
        return value

    def put(self, key: Hashable, value: T) -> None:
        self.cache[key] = value
        self.cache.move_to_end(key)
        self._remove_old_if_needed()

    def _on_remove(self, key: Hashable, value: T):
        pass

    def remove_oldest(self):
        if not self.cache:
            return
        key, value = self.cache.popitem(last=False)
        self._on_remove(key, value)

    def _remove_old_if_needed(self) -> None:
        while len(self.cache) > self.capacity:
            self.remove_oldest()

    def pop(self, key: Hashable, default_value: Optional[Any] = None) -> T:
        run_on_remove = key in self.cache
        value = self.cache.pop(key, default_value)
        if run_on_remove:
            self._on_remove(key, value)
        return value

    def clear(self):
        while len(self.cache) > 0:
            self.remove_oldest()
        self.cache.clear()
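

# Illustrative usage sketch (not part of the original utils.py): exercising
# LRUCache, including the `_on_remove` eviction hook via a hypothetical
# subclass. The function and class names below are illustrative additions
# and are never called at import time.
def _example_lru_cache_usage() -> None:

    class _LoggingLRUCache(LRUCache[int]):

        def _on_remove(self, key: Hashable, value: int):
            # Invoked once for every entry that is evicted or popped.
            logger.debug(f"Evicted {key!r} -> {value!r}")

    cache = _LoggingLRUCache(capacity=2)
    cache.put("a", 1)
    cache["b"] = 2
    cache["c"] = 3  # Exceeds capacity: "a" (least recently used) is evicted.
    assert "a" not in cache
    assert cache.get("b") == 2  # get() also refreshes recency.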


def is_hip() -> bool:
    return torch.version.hip is not None


@lru_cache(maxsize=None)
def is_cpu() -> bool:
    from importlib.metadata import PackageNotFoundError, version
    try:
        return "cpu" in version("aphrodite-engine")
    except PackageNotFoundError:
        return False


@lru_cache(maxsize=None)
def is_neuron() -> bool:
    try:
        import transformers_neuronx
    except ImportError:
        transformers_neuronx = None
    return transformers_neuronx is not None


@lru_cache(maxsize=None)
def get_max_shared_memory_bytes(gpu: int = 0) -> int:
    """Returns the maximum shared memory per thread block in bytes."""
    # NOTE: This import statement should be executed lazily since
    # the Neuron-X backend does not have the `cuda_utils` module.
    from aphrodite._C import cuda_utils

    max_shared_mem = (
        cuda_utils.get_max_shared_memory_per_block_device_attribute(gpu))
    # A value of 0 will cause MAX_SEQ_LEN to become negative and
    # test_attention.py will fail.
    assert max_shared_mem > 0, "max_shared_mem cannot be zero"
    return int(max_shared_mem)


def get_cpu_memory() -> int:
    """Returns the total CPU memory of the node in bytes."""
    return psutil.virtual_memory().total


def random_uuid() -> str:
    return str(uuid.uuid4().hex)


@lru_cache(maxsize=None)
def in_wsl() -> bool:
    # Reference: https://github.com/microsoft/WSL/issues/4071
    return "microsoft" in " ".join(uname()).lower()


def make_async(func: Callable[..., T]) -> Callable[..., Awaitable[T]]:
    """Take a blocking function, and run it in an executor thread.

    This function prevents the blocking function from blocking the
    asyncio event loop.
    The code in this function needs to be thread safe.
    """

    def _async_wrapper(*args, **kwargs) -> asyncio.Future:
        loop = asyncio.get_event_loop()
        p_func = partial(func, *args, **kwargs)
        return loop.run_in_executor(executor=None, func=p_func)

    return _async_wrapper
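

# Illustrative usage sketch (not part of the original utils.py): wrapping a
# blocking call with make_async so it runs in the default executor instead of
# blocking the event loop. The helper names below are hypothetical and the
# function is never called at import time.
def _example_make_async() -> None:
    import time

    def _blocking_square(x: int) -> int:
        time.sleep(0.1)  # Stands in for blocking I/O or CPU-bound work.
        return x * x

    async def _main() -> None:
        async_square = make_async(_blocking_square)
        # The event loop stays responsive while the executor thread sleeps.
        result = await async_square(7)
        assert result == 49

    asyncio.run(_main())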


def merge_async_iterators(
        *iterators: AsyncIterator[T]) -> AsyncIterator[Tuple[int, T]]:
    """Merge multiple asynchronous iterators into a single iterator.

    This method handles the case where some iterators finish before others.
    When it yields, it yields a tuple (i, item) where i is the index of the
    iterator that yielded the item.
    """
    queue: asyncio.Queue[Union[Tuple[int, T], Exception]] = asyncio.Queue()

    finished = [False] * len(iterators)

    async def producer(i: int, iterator: AsyncIterator[T]):
        try:
            async for item in iterator:
                await queue.put((i, item))
        except Exception as e:
            await queue.put(e)
        finished[i] = True

    _tasks = [
        asyncio.create_task(producer(i, iterator))
        for i, iterator in enumerate(iterators)
    ]

    async def consumer():
        while not all(finished) or not queue.empty():
            item = await queue.get()
            if isinstance(item, Exception):
                raise item
            yield item
        await asyncio.gather(*_tasks)

    return consumer()
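

# Illustrative usage sketch (not part of the original utils.py): merging two
# async generators of different lengths. Each yielded pair carries the index
# of the producing iterator. The helper names are hypothetical; the consumer
# stops once the expected number of items has arrived.
def _example_merge_async_iterators() -> None:

    async def _count(n: int) -> AsyncIterator[int]:
        for i in range(n):
            await asyncio.sleep(0)
            yield i

    async def _main() -> None:
        merged = merge_async_iterators(_count(2), _count(3))
        collected: List[Tuple[int, int]] = []
        async for producer_idx, item in merged:
            collected.append((producer_idx, item))
            if len(collected) == 5:
                # All expected items (2 + 3) have arrived, so stop consuming.
                break
        assert {idx for idx, _ in collected} == {0, 1}

    asyncio.run(_main())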


def get_ip() -> str:
    # try ipv4
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))  # Doesn't need to be reachable
        return s.getsockname()[0]
    except OSError:
        # try ipv6
        s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        s.connect(("dns.google", 80))
        return s.getsockname()[0]


def get_distributed_init_method(ip: str, port: int) -> str:
    # Brackets are not permitted in ipv4 addresses,
    # see https://github.com/python/cpython/issues/103848
    return f"tcp://[{ip}]:{port}" if ":" in ip else f"tcp://{ip}:{port}"


def get_open_port() -> int:
    # try ipv4
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            return s.getsockname()[1]
    except OSError:
        # try ipv6
        with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            return s.getsockname()[1]


def set_cuda_visible_devices(device_ids: List[int]) -> None:
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, device_ids))


def chunk_list(lst, chunk_size):
    """Return successive chunk_size-sized chunks from lst."""
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def cdiv(a: int, b: int) -> int:
    """Ceiling division."""
    return -(a // -b)
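

# Illustrative usage sketch (not part of the original utils.py): chunk_list
# and cdiv are naturally paired when splitting items into fixed-size batches.
# The function name is a hypothetical addition, never called at import time.
def _example_chunking() -> None:
    items = list(range(10))
    batches = chunk_list(items, 4)
    assert batches == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
    # cdiv gives the number of batches without materializing them.
    assert cdiv(len(items), 4) == 3 == len(batches)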


@lru_cache(maxsize=None)
def get_nvcc_cuda_version() -> Optional[Version]:
    cuda_home = os.environ.get('CUDA_HOME')
    if not cuda_home:
        cuda_home = '/usr/local/cuda'
        if os.path.isfile(cuda_home + '/bin/nvcc'):
            logger.info(
                f'CUDA_HOME is not set in the environment. Using {cuda_home} '
                'as CUDA_HOME.')
        else:
            logger.warning(
                f'nvcc not found in {cuda_home}. Skipping CUDA version check!')
            return None
    nvcc_output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"],
                                          universal_newlines=True)
    output = nvcc_output.split()
    release_idx = output.index("release") + 1
    nvcc_cuda_version = parse(output[release_idx].split(",")[0])
    return nvcc_cuda_version


def _generate_random_fp8(
    tensor: torch.Tensor,
    low: float,
    high: float,
) -> None:
    # NOTE: Due to NaN and Inf representation for fp8 data type,
    # we may get Inf or NaN if we directly use torch.randint
    # to generate random data for fp8 data.
    # For example, s.11111.00 in fp8e5m2 format represents Inf.
    #     | E4M3       | E5M2
    # ----|------------|-------------------
    # Inf | N/A        | s.11111.00
    # NaN | s.1111.111 | s.11111.{01,10,11}
    from aphrodite._C import cache_ops
    tensor_tmp = torch.empty_like(tensor, dtype=torch.float16)
    tensor_tmp.uniform_(low, high)
    cache_ops.convert_fp8(tensor_tmp, tensor)
    del tensor_tmp


def create_kv_caches_with_random(
    num_blocks: int,
    block_size: int,
    num_layers: int,
    num_heads: int,
    head_size: int,
    cache_dtype: Optional[Union[str, torch.dtype]],
    model_dtype: Optional[Union[str, torch.dtype]] = None,
    seed: Optional[int] = 0,
    device: Optional[str] = "cuda",
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    if isinstance(cache_dtype, str):
        if cache_dtype == "auto":
            if isinstance(model_dtype, str):
                torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[model_dtype]
            elif isinstance(model_dtype, torch.dtype):
                torch_dtype = model_dtype
            else:
                raise ValueError(f"Invalid model dtype: {model_dtype}")
        elif cache_dtype in ["half", "bfloat16", "float"]:
            torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_dtype]
        elif cache_dtype == "fp8":
            torch_dtype = torch.uint8
        else:
            raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")
    elif isinstance(cache_dtype, torch.dtype):
        torch_dtype = cache_dtype
    else:
        raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")

    scale = head_size**-0.5
    x = 16 // torch.tensor([], dtype=torch_dtype).element_size()
    key_cache_shape = (num_blocks, num_heads, head_size // x, block_size, x)
    key_caches = []
    for _ in range(num_layers):
        key_cache = torch.empty(size=key_cache_shape,
                                dtype=torch_dtype,
                                device=device)
        if cache_dtype in ["auto", "half", "bfloat16", "float"]:
            key_cache.uniform_(-scale, scale)
        elif cache_dtype == 'fp8':
            _generate_random_fp8(key_cache, -scale, scale)
        else:
            raise ValueError(
                f"Does not support key cache of type {cache_dtype}")
        key_caches.append(key_cache)

    value_cache_shape = (num_blocks, num_heads, head_size, block_size)
    value_caches = []
    for _ in range(num_layers):
        value_cache = torch.empty(size=value_cache_shape,
                                  dtype=torch_dtype,
                                  device=device)
        if cache_dtype in ["auto", "half", "bfloat16", "float"]:
            value_cache.uniform_(-scale, scale)
        elif cache_dtype == 'fp8':
            _generate_random_fp8(value_cache, -scale, scale)
        else:
            raise ValueError(
                f"Does not support value cache of type {cache_dtype}")
        value_caches.append(value_cache)
    return key_caches, value_caches
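

# Illustrative usage sketch (not part of the original utils.py): the shapes
# produced by create_kv_caches_with_random for a small configuration. Runs on
# CPU here for portability; the function's default device is "cuda". The
# function name is a hypothetical addition, never called at import time.
def _example_create_kv_caches() -> None:
    key_caches, value_caches = create_kv_caches_with_random(
        num_blocks=4,
        block_size=16,
        num_layers=2,
        num_heads=8,
        head_size=64,
        cache_dtype="float",
        device="cpu",
    )
    # Keys are tiled along a vectorization factor x = 16 // element_size.
    x = 16 // torch.tensor([], dtype=torch.float).element_size()
    assert key_caches[0].shape == (4, 8, 64 // x, 16, x)
    assert value_caches[0].shape == (4, 8, 64, 16)
    assert len(key_caches) == len(value_caches) == 2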


@lru_cache
def print_warning_once(msg: str) -> None:
    logger.warning(msg)


@lru_cache(maxsize=None)
def is_pin_memory_available() -> bool:
    if in_wsl():
        # Pinning memory in WSL is not supported.
        # https://docs.nvidia.com/cuda/wsl-user-guide/index.html#known-limitations-for-linux-cuda-applications
        print_warning_once("Using 'pin_memory=False' as WSL is detected. "
                           "This may slow down performance.")
        return False
    elif is_neuron():
        print_warning_once("Pin memory is not supported on Neuron.")
        return False
    elif is_cpu():
        return False
    return True


class CudaMemoryProfiler:

    def __init__(self, device=None):
        self.device = device

    def current_memory_usage(self) -> float:
        # Return the memory usage in bytes.
        torch.cuda.reset_peak_memory_stats(self.device)
        mem = torch.cuda.max_memory_allocated(self.device)
        return mem

    def __enter__(self):
        self.initial_memory = self.current_memory_usage()
        # This allows us to call methods of the context manager if needed
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.final_memory = self.current_memory_usage()
        self.consumed_memory = self.final_memory - self.initial_memory

        # Force garbage collection
        gc.collect()
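

# Illustrative usage sketch (not part of the original utils.py): measuring
# the GPU memory consumed by an allocation inside the profiler's context.
# Guarded on CUDA availability; the function name is a hypothetical addition,
# never called at import time.
def _example_cuda_memory_profiler() -> None:
    if not torch.cuda.is_available():
        return
    torch.cuda.init()  # Ensure the CUDA context exists before reading stats.
    with CudaMemoryProfiler() as profiler:
        buffer = torch.empty(1024, 1024, dtype=torch.float32, device="cuda")
    # consumed_memory is set in __exit__; roughly 4 MiB for this buffer.
    logger.info(f"Allocation consumed "
                f"{profiler.consumed_memory / 1024**2:.1f} MiB")
    del buffer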


def str_to_int_tuple(s: str) -> Tuple[int, ...]:
    """Convert a string to a tuple of integers."""
    try:
        return tuple(map(int, s.split(",")))
    except ValueError as e:
        raise ValueError(
            "String must be a series of integers separated by commas "
            f"(e.g., 1, 2, 3). Given input: {s}") from e


def pad_to_max_length(x: List[int], max_len: int, pad: int) -> List[int]:
    assert len(x) <= max_len
    return x + [pad] * (max_len - len(x))


def make_tensor_with_pad(
    x: List[List[int]],
    max_len: int,
    pad: int,
    dtype: torch.dtype,
    device: Optional[Union[str, torch.device]],
) -> torch.Tensor:
    """Make a padded tensor from 2D inputs.

    The padding is applied to the end of each inner list until it reaches
    `max_len`.
    """
    padded_x = [pad_to_max_length(x_i, max_len, pad) for x_i in x]
    return torch.tensor(padded_x, dtype=dtype, device=device)
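

# Illustrative usage sketch (not part of the original utils.py): padding
# ragged token lists into a dense tensor, as done when batching sequences of
# different lengths. The function name is a hypothetical addition, never
# called at import time.
def _example_make_tensor_with_pad() -> None:
    token_lists = [[1, 2, 3], [4, 5]]
    padded = make_tensor_with_pad(token_lists,
                                  max_len=4,
                                  pad=0,
                                  dtype=torch.long,
                                  device="cpu")
    assert padded.tolist() == [[1, 2, 3, 0], [4, 5, 0, 0]]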


def async_tensor_h2d(
    data: list,
    dtype: torch.dtype,
    target_device: Union[str, torch.device],
    pin_memory: bool,
) -> torch.Tensor:
    """Asynchronously create a tensor and copy it from host to device."""
    t = torch.tensor(data, dtype=dtype, pin_memory=pin_memory, device="cpu")
    return t.to(device=target_device, non_blocking=True)


def maybe_expand_dim(tensor: torch.Tensor,
                     target_dims: int,
                     size: int = 1) -> torch.Tensor:
    """Expand the tensor to the target_dims."""
    if tensor.ndim < target_dims:
        tensor = tensor.view(-1, *([size] * (target_dims - tensor.ndim)))
    return tensor


def merge_dicts(dict1: Dict[Any, List[Any]],
                dict2: Dict[Any, List[Any]]) -> Dict[Any, List[Any]]:
    """Merge 2 dicts that have key -> List of items.

    When a key conflicts, the values in dict1 are prioritized.
    """
    merged_dict = defaultdict(list)
    for key, value in dict1.items():
        merged_dict[key].extend(value)

    for key, value in dict2.items():
        merged_dict[key].extend(value)

    return dict(merged_dict)
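

# Illustrative usage sketch (not part of the original utils.py): merge_dicts
# concatenates the value lists of both dicts, with dict1's items first for
# shared keys. The function name is a hypothetical addition, never called at
# import time.
def _example_merge_dicts() -> None:
    d1 = {"a": [1], "b": [2]}
    d2 = {"a": [3], "c": [4]}
    assert merge_dicts(d1, d2) == {"a": [1, 3], "b": [2], "c": [4]}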