utils.py

import asyncio
import datetime
import enum
import gc
import os
import socket
import subprocess
import sys
import tempfile
import threading
import uuid
import warnings
from collections import defaultdict
from functools import lru_cache, partial, wraps
from platform import uname
from typing import (Any, AsyncIterator, Awaitable, Callable, Dict, Generic,
                    Hashable, List, Optional, OrderedDict, Tuple, TypeVar,
                    Union)

import numpy as np
import psutil
import torch
from loguru import logger

from aphrodite.common.logger import enable_trace_function_call

T = TypeVar("T")

STR_DTYPE_TO_TORCH_DTYPE = {
    "half": torch.half,
    "bfloat16": torch.bfloat16,
    "float": torch.float,
    "fp8": torch.uint8,
    "fp8_e4m3": torch.uint8,
    "fp8_e5m2": torch.uint8,
}


class Device(enum.Enum):
    GPU = enum.auto()
    CPU = enum.auto()


class Counter:

    def __init__(self, start: int = 0) -> None:
        self.counter = start

    def __next__(self) -> int:
        i = self.counter
        self.counter += 1
        return i

    def reset(self) -> None:
        self.counter = 0
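
# Example (illustrative sketch, not part of the original module): a Counter
# hands out monotonically increasing integers, e.g. for request ids. It is not
# thread-safe on its own. The variable names below are hypothetical.
#
#     request_counter = Counter()
#     first_id = next(request_counter)   # 0
#     second_id = next(request_counter)  # 1
#     request_counter.reset()            # counting starts again at 0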


class LRUCache(Generic[T]):

    def __init__(self, capacity: int):
        self.cache: OrderedDict[Hashable, T] = OrderedDict()
        self.capacity = capacity

    def __contains__(self, key: Hashable) -> bool:
        return key in self.cache

    def __len__(self) -> int:
        return len(self.cache)

    def __getitem__(self, key: Hashable) -> Optional[T]:
        return self.get(key)

    def __setitem__(self, key: Hashable, value: T) -> None:
        self.put(key, value)

    def __delitem__(self, key: Hashable) -> None:
        self.pop(key)

    def touch(self, key: Hashable) -> None:
        self.cache.move_to_end(key)

    def get(self,
            key: Hashable,
            default_value: Optional[T] = None) -> Optional[T]:
        if key in self.cache:
            value: Optional[T] = self.cache[key]
            self.cache.move_to_end(key)
        else:
            value = default_value
        return value

    def put(self, key: Hashable, value: T) -> None:
        self.cache[key] = value
        self.cache.move_to_end(key)
        self._remove_old_if_needed()

    def _on_remove(self, key: Hashable, value: Optional[T]):
        pass

    def remove_oldest(self):
        if not self.cache:
            return
        key, value = self.cache.popitem(last=False)
        self._on_remove(key, value)

    def _remove_old_if_needed(self) -> None:
        while len(self.cache) > self.capacity:
            self.remove_oldest()

    def pop(self,
            key: Hashable,
            default_value: Optional[T] = None) -> Optional[T]:
        run_on_remove = key in self.cache
        value: Optional[T] = self.cache.pop(key, default_value)
        if run_on_remove:
            self._on_remove(key, value)
        return value

    def clear(self):
        while len(self.cache) > 0:
            self.remove_oldest()
        self.cache.clear()
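
# Example (illustrative sketch, not part of the original module): subclasses
# can override `_on_remove` to run a callback whenever an entry is evicted.
# The class and variable names below are hypothetical.
#
#     class EvictionLoggingCache(LRUCache):
#         def _on_remove(self, key, value):
#             logger.info(f"evicting {key}")
#
#     cache = EvictionLoggingCache(capacity=2)
#     cache.put("a", 1)
#     cache.put("b", 2)
#     cache.put("c", 3)  # evicts "a" and triggers _on_remove("a", 1)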


def is_hip() -> bool:
    return torch.version.hip is not None


@lru_cache(maxsize=None)
def is_cpu() -> bool:
    from importlib.metadata import PackageNotFoundError, version
    try:
        return "cpu" in version("aphrodite-engine")
    except PackageNotFoundError:
        return False


@lru_cache(maxsize=None)
def is_neuron() -> bool:
    try:
        import transformers_neuronx
    except ImportError:
        transformers_neuronx = None
    return transformers_neuronx is not None


@lru_cache(maxsize=None)
def get_max_shared_memory_bytes(gpu: int = 0) -> int:
    """Returns the maximum shared memory per thread block in bytes."""
    # NOTE: This import statement should be executed lazily since
    # the Neuron-X backend does not have the `cuda_utils` module.
    from aphrodite._C import cuda_utils

    max_shared_mem = (
        cuda_utils.get_max_shared_memory_per_block_device_attribute(gpu))
    # A value of 0 would cause MAX_SEQ_LEN to become negative and
    # test_attention.py would fail.
    assert max_shared_mem > 0, "max_shared_mem can not be zero"
    return int(max_shared_mem)


def get_cpu_memory() -> int:
    """Returns the total CPU memory of the node in bytes."""
    return psutil.virtual_memory().total


def random_uuid() -> str:
    return str(uuid.uuid4().hex)


@lru_cache(maxsize=None)
def get_aphrodite_instance_id():
    """
    If the environment variable APHRODITE_INSTANCE_ID is set, return it.
    Otherwise, return a random UUID.
    The instance id represents an instance of Aphrodite. All processes in the
    same instance should have the same instance id.
    """
    return os.environ.get("APHRODITE_INSTANCE_ID",
                          f"aphrodite-instance-{random_uuid()}")


@lru_cache(maxsize=None)
def in_wsl() -> bool:
    # Reference: https://github.com/microsoft/WSL/issues/4071
    return "microsoft" in " ".join(uname()).lower()


def make_async(func: Callable[..., T]) -> Callable[..., Awaitable[T]]:
    """Take a blocking function and run it in an executor thread.

    This prevents the blocking function from blocking the
    asyncio event loop.
    The code in this function needs to be thread-safe.
    """

    def _async_wrapper(*args, **kwargs) -> asyncio.Future:
        loop = asyncio.get_event_loop()
        p_func = partial(func, *args, **kwargs)
        return loop.run_in_executor(executor=None, func=p_func)

    return _async_wrapper
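
# Example (illustrative sketch, not part of the original module): wrapping a
# blocking call so it can be awaited without stalling the event loop.
# `load_weights_from_disk` is a hypothetical blocking function.
#
#     async_load = make_async(load_weights_from_disk)
#
#     async def handler():
#         weights = await async_load("/path/to/checkpoint")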


def merge_async_iterators(
        *iterators: AsyncIterator[T]) -> AsyncIterator[Tuple[int, T]]:
    """Merge multiple asynchronous iterators into a single iterator.

    This method handles the case where some iterators finish before others.
    When it yields, it yields a tuple (i, item) where i is the index of the
    iterator that yielded the item.
    """
    queue: asyncio.Queue[Union[Tuple[int, T], Exception]] = asyncio.Queue()

    finished = [False] * len(iterators)

    async def producer(i: int, iterator: AsyncIterator[T]):
        try:
            async for item in iterator:
                await queue.put((i, item))
        except Exception as e:
            await queue.put(e)
        finished[i] = True

    _tasks = [
        asyncio.create_task(producer(i, iterator))
        for i, iterator in enumerate(iterators)
    ]

    async def consumer():
        try:
            while not all(finished) or not queue.empty():
                item = await queue.get()
                if isinstance(item, Exception):
                    raise item
                yield item
        except (Exception, asyncio.CancelledError) as e:
            for task in _tasks:
                if sys.version_info >= (3, 9):
                    # msg parameter only supported in Python 3.9+
                    task.cancel(e)
                else:
                    task.cancel()
            raise e
        await asyncio.gather(*_tasks)

    return consumer()
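
# Example (illustrative sketch, not part of the original module): fan-in of
# several per-request streams into one loop. `stream_a` and `stream_b` are
# hypothetical async generators.
#
#     async def consume():
#         async for i, item in merge_async_iterators(stream_a(), stream_b()):
#             print(f"iterator {i} produced {item}")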


def get_ip() -> str:
    host_ip = os.environ.get("HOST_IP")
    if host_ip:
        return host_ip

    # IP is not set, try to get it from the network interface.
    # Try IPv4 first.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))  # Doesn't need to be reachable
        return s.getsockname()[0]
    except Exception:
        pass

    # Try IPv6.
    try:
        s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        # Google's public DNS server, see
        # https://developers.google.com/speed/public-dns/docs/using#addresses
        s.connect(("2001:4860:4860::8888", 80))  # Doesn't need to be reachable
        return s.getsockname()[0]
    except Exception:
        pass

    warnings.warn(
        "Failed to get the IP address, using 0.0.0.0 by default. "
        "The value can be set by the environment variable HOST_IP.",
        stacklevel=2)
    return "0.0.0.0"


def get_distributed_init_method(ip: str, port: int) -> str:
    # Brackets are not permitted in ipv4 addresses,
    # see https://github.com/python/cpython/issues/103848
    return f"tcp://[{ip}]:{port}" if ":" in ip else f"tcp://{ip}:{port}"


def get_open_port() -> int:
    # try ipv4
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            return s.getsockname()[1]
    except OSError:
        # try ipv6
        with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            return s.getsockname()[1]


def update_environment_variables(envs: Dict[str, str]):
    for k, v in envs.items():
        if k in os.environ and os.environ[k] != v:
            logger.warning(f"Overwriting environment variable {k} "
                           f"from '{os.environ[k]}' to '{v}'")
        os.environ[k] = v


def chunk_list(lst, chunk_size):
    """Return successive chunk_size-sized chunks from lst."""
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def cdiv(a: int, b: int) -> int:
    """Ceiling division."""
    return -(a // -b)
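
# Example (illustrative): cdiv rounds up, which is handy for computing how
# many fixed-size blocks are needed to hold a sequence.
#
#     cdiv(10, 4)                    # 3 blocks for 10 tokens with block_size=4
#     chunk_list(list(range(5)), 2)  # [[0, 1], [2, 3], [4]]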


def _generate_random_fp8(
    tensor: torch.Tensor,
    low: float,
    high: float,
) -> None:
    # NOTE: Due to the NaN and Inf representations of the fp8 data types,
    # directly using torch.randint to generate random fp8 data may
    # produce Inf or NaN values.
    # For example, s.11111.00 in the fp8e5m2 format represents Inf.
    #     | E4M3       | E5M2
    # ----|------------|-------------------
    # Inf | N/A        | s.11111.00
    # NaN | s.1111.111 | s.11111.{01,10,11}
    from aphrodite._C import cache_ops
    tensor_tmp = torch.empty_like(tensor, dtype=torch.float16)
    tensor_tmp.uniform_(low, high)
    cache_ops.convert_fp8(tensor, tensor_tmp)
    del tensor_tmp


def get_kv_cache_torch_dtype(
        cache_dtype: Optional[Union[str, torch.dtype]],
        model_dtype: Optional[Union[str, torch.dtype]] = None) -> torch.dtype:
    if isinstance(cache_dtype, str):
        if cache_dtype == "auto":
            if isinstance(model_dtype, str):
                torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[model_dtype]
            elif isinstance(model_dtype, torch.dtype):
                torch_dtype = model_dtype
            else:
                raise ValueError(f"Invalid model dtype: {model_dtype}")
        elif cache_dtype in ["half", "bfloat16", "float"]:
            torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_dtype]
        elif cache_dtype == "fp8":
            torch_dtype = torch.uint8
        else:
            raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")
    elif isinstance(cache_dtype, torch.dtype):
        torch_dtype = cache_dtype
    else:
        raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")
    return torch_dtype
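
# Example (illustrative): how the dtype resolution behaves for a few inputs.
#
#     get_kv_cache_torch_dtype("auto", "half")       # torch.half
#     get_kv_cache_torch_dtype("auto", torch.float)  # torch.float
#     get_kv_cache_torch_dtype("fp8", "half")        # torch.uint8 (fp8 storage)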


def create_kv_caches_with_random_flash(
    num_blocks: int,
    block_size: int,
    num_layers: int,
    num_heads: int,
    head_size: int,
    cache_dtype: Optional[Union[str, torch.dtype]],
    model_dtype: Optional[Union[str, torch.dtype]] = None,
    seed: int = 0,
    device: Optional[str] = "cuda",
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    assert cache_dtype != "fp8"
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    torch_dtype = get_kv_cache_torch_dtype(cache_dtype, model_dtype)
    key_value_cache_shape = (num_blocks, 2, block_size, num_heads, head_size)
    scale = head_size**-0.5
    key_caches, value_caches = [], []
    for _ in range(num_layers):
        key_value_cache = torch.empty(size=key_value_cache_shape,
                                      dtype=torch_dtype,
                                      device=device)
        key_value_cache.uniform_(-scale, scale)
        key_caches.append(key_value_cache[:, 0])
        value_caches.append(key_value_cache[:, 1])
    return key_caches, value_caches


def create_kv_caches_with_random(
    num_blocks: int,
    block_size: int,
    num_layers: int,
    num_heads: int,
    head_size: int,
    cache_dtype: Optional[Union[str, torch.dtype]],
    model_dtype: Optional[Union[str, torch.dtype]] = None,
    seed: int = 0,
    device: Optional[str] = "cuda",
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    torch_dtype = get_kv_cache_torch_dtype(cache_dtype, model_dtype)

    scale = head_size**-0.5
    x = 16 // torch.tensor([], dtype=torch_dtype).element_size()
    key_cache_shape = (num_blocks, num_heads, head_size // x, block_size, x)
    key_caches = []
    for _ in range(num_layers):
        key_cache = torch.empty(size=key_cache_shape,
                                dtype=torch_dtype,
                                device=device)
        if cache_dtype in ["auto", "half", "bfloat16", "float"]:
            key_cache.uniform_(-scale, scale)
        elif cache_dtype == 'fp8':
            _generate_random_fp8(key_cache, -scale, scale)
        else:
            raise ValueError(
                f"Does not support key cache of type {cache_dtype}")
        key_caches.append(key_cache)

    value_cache_shape = (num_blocks, num_heads, head_size, block_size)
    value_caches = []
    for _ in range(num_layers):
        value_cache = torch.empty(size=value_cache_shape,
                                  dtype=torch_dtype,
                                  device=device)
        if cache_dtype in ["auto", "half", "bfloat16", "float"]:
            value_cache.uniform_(-scale, scale)
        elif cache_dtype == 'fp8':
            _generate_random_fp8(value_cache, -scale, scale)
        else:
            raise ValueError(
                f"Does not support value cache of type {cache_dtype}")
        value_caches.append(value_cache)
    return key_caches, value_caches


@lru_cache
def print_warning_once(msg: str) -> None:
    logger.warning(msg)


@lru_cache(maxsize=None)
def is_pin_memory_available() -> bool:
    if in_wsl():
        # Pinning memory in WSL is not supported.
        # https://docs.nvidia.com/cuda/wsl-user-guide/index.html#known-limitations-for-linux-cuda-applications
        print_warning_once("Using 'pin_memory=False' as WSL is detected. "
                           "This may slow down performance.")
        return False
    elif is_neuron():
        print_warning_once("Pin memory is not supported on Neuron.")
        return False
    elif is_cpu():
        return False
    return True


class CudaMemoryProfiler:

    def __init__(self, device=None):
        self.device = device

    def current_memory_usage(self) -> float:
        # Return the memory usage in bytes.
        torch.cuda.reset_peak_memory_stats(self.device)
        mem = torch.cuda.max_memory_allocated(self.device)
        return mem

    def __enter__(self):
        self.initial_memory = self.current_memory_usage()
        # This allows us to call methods of the context manager if needed
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.final_memory = self.current_memory_usage()
        self.consumed_memory = self.final_memory - self.initial_memory

        # Force garbage collection
        gc.collect()
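
# Example (illustrative sketch, not part of the original module): measuring
# how much GPU memory a model load consumes. `load_model` is a hypothetical
# function used only for demonstration.
#
#     with CudaMemoryProfiler() as profiler:
#         model = load_model()
#     print(f"Weights take {profiler.consumed_memory / 1024**3:.2f} GiB")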


def str_to_int_tuple(s: str) -> Tuple[int, ...]:
    """Convert a string to a tuple of integers."""
    try:
        return tuple(map(int, s.split(",")))
    except ValueError as e:
        raise ValueError(
            "String must be a series of integers separated by commas "
            f"(e.g., 1, 2, 3). Given input: {s}") from e


def make_tensor_with_pad(
    x: List[List[int]],
    max_len: int,
    pad: int,
    dtype: torch.dtype,
    device: Optional[Union[str, torch.device]],
) -> torch.Tensor:
    """Make a padded tensor from 2D inputs.

    The padding is applied to the end of each inner list until it reaches
    `max_len`.
    """
    padded_x = np.zeros([len(x), max_len], dtype=np.int32) + pad
    for ind, blocktb in enumerate(x):
        assert len(blocktb) <= max_len
        padded_x[ind, :len(blocktb)] = blocktb
    return torch.tensor(padded_x, dtype=dtype, device=device)
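
# Example (illustrative): padding ragged block tables into a rectangular
# tensor. The expected output shown is a sketch of the resulting values.
#
#     make_tensor_with_pad([[1, 2, 3], [4]], max_len=4, pad=0,
#                          dtype=torch.int, device="cpu")
#     # tensor([[1, 2, 3, 0],
#     #         [4, 0, 0, 0]], dtype=torch.int32)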


def async_tensor_h2d(
    data: list,
    dtype: torch.dtype,
    target_device: Union[str, torch.device],
    pin_memory: bool,
) -> torch.Tensor:
    """Asynchronously create a tensor and copy it from host to device."""
    t = torch.tensor(data, dtype=dtype, pin_memory=pin_memory, device="cpu")
    return t.to(device=target_device, non_blocking=True)


def maybe_expand_dim(tensor: torch.Tensor,
                     target_dims: int,
                     size: int = 1) -> torch.Tensor:
    """Expand the tensor to the target_dims."""
    if tensor.ndim < target_dims:
        tensor = tensor.view(-1, *([size] * (target_dims - tensor.ndim)))
    return tensor


def merge_dicts(dict1: Dict[Any, List[Any]],
                dict2: Dict[Any, List[Any]]) -> Dict[Any, List[Any]]:
    """Merge 2 dicts that map keys to lists of items.

    When a key conflicts, the values from dict1 come first in the merged list.
    """
    merged_dict = defaultdict(list)

    for key, value in dict1.items():
        merged_dict[key].extend(value)

    for key, value in dict2.items():
        merged_dict[key].extend(value)

    return dict(merged_dict)
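
# Example (illustrative): on a key conflict, the items from the first dict
# appear before those from the second.
#
#     merge_dicts({"a": [1, 2], "b": [3]}, {"a": [4]})
#     # {"a": [1, 2, 4], "b": [3]}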


def init_cached_hf_modules():
    """
    Lazy initialization of the Hugging Face modules.
    """
    from transformers.dynamic_module_utils import init_hf_modules
    init_hf_modules()


@lru_cache(maxsize=None)
def find_library(lib_name: str) -> str:
    """
    Find the library file in the system.
    `lib_name` is the full filename, with both prefix and suffix.
    This function resolves `lib_name` to the full path of the library.
    """
    # Adapted from https://github.com/openai/triton/blob/main/third_party/nvidia/backend/driver.py#L19 # noqa
    # According to https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard
    # `/sbin/ldconfig` should exist in all Linux systems.
    # `/sbin/ldconfig` searches the library in the system.
    libs = subprocess.check_output(["/sbin/ldconfig", "-p"]).decode()
    # Each line looks like the following:
    # libcuda.so.1 (libc6,x86-64) => /lib/x86_64-linux-gnu/libcuda.so.1
    locs = [line.split()[-1] for line in libs.splitlines() if lib_name in line]
    # `LD_LIBRARY_PATH` searches the library in the user-defined paths.
    env_ld_library_path = os.getenv("LD_LIBRARY_PATH")
    if not locs and env_ld_library_path:
        locs = [
            os.path.join(dir, lib_name)
            for dir in env_ld_library_path.split(":")
            if os.path.exists(os.path.join(dir, lib_name))
        ]
    if not locs:
        raise ValueError(f"Cannot find {lib_name} in the system.")
    return locs[0]


def find_nccl_library():
    """
    We either use the library file specified by the `APHRODITE_NCCL_SO_PATH`
    environment variable, or we find the library file brought by PyTorch.
    After importing `torch`, `libnccl.so.2` or `librccl.so.1` can be
    found by `ctypes` automatically.
    """
    so_file = os.environ.get("APHRODITE_NCCL_SO_PATH", "")

    # Manually load the nccl library.
    if so_file:
        logger.info("Found nccl from environment variable "
                    f"APHRODITE_NCCL_SO_PATH={so_file}")
    else:
        if torch.version.cuda is not None:
            so_file = "libnccl.so.2"
        elif torch.version.hip is not None:
            so_file = "librccl.so.1"
        else:
            raise ValueError("NCCL only supports CUDA and ROCm backends.")
        logger.info(f"Found nccl from library {so_file}")
    return so_file


def enable_trace_function_call_for_thread() -> None:
    if int(os.getenv("APHRODITE_TRACE_FUNCTION", "0")):
        tmp_dir = tempfile.gettempdir()
        filename = (f"APHRODITE_TRACE_FUNCTION_for_process_{os.getpid()}"
                    f"_thread_{threading.get_ident()}_"
                    f"at_{datetime.datetime.now()}.log").replace(" ", "_")
        log_path = os.path.join(tmp_dir, "aphrodite",
                                get_aphrodite_instance_id(), filename)
        os.makedirs(os.path.dirname(log_path), exist_ok=True)
        enable_trace_function_call(log_path)


def identity(value: T) -> T:
    return value


F = TypeVar('F', bound=Callable[..., Any])


def deprecate_kwargs(
        *kws: str,
        is_deprecated: Union[bool, Callable[[], bool]] = True,
        additional_message: Optional[str] = None) -> Callable[[F], F]:
    deprecated_kws = set(kws)

    if not callable(is_deprecated):
        is_deprecated = partial(identity, is_deprecated)

    def wrapper(fn: F) -> F:

        @wraps(fn)
        def inner(*args, **kwargs):
            if is_deprecated():
                deprecated_kwargs = kwargs.keys() & deprecated_kws
                if deprecated_kwargs:
                    msg = (
                        f"The keyword arguments {deprecated_kwargs} are "
                        "deprecated and will be removed in a future update.")
                    if additional_message is not None:
                        msg += f" {additional_message}"

                    warnings.warn(
                        DeprecationWarning(msg),
                        stacklevel=3,  # The inner function takes up one level
                    )

            return fn(*args, **kwargs)

        return inner  # type: ignore

    return wrapper
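
# Example (illustrative sketch, not part of the original module): flagging a
# keyword argument as deprecated. The function and parameter names below are
# hypothetical.
#
#     @deprecate_kwargs("legacy_mode",
#                       additional_message="Use 'mode' instead.")
#     def generate(prompt, legacy_mode=False, mode="fast"):
#         ...
#
#     generate("hi", legacy_mode=True)  # emits a DeprecationWarning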