import argparse
import dataclasses
import io
import os
import re
import time
from dataclasses import dataclass
from functools import partial
from typing import BinaryIO, Generator, Optional, Tuple, Type, Union

import torch
from loguru import logger
from torch import nn
from transformers import PretrainedConfig

import aphrodite.common.envs as envs
from aphrodite.common.config import ModelConfig, ParallelConfig
from aphrodite.engine.aphrodite_engine import AphroditeEngine
from aphrodite.engine.args_tools import EngineArgs
from aphrodite.modeling.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding)
from aphrodite.quantization.base_config import QuantizationConfig

tensorizer_error_msg = None

try:
    from tensorizer import (DecryptionParams, EncryptionParams,
                            TensorDeserializer, TensorSerializer)
    from tensorizer.stream_io import open_stream
    from tensorizer.utils import (convert_bytes, get_mem_usage,
                                  no_init_or_tensor)

    _read_stream, _write_stream = (partial(
        open_stream,
        mode=mode,
    ) for mode in ("rb", "wb+"))
except ImportError as e:
    tensorizer_error_msg = e

__all__ = [
    'EncryptionParams', 'DecryptionParams', 'TensorDeserializer',
    'TensorSerializer', 'open_stream', 'convert_bytes', 'get_mem_usage',
    'no_init_or_tensor', 'TensorizerConfig'
]


@dataclass
class TensorizerConfig:
    tensorizer_uri: str
    aphrodite_tensorized: Optional[bool] = False
    verify_hash: Optional[bool] = False
    num_readers: Optional[int] = None
    encryption_keyfile: Optional[str] = None
    s3_access_key_id: Optional[str] = None
    s3_secret_access_key: Optional[str] = None
    s3_endpoint: Optional[str] = None
    model_class: Optional[Type[torch.nn.Module]] = None
    hf_config: Optional[PretrainedConfig] = None
    dtype: Optional[Union[str, torch.dtype]] = None
    _is_sharded: bool = False

    def __post_init__(self):
        # check if the configuration is for a sharded Aphrodite model
        self._is_sharded = isinstance(self.tensorizer_uri, str) \
            and re.search(r'%0\dd', self.tensorizer_uri) is not None

    def _construct_tensorizer_args(self) -> "TensorizerArgs":
        tensorizer_args = {
            "tensorizer_uri": self.tensorizer_uri,
            "aphrodite_tensorized": self.aphrodite_tensorized,
            "verify_hash": self.verify_hash,
            "num_readers": self.num_readers,
            "encryption_keyfile": self.encryption_keyfile,
            "s3_access_key_id": self.s3_access_key_id,
            "s3_secret_access_key": self.s3_secret_access_key,
            "s3_endpoint": self.s3_endpoint,
        }
        return TensorizerArgs(**tensorizer_args)

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        if parallel_config.tensor_parallel_size > 1 \
                and not self._is_sharded:
            raise ValueError(
                "For a sharded model, tensorizer_uri should include a"
                " string format template like '%04d' to be formatted"
                " with the rank of the shard")

    def verify_with_model_config(self, model_config: "ModelConfig") -> None:
        if (model_config.quantization is not None
                and self.tensorizer_uri is not None):
            logger.warning(
                "Loading a model using Tensorizer with quantization on "
                "aphrodite is unstable and may lead to errors.")


def load_with_tensorizer(tensorizer_config: TensorizerConfig,
                         **extra_kwargs) -> nn.Module:
    tensorizer = TensorizerAgent(tensorizer_config, **extra_kwargs)
    return tensorizer.deserialize()
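
# Illustrative sketch (kept in comments so importing this module stays
# side-effect free): a hypothetical TensorizerConfig for a tensor-parallel
# model. The bucket and file names are made up; the "%03d" format template is
# what __post_init__ detects via the r'%0\dd' pattern to mark the config as
# sharded, and it is later filled in with each shard's tensor-parallel rank.
#
#   sharded_config = TensorizerConfig(
#       tensorizer_uri="s3://example-bucket/model-rank-%03d.tensors",
#       num_readers=4,
#   )
#   assert sharded_config._is_sharded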


@dataclass
class TensorizerArgs:
    tensorizer_uri: Union[io.BufferedIOBase, io.RawIOBase, BinaryIO, str,
                          bytes, os.PathLike, int]
    aphrodite_tensorized: Optional[bool] = False
    verify_hash: Optional[bool] = False
    num_readers: Optional[int] = None
    encryption_keyfile: Optional[str] = None
    s3_access_key_id: Optional[str] = None
    s3_secret_access_key: Optional[str] = None
    s3_endpoint: Optional[str] = None
    """
    Args for the TensorizerAgent class. These are used to configure the
    behavior of the TensorDeserializer when loading tensors from a serialized
    model.

    Args:
        tensorizer_uri: Path to serialized model tensors. Can be a local file
            path or an S3 URI.
        aphrodite_tensorized: If True, indicates that the serialized model is
            an Aphrodite model. This is used to determine the behavior of the
            TensorDeserializer when loading tensors from a serialized model.
            It is far faster to deserialize an Aphrodite model as it utilizes
            tensorizer's optimized GPU loading. Note that this is now
            deprecated, as serialized Aphrodite models are now automatically
            inferred as Aphrodite models.
        verify_hash: If True, the hashes of each tensor will be verified
            against the hashes stored in the metadata. A `HashMismatchError`
            will be raised if any of the hashes do not match.
        num_readers: Controls how many threads are allowed to read
            concurrently from the source file. Default is `None`, which will
            dynamically set the number of readers based on the number of
            available resources and model size. This greatly increases
            performance.
        encryption_keyfile: File path to a binary file containing a
            binary key to use for decryption. `None` (the default) means
            no decryption. See the example script in
            examples/tensorize_aphrodite_model.py.
        s3_access_key_id: The access key for the S3 bucket. Can also be set
            via the S3_ACCESS_KEY_ID environment variable.
        s3_secret_access_key: The secret access key for the S3 bucket. Can
            also be set via the S3_SECRET_ACCESS_KEY environment variable.
        s3_endpoint: The endpoint for the S3 bucket. Can also be set via the
            S3_ENDPOINT_URL environment variable.
    """

    def __post_init__(self):
        self.file_obj = self.tensorizer_uri
        self.s3_access_key_id = (self.s3_access_key_id
                                 or envs.S3_ACCESS_KEY_ID) or None
        self.s3_secret_access_key = (
            self.s3_secret_access_key
            or envs.S3_SECRET_ACCESS_KEY) or None
        self.s3_endpoint = (self.s3_endpoint
                            or envs.S3_ENDPOINT_URL) or None

        self.stream_params = {
            "s3_access_key_id": self.s3_access_key_id,
            "s3_secret_access_key": self.s3_secret_access_key,
            "s3_endpoint": self.s3_endpoint,
        }

        self.deserializer_params = {
            "verify_hash": self.verify_hash,
            "encryption": self.encryption_keyfile,
            "num_readers": self.num_readers
        }
        if self.encryption_keyfile:
            with open_stream(
                    self.encryption_keyfile,
                    **self.stream_params,
            ) as stream:
                key = stream.read()
                decryption_params = DecryptionParams.from_key(key)
                self.deserializer_params['encryption'] = decryption_params

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Tensorizer CLI arguments"""

        # Tensorizer options arg group
        group = parser.add_argument_group(
            'tensorizer options',
            description=('Options for configuring the behavior of the'
                         ' tensorizer deserializer when '
                         'load_format=tensorizer is specified when '
                         'initializing an AphroditeEngine, either via the CLI '
                         'when running the Aphrodite OpenAI inference server '
                         'with a JSON string passed to '
                         '--model-loader-extra-config or as arguments given '
                         'to TensorizerConfig when passed to '
                         'model_loader_extra_config in the constructor '
                         'for AphroditeEngine.'))

        group.add_argument(
            "--tensorizer-uri",
            help="Path to serialized model tensors. Can be a local file path,"
            " or an HTTP(S) or S3 URI.",
        )
        group.add_argument(
            "--verify-hash",
            action="store_true",
            help="If enabled, the hashes of each tensor will be verified"
            " against the hashes stored in the file metadata. An exception"
            " will be raised if any of the hashes do not match.",
        )
        group.add_argument(
            "--encryption-keyfile",
            default=None,
            help="The file path to a binary file containing a binary key to "
            "use for decryption. Can be a file path or S3 network URI.")
        group.add_argument(
            "--num-readers",
            default=None,
            type=int,
            help="Controls how many threads are allowed to read concurrently "
            "from the source file. Default is `None`, which will dynamically "
            "set the number of readers based on the available resources "
            "and model size. This greatly increases performance.")
        group.add_argument(
            "--s3-access-key-id",
            default=None,
            help="The access key for the S3 bucket. Can also be set via the "
            "S3_ACCESS_KEY_ID environment variable.",
        )
        group.add_argument(
            "--s3-secret-access-key",
            default=None,
            help="The secret access key for the S3 bucket. Can also be set "
            "via the S3_SECRET_ACCESS_KEY environment variable.",
        )
        group.add_argument(
            "--s3-endpoint",
            default=None,
            help="The endpoint for the S3 bucket. Can also be set via the "
            "S3_ENDPOINT_URL environment variable.",
        )
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> "TensorizerArgs":
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        tensorizer_args = cls(**{
            attr: getattr(args, attr)
            for attr in attrs if hasattr(args, attr)
        })
        return tensorizer_args
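
# Illustrative sketch (comment only): how add_cli_args and from_cli_args are
# meant to round-trip through argparse. The parser description and argument
# values below are hypothetical.
#
#   parser = argparse.ArgumentParser(description="tensorize a model")
#   parser = TensorizerArgs.add_cli_args(parser)
#   args = parser.parse_args(["--tensorizer-uri", "model.tensors",
#                             "--num-readers", "2"])
#   tensorizer_args = TensorizerArgs.from_cli_args(args)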


class TensorizerAgent:
    """
    A class for performing tensorizer deserializations specifically for
    aphrodite models using plaid_mode. Uses TensorizerArgs to configure the
    behavior of the TensorDeserializer when loading tensors from a serialized
    model. For deserializations of HuggingFace models, TensorDeserializer is
    instead used as an iterator directly in the function
    hf_model_weights_iterator in
    aphrodite/modeling/model_loader/weight_utils.py.
    """

    def __init__(self, tensorizer_config: TensorizerConfig,
                 quant_config: QuantizationConfig, **extra_kwargs):
        if tensorizer_error_msg is not None:
            raise ImportError(
                "Tensorizer is not installed. Please install tensorizer "
                "to use this feature with "
                "`pip install aphrodite-engine[tensorizer]`. "
                "Error message: {}".format(tensorizer_error_msg))

        self.tensorizer_config = tensorizer_config
        self.tensorizer_args = (
            self.tensorizer_config._construct_tensorizer_args())
        self.extra_kwargs = extra_kwargs
        if extra_kwargs.get("quant_config", None) is not None:
            self.quant_config = extra_kwargs["quant_config"]
        else:
            self.quant_config = quant_config
        self.model = self._init_model()

    def _init_model(self):
        model_args = self.tensorizer_config.hf_config
        model_args.torch_dtype = self.tensorizer_config.dtype
        with no_init_or_tensor():
            return self.tensorizer_config.model_class(
                config=model_args,
                quant_config=self.quant_config,
                **self.extra_kwargs)

    def _resize_lora_embeddings(self):
        """Modify LoRA embedding layers to use bigger tensors
        to allow for adapter added tokens."""
        for child in self.model.modules():
            if (isinstance(child, VocabParallelEmbedding)
                    and child.weight.shape[0] <
                    child.num_embeddings_per_partition):
                new_weight = torch.empty(child.num_embeddings_per_partition,
                                         child.embedding_dim,
                                         dtype=child.weight.dtype,
                                         device=child.weight.device)
                new_weight[:child.weight.shape[0]].copy_(child.weight.data)
                new_weight[child.weight.shape[0]:].fill_(0)
                child.weight.data = new_weight

    def _check_tensors_on_meta_device(self):
        for tensor in self.model.state_dict().values():
            if tensor.device.type == 'meta':
                raise ValueError(
                    "The serialized model contains tensors on the meta device,"
                    " indicating that some tensors were not loaded properly."
                    " Please check that the parameters of the model being"
                    " specified match those of the serialized model, such as"
                    " its quantization.")

    def deserialize(self):
        """
        Deserialize the model using the TensorDeserializer. This method is
        specifically for Aphrodite models using tensorizer's plaid_mode.

        The deserializer makes use of tensorizer_args.stream_params
        to configure the behavior of the stream when loading tensors from a
        serialized model. The deserializer_params are used to configure the
        behavior of the TensorDeserializer when loading tensors themselves.
        Documentation on these params can be found in TensorizerArgs.

        Returns:
            nn.Module: The deserialized model.
        """
        before_mem = get_mem_usage()
        start = time.perf_counter()
        with _read_stream(
                self.tensorizer_config.tensorizer_uri,
                **self.tensorizer_args.stream_params
        ) as stream, TensorDeserializer(
                stream,
                dtype=self.tensorizer_config.dtype,
                device=f'cuda:{torch.cuda.current_device()}',
                **self.tensorizer_args.deserializer_params) as deserializer:
            deserializer.load_into_module(self.model)
            end = time.perf_counter()

        total_bytes_str = convert_bytes(deserializer.total_tensor_bytes)
        duration = end - start
        per_second = convert_bytes(deserializer.total_tensor_bytes / duration)
        after_mem = get_mem_usage()
        deserializer.close()
        logger.info(f"Deserialized {total_bytes_str} in "
                    f"{end - start:0.2f}s, {per_second}/s")
        logger.info(f"Memory usage before: {before_mem}")
        logger.info(f"Memory usage after: {after_mem}")

        self._check_tensors_on_meta_device()
        self._resize_lora_embeddings()
        del self.model.aphrodite_tensorized_marker
        return self.model.eval()
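
# Illustrative sketch (comment only): the intended entry point is
# load_with_tensorizer() above rather than constructing TensorizerAgent
# directly. The URI, model class and HF config below are hypothetical
# placeholders; in practice the model loader fills them in on the
# TensorizerConfig before calling load_with_tensorizer.
#
#   config = TensorizerConfig(
#       tensorizer_uri="s3://example-bucket/model.tensors",
#       model_class=MyAphroditeModelClass,
#       hf_config=my_hf_config,
#       dtype=torch.float16,
#   )
#   model = load_with_tensorizer(config, quant_config=None)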


def tensorizer_weights_iterator(
    tensorizer_args: "TensorizerArgs"
) -> Generator[Tuple[str, torch.Tensor], None, None]:
    logger.warning(
        "Deserializing HuggingFace models is not optimized for "
        "loading on Aphrodite, as tensorizer is forced to load to CPU. "
        "Consider deserializing an Aphrodite model instead for faster "
        "load times. See the examples/tensorize_aphrodite_model.py example "
        "script for serializing Aphrodite models.")

    deserializer_args = tensorizer_args.deserializer_params
    stream_params = tensorizer_args.stream_params
    stream = open_stream(tensorizer_args.tensorizer_uri, **stream_params)
    with TensorDeserializer(stream, **deserializer_args,
                            device="cpu") as state:
        for name, param in state.items():
            yield name, param
    del state


def is_aphrodite_tensorized(tensorizer_config: "TensorizerConfig") -> bool:
    """
    Infer if the model is an Aphrodite model by checking the weights for
    an Aphrodite tensorized marker.

    Args:
        tensorizer_config: The TensorizerConfig object containing the
            tensorizer_uri to the serialized model.

    Returns:
        bool: True if the model is an Aphrodite model, False otherwise.
    """
    tensorizer_args = tensorizer_config._construct_tensorizer_args()
    deserializer = TensorDeserializer(open_stream(
        tensorizer_args.tensorizer_uri, **tensorizer_args.stream_params),
                                      **tensorizer_args.deserializer_params,
                                      lazy_load=True)
    if tensorizer_config.aphrodite_tensorized:
        logger.warning(
            "Please note that newly serialized Aphrodite models are "
            "automatically inferred as Aphrodite models, so setting "
            "aphrodite_tensorized=True is only necessary for models "
            "serialized prior to this change.")
        return True
    if (".aphrodite_tensorized_marker" in deserializer):
        return True
    return False
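
# Illustrative sketch (comment only, hypothetical path): checking a serialized
# file for the marker parameter that serialize_aphrodite_model() below
# registers before writing the weights.
#
#   config = TensorizerConfig(tensorizer_uri="/tmp/model.tensors")
#   if is_aphrodite_tensorized(config):
#       ...  # take the optimized Aphrodite deserialization path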


def serialize_aphrodite_model(
    model: nn.Module,
    tensorizer_config: TensorizerConfig,
) -> nn.Module:
    model.register_parameter(
        "aphrodite_tensorized_marker",
        nn.Parameter(torch.tensor((1, ), device="meta"), requires_grad=False))
    tensorizer_args = tensorizer_config._construct_tensorizer_args()

    encryption_params = None
    if (keyfile := tensorizer_config.encryption_keyfile) is not None:
        with open(keyfile, "rb") as f:
            key = f.read()
        encryption_params = EncryptionParams(key=key)

    output_file = tensorizer_args.tensorizer_uri
    if tensorizer_config._is_sharded:
        from aphrodite.distributed import get_tensor_model_parallel_rank
        output_file = output_file % get_tensor_model_parallel_rank()

    with _write_stream(output_file, **tensorizer_args.stream_params) as stream:
        serializer = TensorSerializer(stream, encryption=encryption_params)
        serializer.write_module(model)
        serializer.close()
    logger.info(f"Successfully serialized model to {str(output_file)}")
    return model


def tensorize_aphrodite_model(engine_args: EngineArgs,
                              tensorizer_config: TensorizerConfig,
                              generate_keyfile: bool = True):
    """Utility to load a model and then serialize it with Tensorizer

    Intended to be used separately from running an Aphrodite server since it
    creates its own Engine instance.
    """
    engine_config = engine_args.create_engine_config()
    tensorizer_config.verify_with_model_config(engine_config.model_config)
    tensorizer_config.verify_with_parallel_config(
        engine_config.parallel_config)

    # generate the encryption key before creating the engine to support
    # sharding
    if generate_keyfile and (keyfile :=
                             tensorizer_config.encryption_keyfile) is not None:
        encryption_params = EncryptionParams.random()
        with _write_stream(
                keyfile,
                s3_access_key_id=tensorizer_config.s3_access_key_id,
                s3_secret_access_key=tensorizer_config.s3_secret_access_key,
                s3_endpoint=tensorizer_config.s3_endpoint,
        ) as stream:
            stream.write(encryption_params.key)

    engine = AphroditeEngine.from_engine_args(engine_args)
    if tensorizer_config._is_sharded:
        # if the engine is a distributed engine (for tensor parallel) then
        # each worker shard needs to serialize its part of the model.
        engine.model_executor._run_workers(
            "save_tensorized_model",
            tensorizer_config=tensorizer_config,
        )
    else:
        # with a single worker, we can get to the underlying model directly
        serialize_aphrodite_model(
            engine.model_executor.driver_worker.model_runner.model,
            tensorizer_config,
        )
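
# Illustrative sketch (comment only): serializing a model from a standalone
# script, in the spirit of the examples/tensorize_aphrodite_model.py script
# referenced above. The model name, bucket and key path are hypothetical
# placeholders.
#
#   engine_args = EngineArgs(model="EleutherAI/pythia-70m")
#   config = TensorizerConfig(
#       tensorizer_uri="s3://example-bucket/pythia-70m/model.tensors",
#       encryption_keyfile="s3://example-bucket/pythia-70m/model.key",
#   )
#   tensorize_aphrodite_model(engine_args, config)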