# tokenizer.py
  1. import os
  2. import tempfile
  3. from typing import List, Optional, Tuple, Union
  4. import gguf
  5. from transformers import (AutoTokenizer, PreTrainedTokenizer,
  6. PreTrainedTokenizerFast, LlamaTokenizer)
  7. from transformers.convert_slow_tokenizer import import_protobuf
  8. from aphrodite.common.logger import init_logger
  9. from aphrodite.lora.request import LoRARequest
  10. from aphrodite.common.utils import make_async, LRUCache
  11. from aphrodite.transformers_utils.tokenizers import BaichuanTokenizer
# Module-level logger for tokenizer loading/decoding diagnostics.
logger = init_logger(__name__)
  13. def convert_gguf_to_tokenizer(checkpoint):
  14. result = gguf.GGUFReader(checkpoint)
  15. # write vocab
  16. sentencepiece_model_pb2 = import_protobuf()
  17. vocab = sentencepiece_model_pb2.ModelProto()
  18. vocab_size = len(result.fields['tokenizer.ggml.token_type'].data)
  19. vocab.trainer_spec.model_type = 2 # BPE
  20. vocab.trainer_spec.vocab_size = vocab_size
  21. vocab.trainer_spec.byte_fallback = True
  22. vocab.normalizer_spec.remove_extra_whitespaces = False
  23. tokens = result.fields['tokenizer.ggml.tokens']
  24. scores = result.fields['tokenizer.ggml.scores']
  25. types = result.fields['tokenizer.ggml.token_type']
  26. for i in range(vocab_size):
  27. new_token = vocab.SentencePiece()
  28. new_token.piece = str(bytes(tokens.parts[tokens.data[i]]),
  29. encoding='utf-8')
  30. new_token.score = scores.parts[scores.data[i]]
  31. # llama.cpp tokentype is the same with sentencepiece token type
  32. new_token.type = int(types.parts[types.data[i]])
  33. vocab.pieces.append(new_token)
  34. with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp_file:
  35. temp_file.write(vocab.SerializeToString())
  36. temp_file_filename = temp_file.name
  37. tokenizer_args = {"vocab_file": temp_file_filename}
  38. if 'tokenizer.ggml.bos_token_id' in result.fields:
  39. tokenizer_args["bos_token"] = vocab.pieces[int(
  40. result.fields['tokenizer.ggml.bos_token_id'].parts[-1])].piece
  41. if 'tokenizer.ggml.eos_token_id' in result.fields:
  42. tokenizer_args["eos_token"] = vocab.pieces[int(
  43. result.fields['tokenizer.ggml.eos_token_id'].parts[-1])].piece
  44. if 'tokenizer.ggml.padding_token_id' in result.fields:
  45. tokenizer_args["pad_token"] = vocab.pieces[int(
  46. result.fields['tokenizer.ggml.padding_token_id'].parts[-1])].piece
  47. if 'tokenizer.ggml.unknown_token_id' in result.fields:
  48. tokenizer_args["unk_token"] = vocab.pieces[int(
  49. result.fields['tokenizer.ggml.unknown_token_id'].parts[-1])].piece
  50. if 'tokenizer.ggml.add_bos_token' in result.fields:
  51. tokenizer_args["add_bos_token"] = bool(
  52. result.fields['tokenizer.ggml.add_bos_token'].parts[-1])
  53. if 'tokenizer.ggml.add_eos_token' in result.fields:
  54. tokenizer_args["add_eos_token"] = bool(
  55. result.fields['tokenizer.ggml.add_eos_token'].parts[-1])
  56. tokenizer = LlamaTokenizer(**tokenizer_args)
  57. os.unlink(temp_file_filename)
  58. return tokenizer
  59. def get_tokenizer(
  60. tokenizer_name: str,
  61. *args,
  62. tokenizer_mode: str = "auto",
  63. trust_remote_code: bool = False,
  64. tokenizer_revision: Optional[str] = None,
  65. **kwargs,
  66. ) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
  67. """Gets a tokenizer for the given model name via Huggingface."""
  68. if tokenizer_name.endswith("gguf"):
  69. return convert_gguf_to_tokenizer(tokenizer_name)
  70. if tokenizer_mode == "slow":
  71. if kwargs.get("use_fast", False):
  72. raise ValueError(
  73. "Cannot use the fast tokenizer in slow tokenizer mode.")
  74. kwargs["use_fast"] = False
  75. try:
  76. tokenizer = AutoTokenizer.from_pretrained(
  77. tokenizer_name,
  78. *args,
  79. trust_remote_code=trust_remote_code,
  80. tokenizer_revision=tokenizer_revision,
  81. **kwargs)
  82. except ValueError as e:
  83. # If the error pertains to the tokenizer class not existing or not
  84. # currently being imported, suggest using the --trust-remote-code flag.
  85. if (not trust_remote_code and
  86. ("does not exist or is not currently imported." in str(e)
  87. or "requires you to execute the tokenizer file" in str(e))):
  88. err_msg = (
  89. "Failed to load the tokenizer. If the tokenizer is a custom "
  90. "tokenizer not yet available in the HuggingFace transformers "
  91. "library, consider setting `trust_remote_code=True` in LLM "
  92. "or using the `--trust-remote-code` flag in the CLI.")
  93. raise RuntimeError(err_msg) from e
  94. else:
  95. raise e
  96. except AttributeError as e:
  97. if "BaichuanTokenizer" in str(e):
  98. # This is for the error "'BaichuanTokenizer' object has no
  99. # attribute 'sp_model'".
  100. tokenizer = BaichuanTokenizer.from_pretrained(
  101. tokenizer_name,
  102. *args,
  103. trust_remote_code=trust_remote_code,
  104. tokenizer_revision=tokenizer_revision,
  105. **kwargs)
  106. else:
  107. raise e
  108. if not isinstance(tokenizer, PreTrainedTokenizerFast):
  109. logger.warning(
  110. "Using a slow tokenizer. This might cause a significant "
  111. "slowdown. Consider using a fast tokenizer instead.")
  112. return tokenizer
  113. def get_lora_tokenizer(lora_request: LoRARequest, *args,
  114. **kwargs) -> Optional[PreTrainedTokenizer]:
  115. if lora_request is None:
  116. return None
  117. try:
  118. tokenizer = get_tokenizer(lora_request.lora_local_path, *args,
  119. **kwargs)
  120. except OSError as e:
  121. # No tokenizer was found in the LoRA folder,
  122. # use base model tokenizer
  123. logger.warning(
  124. f"No tokenizer found in {lora_request.lora_local_path}, "
  125. "using base model tokenizer instead. "
  126. f"(Exception: {str(e)})")
  127. tokenizer = None
  128. return tokenizer
# Awaitable variant of get_lora_tokenizer; make_async offloads the blocking
# tokenizer load so it does not stall the event loop.
get_lora_tokenizer_async = make_async(get_lora_tokenizer)
  130. class TokenizerGroup:
  131. """A group of tokenizers that can be used for LoRA adapters."""
  132. def __init__(self, tokenizer_id: str, enable_lora: bool, max_num_seqs: int,
  133. max_input_length: Optional[int], **tokenizer_config):
  134. self.tokenizer_id = tokenizer_id
  135. self.tokenizer_config = tokenizer_config
  136. self.enable_lora = enable_lora
  137. self.max_input_length = max_input_length
  138. self.tokenizer = get_tokenizer(self.tokenizer_id, **tokenizer_config)
  139. if enable_lora:
  140. self.lora_tokenizers = LRUCache(capacity=max_num_seqs)
  141. else:
  142. self.lora_tokenizers = None
  143. def encode(self,
  144. prompt: str,
  145. request_id: Optional[str] = None,
  146. lora_request: Optional[LoRARequest] = None) -> List[int]:
  147. tokenizer = self.get_lora_tokenizer(lora_request)
  148. return tokenizer.encode(prompt)
  149. async def encode_async(
  150. self,
  151. prompt: str,
  152. request_id: Optional[str] = None,
  153. lora_request: Optional[LoRARequest] = None) -> List[int]:
  154. tokenizer = await self.get_lora_tokenizer_async(lora_request)
  155. return tokenizer.encode(prompt)
  156. def get_lora_tokenizer(
  157. self,
  158. lora_request: Optional[LoRARequest]) -> "PreTrainedTokenizer":
  159. if not lora_request or not self.enable_lora:
  160. return self.tokenizer
  161. if lora_request.lora_int_id not in self.lora_tokenizers:
  162. tokenizer = (get_lora_tokenizer(
  163. lora_request, **self.tokenizer_config) or self.tokenizer)
  164. self.lora_tokenizers.put(lora_request.lora_int_id, tokenizer)
  165. return tokenizer
  166. else:
  167. return self.lora_tokenizers.get(lora_request.lora_int_id)
  168. async def get_lora_tokenizer_async(
  169. self,
  170. lora_request: Optional[LoRARequest]) -> "PreTrainedTokenizer":
  171. if not lora_request or not self.enable_lora:
  172. return self.tokenizer
  173. if lora_request.lora_int_id not in self.lora_tokenizers:
  174. tokenizer = (await get_lora_tokenizer_async(
  175. lora_request, **self.tokenizer_config) or self.tokenizer)
  176. self.lora_tokenizers.put(lora_request.lora_int_id, tokenizer)
  177. return tokenizer
  178. else:
  179. return self.lora_tokenizers.get(lora_request.lora_int_id)
  180. def _convert_tokens_to_string_with_added_encoders(
  181. tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
  182. output_tokens: List[str],
  183. skip_special_tokens: bool,
  184. spaces_between_special_tokens: bool,
  185. ) -> str:
  186. # Adapted from
  187. # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/tokenization_utils.py#L921
  188. # NOTE: The following code is slow because it runs a for loop over
  189. # the output_tokens. In Python, running a for loop over a list can be slow
  190. # even when the loop body is very simple.
  191. sub_texts = []
  192. current_sub_text = []
  193. all_special_tokens = set(tokenizer.all_special_tokens)
  194. for token in output_tokens:
  195. if skip_special_tokens and token in all_special_tokens:
  196. continue
  197. if token in tokenizer.get_added_vocab():
  198. if current_sub_text:
  199. sub_text = tokenizer.convert_tokens_to_string(current_sub_text)
  200. sub_texts.append(sub_text)
  201. current_sub_text = []
  202. sub_texts.append(token)
  203. else:
  204. current_sub_text.append(token)
  205. if current_sub_text:
  206. sub_text = tokenizer.convert_tokens_to_string(current_sub_text)
  207. sub_texts.append(sub_text)
  208. if spaces_between_special_tokens:
  209. return " ".join(sub_texts)
  210. else:
  211. return "".join(sub_texts)
  212. # Based on
  213. # https://github.com/huggingface/text-generation-inference/blob/v0.9.4/server/text_generation_server/models/model.py#L62C9-L62C15
  214. # under Apache 2.0 license
  215. def detokenize_incrementally(
  216. tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
  217. all_input_ids: List[int],
  218. prev_tokens: Optional[List[str]],
  219. prefix_offset: int = 0,
  220. read_offset: int = 0,
  221. skip_special_tokens: bool = False,
  222. spaces_between_special_tokens: bool = True,
  223. ) -> Tuple[List[str], str, int, int]:
  224. new_token_id = all_input_ids[-1]
  225. # This is the first iteration for this sequence
  226. if prev_tokens is None:
  227. new_tokens = tokenizer.convert_ids_to_tokens(
  228. all_input_ids, skip_special_tokens=skip_special_tokens)
  229. output_tokens = new_tokens
  230. # 5 is an arbitrary value that should work for all
  231. # tokenizers (bigger = more conservative).
  232. # Subtract 1 extra to account for the generated token.
  233. prefix_offset = max(len(output_tokens) - 6, 0)
  234. # If the first new token is a special token, we can't skip 1 extra token
  235. if skip_special_tokens and new_token_id in tokenizer.all_special_ids:
  236. read_offset = max(len(output_tokens), 0)
  237. else:
  238. read_offset = max(len(output_tokens) - 1, 0)
  239. else:
  240. # Put new_token_id in a list so skip_special_tokens is respected
  241. new_tokens = tokenizer.convert_ids_to_tokens(
  242. [new_token_id], skip_special_tokens=skip_special_tokens)
  243. output_tokens = prev_tokens + new_tokens
  244. # The prefix text is necessary only to defeat cleanup algorithms in
  245. # the decode which decide to add a space or not depending on the
  246. # surrounding ids.
  247. if tokenizer.is_fast or not tokenizer.get_added_vocab():
  248. prefix_text = tokenizer.convert_tokens_to_string(
  249. output_tokens[prefix_offset:read_offset])
  250. new_text = tokenizer.convert_tokens_to_string(
  251. output_tokens[prefix_offset:])
  252. else:
  253. prefix_text = _convert_tokens_to_string_with_added_encoders(
  254. tokenizer,
  255. output_tokens[prefix_offset:read_offset],
  256. skip_special_tokens=skip_special_tokens,
  257. spaces_between_special_tokens=spaces_between_special_tokens,
  258. )
  259. new_text = _convert_tokens_to_string_with_added_encoders(
  260. tokenizer,
  261. output_tokens[prefix_offset:],
  262. skip_special_tokens=skip_special_tokens,
  263. spaces_between_special_tokens=spaces_between_special_tokens,
  264. )
  265. if len(new_text) > len(prefix_text) and not new_text.endswith("�"):
  266. # utf-8 char at the end means it's a potential unfinished byte sequence
  267. # from byte fallback tokenization.
  268. # If it's in the middle, it's probably a real invalid id generated
  269. # by the model
  270. new_text = new_text[len(prefix_text):]
  271. return new_tokens, new_text, read_offset, len(output_tokens)
  272. else:
  273. return new_tokens, "", prefix_offset, read_offset