Selaa lähdekoodia

fix: tokenizer delay when using LLM class

AlpinDale 7 kuukautta sitten
vanhempi
commit
d00a7517e6
1 muutettua tiedostoa jossa 9 lisäystä ja 1 poistoa
  1. 9 1
      aphrodite/endpoints/llm.py

+ 9 - 1
aphrodite/endpoints/llm.py

@@ -14,6 +14,7 @@ from aphrodite.common.utils import Counter, deprecate_kwargs
 from aphrodite.engine.aphrodite_engine import AphroditeEngine
 from aphrodite.engine.args_tools import EngineArgs
 from aphrodite.lora.request import LoRARequest
+from aphrodite.transformers_utils.tokenizer import get_cached_tokenizer
 
 
 class LLM:
@@ -147,7 +148,14 @@ class LLM:
         self,
         tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
     ) -> None:
-        self.llm_engine.tokenizer.tokenizer = tokenizer
+        # Since CachedTokenizer is created dynamically, we have no choice
+        # but to compare by class name. This may misjudge a user-defined
+        # tokenizer whose class name starts with 'Cached'.
+        if tokenizer.__class__.__name__.startswith("Cached"):
+            self.llm_engine.tokenizer.tokenizer = tokenizer
+        else:
+            self.llm_engine.tokenizer.tokenizer = get_cached_tokenizer(
+                tokenizer)
 
     @overload  # LEGACY: single (prompt + optional token ids)
     def generate(