@@ -146,8 +146,9 @@ def detokenize_incrementally(
         prefix_offset = max(len(output_tokens) - 6, 0)
         read_offset = max(len(output_tokens) - 1, 0)
     else:
-        new_tokens = tokenizer.convert_ids_to_tokens(
-            [new_token_id], skip_special_tokens=skip_special_tokens)
+        new_token = tokenizer.convert_ids_to_tokens(
+            new_token_id, skip_special_tokens=skip_special_tokens)
+        new_tokens = [new_token]
         output_tokens = prev_tokens + new_tokens
 
     # The prefix text is necessary only to defeat cleanup algorithms in
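
For context, a minimal sketch of the two call shapes this hunk touches, assuming a Hugging Face tokenizer; the `gpt2` checkpoint and the placeholder values below are illustrative only and not taken from the PR. With a list argument, `convert_ids_to_tokens` returns a list of token strings (filtering special ids when `skip_special_tokens=True`); with a single id it returns one token string, which the new code wraps back into a list before concatenating with `prev_tokens`.

```python
from transformers import AutoTokenizer

# Hypothetical setup; in detokenize_incrementally these come in as
# arguments (tokenizer, new_token_id, prev_tokens, skip_special_tokens).
tokenizer = AutoTokenizer.from_pretrained("gpt2")
new_token_id = 11          # placeholder id
prev_tokens = ["Hello"]    # placeholder previous tokens
skip_special_tokens = True

# Old call shape: list in, list of token strings out.
old_new_tokens = tokenizer.convert_ids_to_tokens(
    [new_token_id], skip_special_tokens=skip_special_tokens)

# New call shape: single id in, single token string out, re-wrapped in a
# list so the concatenation below still operates on lists.
new_token = tokenizer.convert_ids_to_tokens(
    new_token_id, skip_special_tokens=skip_special_tokens)
new_tokens = [new_token]

output_tokens = prev_tokens + new_tokens
```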