from typing import cast


def get_video_processor(
    processor_name: str,
    trust_remote_code: bool = False,
):
    """Gets a video processor for the given model name via HuggingFace."""
    # Defer the transformers import (see get_image_processor below):
    # importing it at the top level calls torch.cuda.device_count().
    from transformers import AutoProcessor
    try:
        processor = AutoProcessor.from_pretrained(
            processor_name,
            trust_remote_code=trust_remote_code,
        )
        video_processor = processor.video_processor
    except ValueError as e:
        # As in get_image_processor below, a custom processor class that is
        # not registered with transformers surfaces as a ValueError.
        if not trust_remote_code:
            err_msg = (
                "Failed to load the video processor. If the video processor "
                "is a custom processor not yet available in the HuggingFace "
                "transformers library, consider setting "
                "`trust_remote_code=True` in LLM or using the "
                "`--trust-remote-code` flag in the CLI.")
            raise RuntimeError(err_msg) from e
        else:
            raise
    return video_processor
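
# A minimal usage sketch (an illustration added here, not part of the
# original module). "llava-hf/LLaVA-NeXT-Video-7B-hf" is assumed as an
# example checkpoint whose AutoProcessor exposes a `video_processor`
# attribute; `frames` stands in for a list of decoded video frames.
#
#     video_processor = get_video_processor("llava-hf/LLaVA-NeXT-Video-7B-hf")
#     inputs = video_processor(frames, return_tensors="pt")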


def get_image_processor(
    processor_name: str,
    *args,
    trust_remote_code: bool = False,
    **kwargs,
):
    """Gets an image processor for the given model name via HuggingFace."""
    # Don't put this import at the top level:
    # it will call torch.cuda.device_count().
    from transformers import AutoImageProcessor
    from transformers.image_processing_utils import BaseImageProcessor
    try:
        processor = AutoImageProcessor.from_pretrained(
            processor_name,
            *args,
            trust_remote_code=trust_remote_code,
            **kwargs)
    except ValueError as e:
        # If the error pertains to the processor class not existing or not
        # currently being imported, suggest using the --trust-remote-code
        # flag. Unlike AutoTokenizer, AutoImageProcessor does not separate
        # such errors from other ValueErrors.
        if not trust_remote_code:
            err_msg = (
                "Failed to load the image processor. If the image processor "
                "is a custom processor not yet available in the HuggingFace "
                "transformers library, consider setting "
                "`trust_remote_code=True` in LLM or using the "
                "`--trust-remote-code` flag in the CLI.")
            raise RuntimeError(err_msg) from e
        else:
            raise
    return cast(BaseImageProcessor, processor)
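
# A minimal runnable demo (an illustration added here, not part of the
# original module); "openai/clip-vit-base-patch32" is assumed purely as an
# example Hub checkpoint with a standard image processor.
if __name__ == "__main__":
    from PIL import Image

    image_processor = get_image_processor("openai/clip-vit-base-patch32")
    # A blank RGB image stands in for real input data.
    image = Image.new("RGB", (224, 224))
    inputs = image_processor(images=image, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])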