image_processor.py

from typing import cast


def get_image_processor(
    processor_name: str,
    *args,
    trust_remote_code: bool = False,
    **kwargs,
):
    """Gets an image processor for the given model name via HuggingFace."""
    # don't put this import at the top level
    # it will call torch.cuda.device_count()
    from transformers import AutoImageProcessor
    from transformers.image_processing_utils import BaseImageProcessor

    try:
        processor = AutoImageProcessor.from_pretrained(
            processor_name,
            *args,
            trust_remote_code=trust_remote_code,
            **kwargs)
    except ValueError as e:
        # If the error pertains to the processor class not existing or not
        # currently being imported, suggest using the --trust-remote-code flag.
        # Unlike AutoTokenizer, AutoImageProcessor does not separate such errors
        if not trust_remote_code:
            err_msg = (
                "Failed to load the image processor. If the image processor is "
                "a custom processor not yet available in the HuggingFace "
                "transformers library, consider setting "
                "`trust_remote_code=True` in LLM or using the "
                "`--trust-remote-code` flag in the CLI.")
            raise RuntimeError(err_msg) from e
        else:
            raise e

    return cast(BaseImageProcessor, processor)
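
# Example usage (illustrative sketch, not part of this module): the checkpoint
# name below is a placeholder; any HuggingFace checkpoint that provides an
# image processor config should behave the same way. The resulting processor
# follows the standard HuggingFace call convention for preprocessing images.
#
#     from PIL import Image
#
#     processor = get_image_processor(
#         "llava-hf/llava-1.5-7b-hf",  # hypothetical model name for illustration
#         trust_remote_code=False,
#     )
#     inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
#     pixel_values = inputs["pixel_values"]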