# BarkModelManager.py
  1. class BarkModelManager:
  2. def __init__(self):
  3. self.models_loaded = False
  4. def reload_models(self, config):
  5. from bark.generation import preload_models
  6. self.models_loaded = True
  7. c = config["model"]
  8. def _print_prop(name: str, gpu: bool, small: bool):
  9. def _yes_or_no(x: bool):
  10. return "Yes" if x else "No"
  11. print(
  12. f"\t- {name}:\t\t\t GPU: {_yes_or_no(gpu)}, Small Model: {_yes_or_no(small)}"
  13. )
  14. print(f"{'Reloading' if self.models_loaded else 'Loading'} Bark models")
  15. _print_prop("Text-to-Semantic", c["text_use_gpu"], c["text_use_small"])
  16. _print_prop("Semantic-to-Coarse", c["coarse_use_gpu"], c["coarse_use_small"])
  17. _print_prop("Coarse-to-Fine", c["fine_use_gpu"], c["fine_use_small"])
  18. _print_prop("Encodec", c["codec_use_gpu"], False)
  19. # preload_models(**c, force_reload=True)
  20. preload_models(
  21. coarse_use_gpu=c["coarse_use_gpu"],
  22. coarse_use_small=c["coarse_use_small"],
  23. fine_use_gpu=c["fine_use_gpu"],
  24. fine_use_small=c["fine_use_small"],
  25. text_use_gpu=c["text_use_gpu"],
  26. text_use_small=c["text_use_small"],
  27. codec_use_gpu=c["codec_use_gpu"],
  28. force_reload=True,
  29. )
  30. def unload_models(self):
  31. from bark.generation import clean_models
  32. print("Unloading Bark models...")
  33. self.models_loaded = False
  34. clean_models()
  35. print("Unloaded Bark models")
  36. def unload_model(self, model_key):
  37. from bark.generation import clean_models
  38. print(f"Unloading Bark model {model_key}")
  39. clean_models(model_key=model_key)
  40. print(f"Unloaded Bark model {model_key}")
# Module-level singleton shared by importers of this module.
bark_model_manager = BarkModelManager()