# settings_tab_bark.py
  1. import os
  2. import gradio as gr
  3. from tts_webui.config.config import config
  4. from tts_webui.config.save_config_bark import save_config_bark
  5. from tts_webui.bark.BarkModelManager import bark_model_manager
  6. from tts_webui.utils.setup_or_recover import generate_env
  7. from tts_webui.utils.setup_or_recover import write_env
  8. def settings_tab_bark() -> None:
  9. with gr.Tab("Settings (Bark)") as settings_tab, gr.Row(equal_height=False):
  10. bark_settings_ui(settings_tab)
  11. def bark_settings_ui(settings_tab: gr.Tab):
  12. with gr.Column():
  13. model_config = config["model"]
  14. with gr.Row(variant="panel"):
  15. gr.Markdown("### Text generation:")
  16. text_use_gpu = gr.Checkbox(
  17. label="Use GPU",
  18. value=model_config["text_use_gpu"],
  19. )
  20. text_use_small = gr.Checkbox(
  21. label="Use small model",
  22. value=model_config["text_use_small"],
  23. )
  24. with gr.Row(variant="panel"):
  25. gr.Markdown("### Coarse-to-fine inference:")
  26. coarse_use_gpu = gr.Checkbox(
  27. label="Use GPU",
  28. value=model_config["coarse_use_gpu"],
  29. )
  30. coarse_use_small = gr.Checkbox(
  31. label="Use small model",
  32. value=model_config["coarse_use_small"],
  33. )
  34. with gr.Row(variant="panel"):
  35. gr.Markdown("### Fine-tuning:")
  36. fine_use_gpu = gr.Checkbox(
  37. label="Use GPU",
  38. value=model_config["fine_use_gpu"],
  39. )
  40. fine_use_small = gr.Checkbox(
  41. label="Use small model",
  42. value=model_config["fine_use_small"],
  43. )
  44. with gr.Row(variant="panel"):
  45. gr.Markdown("### Codec:")
  46. codec_use_gpu = gr.Checkbox(
  47. label="Use GPU for codec",
  48. value=model_config["codec_use_gpu"],
  49. )
  50. save_beacon = gr.Markdown("")
  51. gr.Markdown(
  52. """
  53. ## Environment variables
  54. (Requires restart)
  55. """
  56. )
  57. def _cast_bool(x: str):
  58. return x.lower() in ("true", "1")
  59. env_suno_use_small_models = gr.Checkbox(
  60. label="Use small models",
  61. value=_cast_bool(os.environ.get("SUNO_USE_SMALL_MODELS", "")),
  62. )
  63. env_suno_enable_mps = gr.Checkbox(
  64. label="Enable MPS", value=_cast_bool(os.environ.get("SUNO_ENABLE_MPS", ""))
  65. )
  66. env_suno_offload_cpu = gr.Checkbox(
  67. label="Offload GPU models to CPU",
  68. value=_cast_bool(os.environ.get("SUNO_OFFLOAD_CPU", "")),
  69. )
  70. def save_env_variables(
  71. env_suno_use_small_models,
  72. env_suno_enable_mps,
  73. env_suno_offload_cpu,
  74. ):
  75. write_env(
  76. generate_env(
  77. env_suno_use_small_models=env_suno_use_small_models,
  78. env_suno_enable_mps=env_suno_enable_mps,
  79. env_suno_offload_cpu=env_suno_offload_cpu,
  80. )
  81. )
  82. env_inputs = [
  83. env_suno_use_small_models,
  84. env_suno_enable_mps,
  85. env_suno_offload_cpu,
  86. ]
  87. for i in env_inputs:
  88. i.change(
  89. fn=save_env_variables,
  90. inputs=env_inputs,
  91. api_name=i == env_inputs[0] and "save_env_variables_bark" or None,
  92. )
  93. # refresh environment variables button
  94. inputs = [
  95. text_use_gpu,
  96. text_use_small,
  97. coarse_use_gpu,
  98. coarse_use_small,
  99. fine_use_gpu,
  100. fine_use_small,
  101. codec_use_gpu,
  102. ]
  103. for i in inputs:
  104. i.change(
  105. fn=save_config_bark,
  106. inputs=inputs,
  107. outputs=[save_beacon],
  108. api_name=i == inputs[0] and "save_config_bark" or None,
  109. )
  110. def sync_ui():
  111. def checkbox_update_helper(key: str):
  112. return gr.Checkbox(value=config["model"][key])
  113. return [
  114. checkbox_update_helper("text_use_gpu"),
  115. checkbox_update_helper("text_use_small"),
  116. checkbox_update_helper("coarse_use_gpu"),
  117. checkbox_update_helper("coarse_use_small"),
  118. checkbox_update_helper("fine_use_gpu"),
  119. checkbox_update_helper("fine_use_small"),
  120. checkbox_update_helper("codec_use_gpu"),
  121. ]
  122. settings_tab.select(fn=sync_ui, outputs=inputs, api_name="get_config_bark")
  123. with gr.Column():
  124. gr.Markdown(
  125. """
  126. # Recommended settings:
  127. * For VRAM >= 10GB, use large models.
  128. * For VRAM < 10GB, use small models.
  129. * Text generation and coarse-to-fine are of similar importance.
  130. * Small models might not have a perceptible difference in the result.
  131. * For VRAM < 4GB, use CPU offloading (requires restart).
  132. * Small models are also recommended.
  133. * For VRAM < 2GB, use CPU offloading and small models (requires restart).
  134. """
  135. )
  136. load_button = gr.Button(value="Load models")
  137. load_button.click(
  138. fn=lambda: gr.Button(value="Loading...", interactive=False),
  139. outputs=[load_button],
  140. ).then(
  141. fn=_load_models,
  142. inputs=[
  143. text_use_gpu,
  144. text_use_small,
  145. coarse_use_gpu,
  146. coarse_use_small,
  147. fine_use_gpu,
  148. fine_use_small,
  149. codec_use_gpu,
  150. ],
  151. outputs=[load_button],
  152. )
  153. def _load_models(
  154. text_use_gpu,
  155. text_use_small,
  156. coarse_use_gpu,
  157. coarse_use_small,
  158. fine_use_gpu,
  159. fine_use_small,
  160. codec_use_gpu,
  161. ):
  162. save_config_bark(
  163. text_use_gpu,
  164. text_use_small,
  165. coarse_use_gpu,
  166. coarse_use_small,
  167. fine_use_gpu,
  168. fine_use_small,
  169. codec_use_gpu,
  170. )
  171. try:
  172. bark_model_manager.reload_models(config)
  173. return gr.Button(value="Reload models", interactive=True)
  174. except Exception as e:
  175. print(e)
  176. return gr.Button(value="Failed to load models", interactive=True)