Dockerfile

# Copyright (c) 2023 Agung Wijaya
# Installing Gradio via Dockerfile
# Pull the base image
FROM python:3.8.16-slim-bullseye
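# (python:3.8.16-slim-bullseye = Python 3.8.16 on a minimal Debian 11 "bullseye" base)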
# Install system dependencies
RUN apt-get update \
    && apt-get install -y aria2 wget curl tree unzip ffmpeg build-essential \
    && rm -rf /var/lib/apt/lists/*
# Clean up
RUN apt-get clean; \
    rm -rf /etc/machine-id /var/lib/dbus/machine-id /var/lib/apt/lists/* /tmp/* /var/tmp/*; \
    find /var/log -name "*.log" -type f -delete
# Relocate /tmp to a writable directory under /content
RUN mkdir -p /content/tmp \
    && chmod -R 777 /content/tmp \
    && rm -rf /tmp \
    && ln -s /content/tmp /tmp
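# (Assumption: /tmp is redirected so that all writable state lives under the
# world-writable /content tree, which matters when the container runs as an
# unprivileged user, e.g. on Hugging Face Spaces.)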
# Make the working directory tree world-writable
RUN mkdir -p /content && chmod -R 777 /content
# Fix: matplotlib needs a writable MPLCONFIGDIR
RUN mkdir -p /content/mplconfig && chmod -R 777 /content/mplconfig
# Fix for:
# RuntimeError: cannot cache function '__shear_dense': no locator available for file '/usr/local/lib/python3.8/site-packages/librosa/util/utils.py'
RUN mkdir -p /content/numbacache && chmod -R 777 /content/numbacache
# Fix for:
# PermissionError: [Errno 13] Permission denied: '/.cache' (demucs)
RUN mkdir -p /content/demucscache \
    && chmod -R 777 /content/demucscache \
    && ln -s /content/demucscache /.cache
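# (Assumption: demucs resolves its model cache relative to $HOME/.cache, and
# with no usable HOME in this container that path becomes /.cache, hence the
# symlink above.)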
# Set the working directory
WORKDIR /content
# Set environment variables
# PYTORCH_NO_CUDA_MEMORY_CACHING helps users with very little RAM, even 2 GB (Demucs)
ENV PYTORCH_NO_CUDA_MEMORY_CACHING=1 \
    MPLCONFIGDIR=/content/mplconfig \
    NUMBA_CACHE_DIR=/content/numbacache
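# MPLCONFIGDIR and NUMBA_CACHE_DIR point matplotlib and numba at the writable
# cache directories created above, avoiding the permission errors noted there.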
# Upgrade pip
RUN python -m pip install --no-cache-dir --upgrade pip
# Install libraries
RUN pip install --no-cache-dir --upgrade gradio setuptools wheel
RUN pip install --no-cache-dir faiss-gpu fairseq ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2
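# (The numpy==1.23.5 / numba==0.56.4 / librosa==0.9.2 pins appear chosen for
# mutual compatibility: numba 0.56.x requires numpy < 1.24, and librosa 0.9
# predates numpy 1.24's removal of deprecated aliases. This is an inference,
# not documented here.)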
# Copy requirements.txt
COPY requirements.txt /content/requirements.txt
# Install requirements
RUN pip install --no-cache-dir --upgrade -r requirements.txt
# Copy the project files
COPY . .
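# aria2c flags used throughout the downloads below: -c resumes partial
# downloads, -x 16 allows up to 16 connections per server, -s 16 downloads
# each file in up to 16 segments, and -k 1M sets the minimum segment size.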
# Download hubert_base
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d /content -o hubert_base.pt
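# (infer_pack holds the RVC network definitions: attention modules, shared
# layers, and the model classes needed to load voice-conversion checkpoints.
# This description is inferred from the upstream repository layout.)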
# Download the infer_pack modules
RUN mkdir -p infer_pack
RUN for f in attentions.py commons.py models.py models_onnx.py models_onnx_moess.py modules.py transforms.py; do \
        aria2c --console-log-level=error -c -x 16 -s 16 -k 1M \
            https://raw.githubusercontent.com/fumiama/Retrieval-based-Voice-Conversion-WebUI/main/infer_pack/$f \
            -d /content/infer_pack -o $f || exit 1; \
    done
# Download vc_infer_pipeline.py
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/spaces/DJQmUKV/rvc-inference/raw/main/vc_infer_pipeline.py -d /content -o vc_infer_pipeline.py
# Download config.py and util.py
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/spaces/DJQmUKV/rvc-inference/raw/main/config.py -d /content -o config.py
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/spaces/DJQmUKV/rvc-inference/raw/main/util.py -d /content -o util.py
# Sanity-check that /tmp resolves to the symlinked directory
RUN ls -l /tmp
# Expose the Gradio port
EXPOSE 7860
# Run the app
CMD ["python", "app.py"]
# Enjoy running Gradio!
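# Example usage (the "rvc-gradio" tag is illustrative, not from the project):
#   docker build -t rvc-gradio .
#   docker run -p 7860:7860 rvc-gradio
# The Gradio UI is then reachable at http://localhost:7860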