setup.py

import io
import logging
import os
import re
import subprocess
import sys
import warnings
from shutil import which
from typing import List

import torch
from packaging.version import Version, parse
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
from torch.utils.cpp_extension import CUDA_HOME

ROOT_DIR = os.path.dirname(__file__)
logger = logging.getLogger(__name__)

# Target device of Aphrodite, supporting [cuda (by default), rocm, neuron, cpu]
APHRODITE_TARGET_DEVICE = os.getenv("APHRODITE_TARGET_DEVICE", "cuda")
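# For example (assuming an editable install), a CPU-only build can be
# requested with:
#   APHRODITE_TARGET_DEVICE=cpu pip install -e .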

def embed_commit_hash():
    try:
        commit_id = subprocess.check_output(["git", "rev-parse", "HEAD"],
                                            encoding="utf-8").strip()
        short_commit_id = subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"],
            encoding="utf-8").strip()

        commit_contents = f'__commit__ = "{commit_id}"\n'
        short_commit_contents = f'__short_commit__ = "{short_commit_id}"\n'

        version_file = os.path.join(ROOT_DIR, "aphrodite", "commit_id.py")
        with open(version_file, "w", encoding="utf-8") as f:
            f.write(commit_contents)
            f.write(short_commit_contents)

    except subprocess.CalledProcessError as e:
        warnings.warn(f"Failed to get commit hash:\n{e}",
                      RuntimeWarning,
                      stacklevel=2)
    except Exception as e:
        warnings.warn(f"Failed to embed commit hash:\n{e}",
                      RuntimeWarning,
                      stacklevel=2)


embed_commit_hash()

if not sys.platform.startswith("linux"):
    logger.warning(
        "Aphrodite only supports Linux platform (including WSL). "
        f"Building on {sys.platform}, "
        "so Aphrodite may not be able to run correctly")
    APHRODITE_TARGET_DEVICE = "empty"

MAIN_CUDA_VERSION = "12.4"

def is_sccache_available() -> bool:
    return which("sccache") is not None


def is_ccache_available() -> bool:
    return which("ccache") is not None


def is_ninja_available() -> bool:
    return which("ninja") is not None


def remove_prefix(text, prefix):
    if text.startswith(prefix):
        return text[len(prefix):]
    return text

class CMakeExtension(Extension):

    def __init__(self, name: str, cmake_lists_dir: str = '.', **kwa) -> None:
        super().__init__(name, sources=[], py_limited_api=True, **kwa)
        self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)


class cmake_build_ext(build_ext):
    # A dict of extension directories that have been configured.
    did_config = {}

    #
    # Determine number of compilation jobs and optionally nvcc compile threads.
    #
    def compute_num_jobs(self):
        # `num_jobs` is either the value of the MAX_JOBS environment variable
        # (if defined) or the number of CPUs available.
        num_jobs = os.environ.get("MAX_JOBS", None)
        if num_jobs is not None:
            num_jobs = int(num_jobs)
            logger.info(f"Using MAX_JOBS={num_jobs} as the number of jobs.")
        else:
            try:
                # os.sched_getaffinity() isn't universally available, so fall
                # back to os.cpu_count() if we get an error here.
                num_jobs = len(os.sched_getaffinity(0))
                logger.info(f"Using {num_jobs} CPUs as the number of jobs.")
            except AttributeError:
                num_jobs = os.cpu_count()
                logger.info(f"Using os.cpu_count()={num_jobs} as the number of"
                            " jobs.")

        nvcc_threads = None
        if _is_cuda() and get_nvcc_cuda_version() >= Version("11.2"):
            # `nvcc_threads` is either the value of the NVCC_THREADS
            # environment variable (if defined) or 1.
            # When it is set, we reduce `num_jobs` to avoid
            # overloading the system.
            nvcc_threads = os.getenv("NVCC_THREADS", None)
            if nvcc_threads is not None:
                nvcc_threads = int(nvcc_threads)
                logger.info(f"Using NVCC_THREADS={nvcc_threads} as the number"
                            " of nvcc threads.")
            else:
                nvcc_threads = 1
            num_jobs = max(1, num_jobs // nvcc_threads)

        return num_jobs, nvcc_threads
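    # For example (hypothetical values), `MAX_JOBS=8 NVCC_THREADS=2` on a CUDA
    # build with nvcc >= 11.2 caps the build at 8 parallel jobs and gives each
    # nvcc invocation 2 threads, so `num_jobs` here drops to 8 // 2 = 4.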
    #
    # Perform cmake configuration for a single extension.
    #
    def configure(self, ext: CMakeExtension) -> None:
        # If we've already configured using the CMakeLists.txt for
        # this extension, exit early.
        if ext.cmake_lists_dir in cmake_build_ext.did_config:
            return

        cmake_build_ext.did_config[ext.cmake_lists_dir] = True

        # Select the build type.
        # Note: optimization level + debug info are set by the build type
        default_cfg = "Debug" if self.debug else "RelWithDebInfo"
        cfg = os.getenv("CMAKE_BUILD_TYPE", default_cfg)

        # where .so files will be written, should be the same for all extensions
        # that use the same CMakeLists.txt.
        outdir = os.path.abspath(
            os.path.dirname(self.get_ext_fullpath(ext.name)))

        cmake_args = [
            '-DCMAKE_BUILD_TYPE={}'.format(cfg),
            '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}'.format(outdir),
            '-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY={}'.format(self.build_temp),
            '-DAPHRODITE_TARGET_DEVICE={}'.format(APHRODITE_TARGET_DEVICE),
        ]

        verbose = bool(int(os.getenv('VERBOSE', '0')))
        if verbose:
            cmake_args += ['-DCMAKE_VERBOSE_MAKEFILE=ON']

        if is_sccache_available():
            cmake_args += [
                '-DCMAKE_CXX_COMPILER_LAUNCHER=sccache',
                '-DCMAKE_CUDA_COMPILER_LAUNCHER=sccache',
            ]
            logger.info("Using sccache as the compiler launcher.")
        elif is_ccache_available():
            cmake_args += [
                '-DCMAKE_CXX_COMPILER_LAUNCHER=ccache',
                '-DCMAKE_CUDA_COMPILER_LAUNCHER=ccache',
            ]
            logger.info("Using ccache as the compiler launcher.")

        # Pass the python executable to cmake so it can find an exact
        # match.
        cmake_args += [
            '-DAPHRODITE_PYTHON_EXECUTABLE={}'.format(sys.executable)
        ]

        num_jobs, nvcc_threads = self.compute_num_jobs()

        if _install_punica():
            cmake_args += ['-DAPHRODITE_INSTALL_PUNICA_KERNELS=ON']

        if nvcc_threads:
            cmake_args += ['-DNVCC_THREADS={}'.format(nvcc_threads)]

        if is_ninja_available():
            build_tool = ['-G', 'Ninja']
            cmake_args += [
                '-DCMAKE_JOB_POOL_COMPILE:STRING=compile',
                '-DCMAKE_JOB_POOLS:STRING=compile={}'.format(num_jobs),
            ]
        else:
            # Default build tool to whatever cmake picks.
            build_tool = []

        subprocess.check_call(
            ['cmake', ext.cmake_lists_dir, *build_tool, *cmake_args],
            cwd=self.build_temp)
    def build_extensions(self) -> None:
        # Ensure that CMake is present and working
        try:
            subprocess.check_output(['cmake', '--version'])
        except OSError as e:
            raise RuntimeError('Cannot find CMake executable') from e

        # Create build directory if it does not exist.
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)

        targets = []
        # Build all the extensions
        for ext in self.extensions:
            self.configure(ext)
            targets.append(remove_prefix(ext.name, "aphrodite."))

        num_jobs, _ = self.compute_num_jobs()

        build_args = [
            "--build",
            ".",
            f"-j={num_jobs}",
            *[f"--target={name}" for name in targets],
        ]

        subprocess.check_call(["cmake", *build_args], cwd=self.build_temp)

def _no_device() -> bool:
    return APHRODITE_TARGET_DEVICE == "empty"


def _is_cuda() -> bool:
    has_cuda = torch.version.cuda is not None
    return (APHRODITE_TARGET_DEVICE == "cuda" and has_cuda
            and not (_is_neuron() or _is_tpu()))


def _is_hip() -> bool:
    return (APHRODITE_TARGET_DEVICE == "cuda"
            or APHRODITE_TARGET_DEVICE == "rocm") \
        and torch.version.hip is not None


def _is_neuron() -> bool:
    torch_neuronx_installed = True
    try:
        subprocess.run(["neuron-ls"], capture_output=True, check=True)
    except (FileNotFoundError, PermissionError, subprocess.CalledProcessError):
        torch_neuronx_installed = False
    return torch_neuronx_installed


def _is_tpu() -> bool:
    return APHRODITE_TARGET_DEVICE == "tpu"


def _is_cpu() -> bool:
    return APHRODITE_TARGET_DEVICE == "cpu"


def _is_openvino() -> bool:
    return APHRODITE_TARGET_DEVICE == "openvino"


def _is_xpu() -> bool:
    return APHRODITE_TARGET_DEVICE == "xpu"


def _build_custom_ops() -> bool:
    return _is_cuda() or _is_hip() or _is_cpu()


def _build_core_ext() -> bool:
    return not _is_neuron() and not _is_tpu()

def _install_punica() -> bool:
    install_punica = bool(
        int(os.getenv("APHRODITE_INSTALL_PUNICA_KERNELS", "0")))
    device_count = torch.cuda.device_count()
    for i in range(device_count):
        major, minor = torch.cuda.get_device_capability(i)
        if major < 8:
            install_punica = False
            break
    return install_punica
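# The Punica (multi-LoRA) kernels are opt-in and are skipped if any visible
# GPU has compute capability below 8.0, e.g. (assuming an editable install):
#   APHRODITE_INSTALL_PUNICA_KERNELS=1 pip install -e .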

def get_hipcc_rocm_version():
    # Run the hipcc --version command
    result = subprocess.run(['hipcc', '--version'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            text=True)

    # Check if the command was executed successfully
    if result.returncode != 0:
        print("Error running 'hipcc --version'")
        return None

    # Extract the version using a regular expression
    match = re.search(r'HIP version: (\S+)', result.stdout)
    if match:
        # Return the version string
        return match.group(1)
    else:
        print("Could not find HIP version in the output")
        return None

def get_neuronxcc_version():
    import sysconfig
    site_dir = sysconfig.get_paths()["purelib"]
    version_file = os.path.join(site_dir, "neuronxcc", "version",
                                "__init__.py")

    # Read the version file of the installed neuronxcc package
    with open(version_file, "rt") as fp:
        content = fp.read()

    # Extract the version using a regular expression
    match = re.search(r"__version__ = '(\S+)'", content)
    if match:
        # Return the version string
        return match.group(1)
    else:
        raise RuntimeError("Could not find neuronxcc version in the file")

def get_nvcc_cuda_version() -> Version:
    """Get the CUDA version from nvcc.

    Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
    """
    nvcc_output = subprocess.check_output([CUDA_HOME + "/bin/nvcc", "-V"],
                                          universal_newlines=True)
    output = nvcc_output.split()
    release_idx = output.index("release") + 1
    nvcc_cuda_version = parse(output[release_idx].split(",")[0])
    return nvcc_cuda_version


def get_path(*filepath) -> str:
    return os.path.join(ROOT_DIR, *filepath)


def find_version(filepath: str) -> str:
    """Extract version information from the given filepath.

    Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py
    """
    with open(filepath) as fp:
        version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                                  fp.read(), re.M)
        if version_match:
            return version_match.group(1)
        raise RuntimeError("Unable to find version string.")

def get_aphrodite_version() -> str:
    version = find_version(get_path("aphrodite", "version.py"))

    if _no_device():
        version += "+empty"
    elif _is_cuda():
        cuda_version = str(get_nvcc_cuda_version())
        if cuda_version != MAIN_CUDA_VERSION:
            cuda_version_str = cuda_version.replace(".", "")[:3]
            version += f"+cu{cuda_version_str}"
    elif _is_hip():
        # Get the HIP version
        hipcc_version = get_hipcc_rocm_version()
        if hipcc_version != MAIN_CUDA_VERSION:
            rocm_version_str = hipcc_version.replace(".", "")[:3]
            version += f"+rocm{rocm_version_str}"
    elif _is_neuron():
        # Get the Neuron version
        neuron_version = str(get_neuronxcc_version())
        if neuron_version != MAIN_CUDA_VERSION:
            neuron_version_str = neuron_version.replace(".", "")[:3]
            version += f"+neuron{neuron_version_str}"
    elif _is_openvino():
        version += "+openvino"
    elif _is_tpu():
        version += "+tpu"
    elif _is_cpu():
        version += "+cpu"
    elif _is_xpu():
        version += "+xpu"
    else:
        raise RuntimeError("Unknown runtime environment, "
                           "must be either CUDA, ROCm, CPU, or Neuron.")

    return version

def read_readme() -> str:
    """Read the README file if present."""
    p = get_path("README.md")
    if os.path.isfile(p):
        return io.open(get_path("README.md"), "r", encoding="utf-8").read()
    else:
        return ""

def get_requirements() -> List[str]:
    """Get Python package dependencies from requirements.txt."""

    def _read_requirements(filename: str) -> List[str]:
        with open(get_path(filename)) as f:
            requirements = f.read().strip().split("\n")
        resolved_requirements = []
        for line in requirements:
            if line.startswith("-r "):
                resolved_requirements += _read_requirements(line.split()[1])
            else:
                resolved_requirements.append(line)
        return resolved_requirements

    if _no_device():
        requirements = _read_requirements("requirements-cuda.txt")
    elif _is_cuda():
        requirements = _read_requirements("requirements-cuda.txt")
        cuda_major, cuda_minor = torch.version.cuda.split(".")
        modified_requirements = []
        for req in requirements:
            if ("aphrodite-flash-attn" in req
                    and not (cuda_major == "12" and cuda_minor == "4")):
                # aphrodite-flash-attn is built only for CUDA 12.4.
                # Skip for other versions.
                continue
            modified_requirements.append(req)
        requirements = modified_requirements
    elif _is_hip():
        requirements = _read_requirements("requirements-rocm.txt")
    elif _is_neuron():
        requirements = _read_requirements("requirements-neuron.txt")
    elif _is_openvino():
        requirements = _read_requirements("requirements-openvino.txt")
    elif _is_tpu():
        requirements = _read_requirements("requirements-tpu.txt")
    elif _is_cpu():
        requirements = _read_requirements("requirements-cpu.txt")
    elif _is_xpu():
        requirements = _read_requirements("requirements-xpu.txt")
    else:
        raise ValueError(
            "Unsupported platform, please use CUDA, ROCm, Neuron, CPU or "
            "OpenVINO.")
    return requirements

ext_modules = []

if _build_core_ext():
    ext_modules.append(CMakeExtension(name="aphrodite._core_C"))

if _is_cuda() or _is_hip():
    ext_modules.append(CMakeExtension(name="aphrodite._moe_C"))

if _build_custom_ops():
    ext_modules.append(CMakeExtension(name="aphrodite._C"))

if (_is_cuda() or _is_hip()) and _install_punica():
    ext_modules.append(CMakeExtension(name="aphrodite._punica_C"))

package_data = {
    "aphrodite": [
        "endpoints/kobold/klite.embd", "quantization/hadamard.safetensors",
        "py.typed", "modeling/layers/fused_moe/configs/*.json"
    ]
}
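# When APHRODITE_USE_PRECOMPILED is set, nothing is compiled; instead, any
# prebuilt *.so files already present in the package are shipped as-is, e.g.
# (assuming an editable install):
#   APHRODITE_USE_PRECOMPILED=1 pip install -e .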
if os.environ.get("APHRODITE_USE_PRECOMPILED"):
    ext_modules = []
    package_data["aphrodite"].append("*.so")

if _no_device():
    ext_modules = []

setup(
    name="aphrodite-engine",
    version=get_aphrodite_version(),
    author="PygmalionAI",
    license="AGPL 3.0",
    description="The inference engine for PygmalionAI models",
    long_description=read_readme(),
    long_description_content_type="text/markdown",
    url="https://github.com/PygmalionAI/aphrodite-engine",
    project_urls={
        "Homepage": "https://pygmalion.chat",
        "Documentation": "https://docs.pygmalion.chat",
        "GitHub": "https://github.com/PygmalionAI",
        "Huggingface": "https://huggingface.co/PygmalionAI",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",  # noqa: E501
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    packages=find_packages(exclude=("kernels", "examples", "tests*")),
    python_requires=">=3.8",
    install_requires=get_requirements(),
    extras_require={
        "flash-attn": ["flash-attn==2.5.8"],
        "tensorizer": ["tensorizer>=2.9.0"],
        "ray": ["ray>=2.9"],
    },
    ext_modules=ext_modules,
    cmdclass={"build_ext": cmake_build_ext} if len(ext_modules) > 0 else {},
    package_data=package_data,
    entry_points={
        "console_scripts": [
            "aphrodite=aphrodite.endpoints.cli:main",
        ],
    },
)