import io
import logging
import os
import re
import subprocess
import sys
import warnings
from shutil import which
from typing import List

import torch
from packaging.version import Version, parse
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
from torch.utils.cpp_extension import CUDA_HOME

ROOT_DIR = os.path.dirname(__file__)
logger = logging.getLogger(__name__)

# Target device of Aphrodite, supporting [cuda (by default), rocm, neuron,
# cpu, tpu, openvino, xpu]
APHRODITE_TARGET_DEVICE = os.getenv("APHRODITE_TARGET_DEVICE", "cuda")
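
# For example, `APHRODITE_TARGET_DEVICE=cpu pip install -e .` selects a
# CPU-only build; leaving the variable unset keeps the default CUDA build.
# (The exact pip invocation is illustrative.)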


def embed_commit_hash():
    try:
        commit_id = subprocess.check_output(["git", "rev-parse", "HEAD"],
                                            encoding="utf-8").strip()
        short_commit_id = subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"], encoding="utf-8").strip()

        commit_contents = f'__commit__ = "{commit_id}"\n'
        short_commit_contents = f'__short_commit__ = "{short_commit_id}"\n'

        version_file = os.path.join(ROOT_DIR, "aphrodite", "commit_id.py")
        with open(version_file, "w", encoding="utf-8") as f:
            f.write(commit_contents)
            f.write(short_commit_contents)

    except subprocess.CalledProcessError as e:
        warnings.warn(f"Failed to get commit hash:\n{e}",
                      RuntimeWarning,
                      stacklevel=2)
    except Exception as e:
        warnings.warn(f"Failed to embed commit hash:\n{e}",
                      RuntimeWarning,
                      stacklevel=2)


embed_commit_hash()

# Aphrodite only supports Linux platform
assert sys.platform.startswith(
    "linux"), "Aphrodite only supports Linux platform (including WSL)."

MAIN_CUDA_VERSION = "12.1"


def is_sccache_available() -> bool:
    return which("sccache") is not None


def is_ccache_available() -> bool:
    return which("ccache") is not None


def is_ninja_available() -> bool:
    return which("ninja") is not None


def remove_prefix(text, prefix):
    if text.startswith(prefix):
        return text[len(prefix):]
    return text


class CMakeExtension(Extension):

    def __init__(self, name: str, cmake_lists_dir: str = '.', **kwa) -> None:
        super().__init__(name, sources=[], py_limited_api=True, **kwa)
        self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)


class cmake_build_ext(build_ext):
    # A dict of extension directories that have been configured.
    did_config = {}

    #
    # Determine number of compilation jobs and optionally nvcc compile threads.
    #
    def compute_num_jobs(self):
        # `num_jobs` is either the value of the MAX_JOBS environment variable
        # (if defined) or the number of CPUs available.
        num_jobs = os.environ.get("MAX_JOBS", None)
        if num_jobs is not None:
            num_jobs = int(num_jobs)
            logger.info(f"Using MAX_JOBS={num_jobs} as the number of jobs.")
        else:
            try:
                # os.sched_getaffinity() isn't universally available, so fall
                # back to os.cpu_count() if we get an error here.
                num_jobs = len(os.sched_getaffinity(0))
                logger.info(f"Using {num_jobs} CPUs as the number of jobs.")
            except AttributeError:
                num_jobs = os.cpu_count()
                logger.info(f"Using os.cpu_count()={num_jobs} as the number of"
                            " jobs.")

        nvcc_threads = None
        if _is_cuda() and get_nvcc_cuda_version() >= Version("11.2"):
            # `nvcc_threads` is either the value of the NVCC_THREADS
            # environment variable (if defined) or 1.
            # When it is set, we reduce `num_jobs` to avoid
            # overloading the system.
            nvcc_threads = os.getenv("NVCC_THREADS", None)
            if nvcc_threads is not None:
                nvcc_threads = int(nvcc_threads)
                logger.info(f"Using NVCC_THREADS={nvcc_threads} as the number"
                            " of nvcc threads.")
            else:
                nvcc_threads = 1
            num_jobs = max(1, num_jobs // nvcc_threads)

        return num_jobs, nvcc_threads
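
    # Worked example: with MAX_JOBS=16 and NVCC_THREADS=4 (and CUDA >= 11.2),
    # compute_num_jobs() returns (4, 4): four parallel build jobs, each
    # invoking nvcc with four threads, for roughly 16-way compile parallelism.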

    #
    # Perform cmake configuration for a single extension.
    #
    def configure(self, ext: CMakeExtension) -> None:
        # If we've already configured using the CMakeLists.txt for
        # this extension, exit early.
        if ext.cmake_lists_dir in cmake_build_ext.did_config:
            return

        cmake_build_ext.did_config[ext.cmake_lists_dir] = True

        # Select the build type.
        # Note: optimization level + debug info are set by the build type
        default_cfg = "Debug" if self.debug else "RelWithDebInfo"
        cfg = os.getenv("CMAKE_BUILD_TYPE", default_cfg)

        # where .so files will be written, should be the same for all
        # extensions that use the same CMakeLists.txt.
        outdir = os.path.abspath(
            os.path.dirname(self.get_ext_fullpath(ext.name)))

        cmake_args = [
            '-DCMAKE_BUILD_TYPE={}'.format(cfg),
            '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}'.format(outdir),
            '-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY={}'.format(self.build_temp),
            '-DAPHRODITE_TARGET_DEVICE={}'.format(APHRODITE_TARGET_DEVICE),
        ]

        verbose = bool(int(os.getenv('VERBOSE', '0')))
        if verbose:
            cmake_args += ['-DCMAKE_VERBOSE_MAKEFILE=ON']

        if is_sccache_available():
            cmake_args += [
                '-DCMAKE_CXX_COMPILER_LAUNCHER=sccache',
                '-DCMAKE_CUDA_COMPILER_LAUNCHER=sccache',
            ]
            logger.info("Using sccache as the compiler launcher.")
        elif is_ccache_available():
            cmake_args += [
                '-DCMAKE_CXX_COMPILER_LAUNCHER=ccache',
                '-DCMAKE_CUDA_COMPILER_LAUNCHER=ccache',
            ]
            logger.info("Using ccache as the compiler launcher.")

        # Pass the python executable to cmake so it can find an exact
        # match.
        cmake_args += [
            '-DAPHRODITE_PYTHON_EXECUTABLE={}'.format(sys.executable)
        ]

        if _install_punica():
            cmake_args += ['-DAPHRODITE_INSTALL_PUNICA_KERNELS=ON']
        # if _install_hadamard():
        #     cmake_args += ['-DAPHRODITE_INSTALL_HADAMARD_KERNELS=ON']

        #
        # Setup parallelism and build tool
        #
        num_jobs, nvcc_threads = self.compute_num_jobs()
        if nvcc_threads:
            cmake_args += ['-DNVCC_THREADS={}'.format(nvcc_threads)]

        if is_ninja_available():
            build_tool = ['-G', 'Ninja']
            cmake_args += [
                '-DCMAKE_JOB_POOL_COMPILE:STRING=compile',
                '-DCMAKE_JOB_POOLS:STRING=compile={}'.format(num_jobs),
            ]
        else:
            # Default build tool to whatever cmake picks.
            build_tool = []

        subprocess.check_call(
            ['cmake', ext.cmake_lists_dir, *build_tool, *cmake_args],
            cwd=self.build_temp)
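
    # The configure step above is roughly equivalent to (illustrative
    # paths/values, assuming ninja is available):
    #   cmake <cmake_lists_dir> -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo \
    #       -DAPHRODITE_TARGET_DEVICE=cuda -DAPHRODITE_PYTHON_EXECUTABLE=...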

    def build_extensions(self) -> None:
        # Ensure that CMake is present and working
        try:
            subprocess.check_output(['cmake', '--version'])
        except OSError as e:
            raise RuntimeError('Cannot find CMake executable') from e

        # Create build directory if it does not exist.
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)

        targets = []
        # Build all the extensions
        for ext in self.extensions:
            self.configure(ext)
            targets.append(remove_prefix(ext.name, "aphrodite."))

        num_jobs, _ = self.compute_num_jobs()

        build_args = [
            "--build",
            ".",
            f"-j={num_jobs}",
            *[f"--target={name}" for name in targets],
        ]

        subprocess.check_call(["cmake", *build_args], cwd=self.build_temp)


def _is_cuda() -> bool:
    has_cuda = torch.version.cuda is not None
    return (APHRODITE_TARGET_DEVICE == "cuda" and has_cuda
            and not (_is_neuron() or _is_tpu()))


def _is_hip() -> bool:
    return (APHRODITE_TARGET_DEVICE == "cuda"
            or APHRODITE_TARGET_DEVICE == "rocm") \
        and torch.version.hip is not None


def _is_neuron() -> bool:
    torch_neuronx_installed = True
    try:
        subprocess.run(["neuron-ls"], capture_output=True, check=True)
    except (FileNotFoundError, PermissionError, subprocess.CalledProcessError):
        torch_neuronx_installed = False
    return torch_neuronx_installed


def _is_tpu() -> bool:
    return APHRODITE_TARGET_DEVICE == "tpu"


def _is_cpu() -> bool:
    return APHRODITE_TARGET_DEVICE == "cpu"


def _is_openvino() -> bool:
    return APHRODITE_TARGET_DEVICE == "openvino"


def _is_xpu() -> bool:
    return APHRODITE_TARGET_DEVICE == "xpu"


def _build_custom_ops() -> bool:
    return _is_cuda() or _is_hip() or _is_cpu()


def _install_punica() -> bool:
    install_punica = bool(
        int(os.getenv("APHRODITE_INSTALL_PUNICA_KERNELS", "1")))
    device_count = torch.cuda.device_count()
    for i in range(device_count):
        major, minor = torch.cuda.get_device_capability(i)
        if major < 8:
            install_punica = False
            break
    return install_punica
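
# Note: punica (multi-LoRA) kernels are only built when every visible GPU
# reports compute capability >= 8.0 (Ampere or newer); setting
# APHRODITE_INSTALL_PUNICA_KERNELS=0 opts out explicitly.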


# def _install_hadamard() -> bool:
#     install_hadamard = bool(
#         int(os.getenv("APHRODITE_INSTALL_HADAMARD_KERNELS", "1")))
#     device_count = torch.cuda.device_count()
#     for i in range(device_count):
#         major, minor = torch.cuda.get_device_capability(i)
#         if major <= 6:
#             install_hadamard = False
#             break
#     return install_hadamard


def get_hipcc_rocm_version():
    # Run the hipcc --version command
    result = subprocess.run(['hipcc', '--version'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            text=True)

    # Check if the command was executed successfully
    if result.returncode != 0:
        print("Error running 'hipcc --version'")
        return None

    # Extract the version using a regular expression
    match = re.search(r'HIP version: (\S+)', result.stdout)
    if match:
        # Return the version string
        return match.group(1)
    else:
        print("Could not find HIP version in the output")
        return None


def get_neuronxcc_version():
    import sysconfig
    site_dir = sysconfig.get_paths()["purelib"]
    version_file = os.path.join(site_dir, "neuronxcc", "version",
                                "__init__.py")

    # Read the version file shipped with the installed neuronxcc package.
    with open(version_file, "rt") as fp:
        content = fp.read()

    # Extract the version using a regular expression
    match = re.search(r"__version__ = '(\S+)'", content)
    if match:
        # Return the version string
        return match.group(1)
    else:
        raise RuntimeError("Could not find neuronxcc version in the file")


def get_nvcc_cuda_version() -> Version:
    """Get the CUDA version from nvcc.

    Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py
    """
    nvcc_output = subprocess.check_output([CUDA_HOME + "/bin/nvcc", "-V"],
                                          universal_newlines=True)
    output = nvcc_output.split()
    release_idx = output.index("release") + 1
    nvcc_cuda_version = parse(output[release_idx].split(",")[0])
    return nvcc_cuda_version
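
# Illustrative: `nvcc -V` prints a line such as
# "Cuda compilation tools, release 12.1, V12.1.105"; the token following
# "release" is "12.1,", and stripping the trailing comma yields
# Version("12.1").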


def get_path(*filepath) -> str:
    return os.path.join(ROOT_DIR, *filepath)


def find_version(filepath: str) -> str:
    """Extract version information from the given filepath.

    Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py
    """
    with open(filepath) as fp:
        version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                                  fp.read(), re.M)
        if version_match:
            return version_match.group(1)
        raise RuntimeError("Unable to find version string.")


def get_aphrodite_version() -> str:
    version = find_version(get_path("aphrodite", "version.py"))

    if _is_cuda():
        cuda_version = str(get_nvcc_cuda_version())
        if cuda_version != MAIN_CUDA_VERSION:
            cuda_version_str = cuda_version.replace(".", "")[:3]
            version += f"+cu{cuda_version_str}"
    elif _is_hip():
        # Get the HIP version
        hipcc_version = get_hipcc_rocm_version()
        if hipcc_version != MAIN_CUDA_VERSION:
            rocm_version_str = hipcc_version.replace(".", "")[:3]
            version += f"+rocm{rocm_version_str}"
    elif _is_neuron():
        # Get the Neuron version
        neuron_version = str(get_neuronxcc_version())
        if neuron_version != MAIN_CUDA_VERSION:
            neuron_version_str = neuron_version.replace(".", "")[:3]
            version += f"+neuron{neuron_version_str}"
    elif _is_openvino():
        version += "+openvino"
    elif _is_tpu():
        version += "+tpu"
    elif _is_cpu():
        version += "+cpu"
    elif _is_xpu():
        version += "+xpu"
    else:
        raise RuntimeError("Unknown runtime environment: must be one of "
                           "CUDA, ROCm, Neuron, OpenVINO, TPU, CPU, or XPU.")

    return version
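
# Illustrative resulting versions (the base version "0.5.3" is hypothetical):
#   CUDA 12.1 (MAIN_CUDA_VERSION) -> "0.5.3"
#   CUDA 11.8                     -> "0.5.3+cu118"
#   hipcc/ROCm 5.7.1              -> "0.5.3+rocm571"
#   CPU target                    -> "0.5.3+cpu"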


def read_readme() -> str:
    """Read the README file if present."""
    p = get_path("README.md")
    if os.path.isfile(p):
        return io.open(get_path("README.md"), "r", encoding="utf-8").read()
    else:
        return ""


def get_requirements() -> List[str]:
    """Get Python package dependencies from requirements.txt."""

    def _read_requirements(filename: str) -> List[str]:
        with open(get_path(filename)) as f:
            requirements = f.read().strip().split("\n")
        resolved_requirements = []
        for line in requirements:
            if line.startswith("-r "):
                resolved_requirements += _read_requirements(line.split()[1])
            else:
                resolved_requirements.append(line)
        return resolved_requirements

    if _is_cuda():
        requirements = _read_requirements("requirements-cuda.txt")
        cuda_major, cuda_minor = torch.version.cuda.split(".")
        modified_requirements = []
        for req in requirements:
            if ("vllm-flash-attn" in req
                    and not (cuda_major == "12" and cuda_minor == "1")):
                # vllm-flash-attn is built only for CUDA 12.1.
                # Skip for other versions.
                continue
            modified_requirements.append(req)
        requirements = modified_requirements
    elif _is_hip():
        requirements = _read_requirements("requirements-rocm.txt")
    elif _is_neuron():
        requirements = _read_requirements("requirements-neuron.txt")
    elif _is_openvino():
        requirements = _read_requirements("requirements-openvino.txt")
    elif _is_tpu():
        requirements = _read_requirements("requirements-tpu.txt")
    elif _is_cpu():
        requirements = _read_requirements("requirements-cpu.txt")
    elif _is_xpu():
        requirements = _read_requirements("requirements-xpu.txt")
    else:
        raise ValueError(
            "Unsupported platform, please use CUDA, ROCm, Neuron, OpenVINO, "
            "TPU, CPU, or XPU.")
    return requirements
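
# Illustrative: a line such as "-r requirements-common.txt" inside one of the
# requirements files is expanded recursively by _read_requirements(); the
# exact file names referenced this way depend on the repository contents.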


ext_modules = []

if _is_cuda() or _is_hip():
    ext_modules.append(CMakeExtension(name="aphrodite._moe_C"))

if _build_custom_ops():
    ext_modules.append(CMakeExtension(name="aphrodite._C"))

if _install_punica() and (_is_cuda() or _is_hip()):
    ext_modules.append(CMakeExtension(name="aphrodite._punica_C"))

# TODO: see if hadamard kernels work with HIP
# if _install_hadamard() and _is_cuda():
#     ext_modules.append(CMakeExtension(name="aphrodite._hadamard_C"))

package_data = {
    "aphrodite": [
        "endpoints/kobold/klite.embd", "quantization/hadamard.safetensors",
        "py.typed", "modeling/layers/fused_moe/configs/*.json"
    ]
}

if os.environ.get("APHRODITE_USE_PRECOMPILED"):
    ext_modules = []
    package_data["aphrodite"].append("*.so")
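
# For example, `APHRODITE_USE_PRECOMPILED=1 pip install -e .` skips building
# the CMake extensions and instead packages any prebuilt *.so files already
# present in the aphrodite package directory.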

setup(
    name="aphrodite-engine",
    version=get_aphrodite_version(),
    author="PygmalionAI",
    license="AGPL 3.0",
    description="The inference engine for PygmalionAI models",
    long_description=read_readme(),
    long_description_content_type="text/markdown",
    url="https://github.com/PygmalionAI/aphrodite-engine",
    project_urls={
        "Homepage": "https://pygmalion.chat",
        "Documentation": "https://docs.pygmalion.chat",
        "GitHub": "https://github.com/PygmalionAI",
        "Huggingface": "https://huggingface.co/PygmalionAI",
    },
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",  # noqa: E501
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    packages=find_packages(exclude=("kernels", "examples", "tests*")),
    python_requires=">=3.8",
    install_requires=get_requirements(),
    extras_require={
        "flash-attn": ["flash-attn==2.5.8"],
        "tensorizer": ["tensorizer>=2.9.0"],
        "ray": ["ray>=2.9"],
    },
    ext_modules=ext_modules,
    cmdclass={"build_ext": cmake_build_ext} if _build_custom_ops() else {},
    package_data=package_data,
    entry_points={
        "console_scripts": [
            "aphrodite=aphrodite.endpoints.cli:main",
        ],
    },
)