cmake_minimum_required(VERSION 3.26)

project(aphrodite_extensions LANGUAGES CXX)

# CUDA by default, can be overridden by using -DAPHRODITE_TARGET_DEVICE=... (used by setup.py)
set(APHRODITE_TARGET_DEVICE "cuda" CACHE STRING "Target device backend for Aphrodite")
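
# Illustrative examples only (not part of the build logic): the device backend
# can be selected at configure time, e.g.
#   cmake -DAPHRODITE_TARGET_DEVICE=cuda ..
#   cmake -DAPHRODITE_TARGET_DEVICE=cpu ..
# Values other than "cuda"/"rocm"/"cpu" simply skip the device-specific
# extensions defined below.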

message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
message(STATUS "Target device: ${APHRODITE_TARGET_DEVICE}")

include(${CMAKE_CURRENT_LIST_DIR}/cmake/utils.cmake)

#
# Supported python versions. These versions will be searched in order, and the
# first match will be selected. These should be kept in sync with setup.py.
#
set(PYTHON_SUPPORTED_VERSIONS "3.8" "3.9" "3.10" "3.11" "3.12")

# Supported NVIDIA architectures.
set(CUDA_SUPPORTED_ARCHS "6.0;6.1;7.0;7.5;8.0;8.6;8.9;9.0")

# Supported AMD GPU architectures.
set(HIP_SUPPORTED_ARCHS "gfx906;gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100")

#
# Supported/expected torch versions for CUDA/ROCm.
#
# Currently, having an incorrect pytorch version results in a warning
# rather than an error.
#
# Note: the CUDA torch version is derived from pyproject.toml and various
# requirements.txt files and should be kept consistent. The ROCm torch
# versions are derived from Dockerfile.rocm.
#
set(TORCH_SUPPORTED_VERSION_CUDA "2.4.0")
set(TORCH_SUPPORTED_VERSION_ROCM "2.5.0")

#
# Try to find a python package with an executable that exactly matches
# `APHRODITE_PYTHON_EXECUTABLE` and is one of the supported versions.
#
if (APHRODITE_PYTHON_EXECUTABLE)
  find_python_from_executable(${APHRODITE_PYTHON_EXECUTABLE} "${PYTHON_SUPPORTED_VERSIONS}")
else()
  message(FATAL_ERROR
    "Please set APHRODITE_PYTHON_EXECUTABLE to the path of the desired python version"
    " before running cmake configure.")
endif()
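
# For reference, the check above can be satisfied by passing the interpreter
# explicitly at configure time (the path shown is illustrative):
#   cmake -DAPHRODITE_PYTHON_EXECUTABLE=/usr/bin/python3 ..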

#
# Update cmake's `CMAKE_PREFIX_PATH` with torch location.
#
append_cmake_prefix_path("torch" "torch.utils.cmake_prefix_path")
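
# For reference only: `append_cmake_prefix_path` (defined in cmake/utils.cmake)
# is roughly equivalent to querying torch for its cmake files by hand, e.g.
#   python -c "import torch; print(torch.utils.cmake_prefix_path)"
# and appending the printed directory to CMAKE_PREFIX_PATH before
# find_package(Torch) below.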

# Ensure the 'nvcc' command is in the PATH
find_program(NVCC_EXECUTABLE nvcc)
if (CUDA_FOUND AND NOT NVCC_EXECUTABLE)
  message(FATAL_ERROR "nvcc not found")
endif()

#
# Import torch cmake configuration.
# Torch also imports CUDA (and partially HIP) languages with some customizations,
# so there is no need to do this explicitly with check_language/enable_language,
# etc.
#
find_package(Torch REQUIRED)

#
# Add the `default` target which detects which extensions should be
# built based on platform/architecture. This is the same logic that
# setup.py uses to select which extensions should be built and should
# be kept in sync.
#
# The `default` target makes direct use of cmake easier since knowledge
# of which extensions are supported has been factored in, e.g.
#
#   mkdir build && cd build
#   cmake -G Ninja -DAPHRODITE_PYTHON_EXECUTABLE=`which python3` -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=../aphrodite ..
#   cmake --build . --target default
#
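# Individual extension targets defined later in this file can also be built
# directly from the same build directory, e.g. (illustrative):
#
#   cmake --build . --target _C
#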
add_custom_target(default)

message(STATUS "Enabling core extension.")

# Define the _core_C extension, built for (almost) every target platform
# (excludes TPU and Neuron).
set(APHRODITE_EXT_SRC
  "kernels/core/torch_bindings.cpp")

define_gpu_extension_target(
  _core_C
  DESTINATION aphrodite
  LANGUAGE CXX
  SOURCES ${APHRODITE_EXT_SRC}
  COMPILE_FLAGS ${CXX_COMPILE_FLAGS}
  USE_SABI 3
  WITH_SOABI)

add_dependencies(default _core_C)
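
# For reference only: with DESTINATION aphrodite and WITH_SOABI, the resulting
# library is intended to be importable as `aphrodite._core_C`. Assuming the
# extension was built into the aphrodite package directory (see the example
# configure command above), a quick smoke test would be:
#   python -c "import aphrodite._core_C"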

#
# Forward the non-CUDA device extensions to external CMake scripts.
#
if (NOT APHRODITE_TARGET_DEVICE STREQUAL "cuda" AND
    NOT APHRODITE_TARGET_DEVICE STREQUAL "rocm")
  if (APHRODITE_TARGET_DEVICE STREQUAL "cpu")
    include(${CMAKE_CURRENT_LIST_DIR}/cmake/cpu_extension.cmake)
  else()
    return()
  endif()
  return()
endif()

#
# Set up the GPU language, check the torch version, and warn if it isn't
# what is expected.
#
if (NOT HIP_FOUND AND CUDA_FOUND)
  set(APHRODITE_GPU_LANG "CUDA")

  if (NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_CUDA})
    message(WARNING "Pytorch version ${TORCH_SUPPORTED_VERSION_CUDA} "
      "expected for CUDA build, saw ${Torch_VERSION} instead.")
  endif()
elseif(HIP_FOUND)
  set(APHRODITE_GPU_LANG "HIP")

  # Importing torch recognizes and sets up some HIP/ROCm configuration but does
  # not let cmake recognize .hip files. In order to get cmake to understand the
  # .hip extension automatically, HIP must be enabled explicitly.
  enable_language(HIP)

  # ROCm 5.X and 6.X
  if (ROCM_VERSION_DEV_MAJOR GREATER_EQUAL 5 AND
      NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_ROCM})
    message(WARNING "Pytorch version >= ${TORCH_SUPPORTED_VERSION_ROCM} "
      "expected for ROCm build, saw ${Torch_VERSION} instead.")
  endif()
else()
  message(FATAL_ERROR "Can't find CUDA or HIP installation.")
endif()

#
# Override the GPU architectures detected by cmake/torch and filter them by
# the supported versions for the current language.
# The final set of arches is stored in `APHRODITE_GPU_ARCHES`.
#
override_gpu_arches(APHRODITE_GPU_ARCHES
  ${APHRODITE_GPU_LANG}
  "${${APHRODITE_GPU_LANG}_SUPPORTED_ARCHS}")

#
# Query torch for additional GPU compilation flags for the given
# `APHRODITE_GPU_LANG`.
# The final set of flags is stored in `APHRODITE_GPU_FLAGS`.
#
get_torch_gpu_compiler_flags(APHRODITE_GPU_FLAGS ${APHRODITE_GPU_LANG})

#
# Set nvcc parallelism.
#
if(NVCC_THREADS AND APHRODITE_GPU_LANG STREQUAL "CUDA")
  list(APPEND APHRODITE_GPU_FLAGS "--threads=${NVCC_THREADS}")
endif()
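
# For reference, nvcc parallelism can be requested at configure time
# (the thread count is illustrative):
#   cmake -DNVCC_THREADS=8 ..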

#
# Define other extension targets
#

#
# _C extension
#
set(APHRODITE_EXT_SRC
  "kernels/cache_kernels.cu"
  "kernels/attention/attention_kernels.cu"
  "kernels/pos_encoding_kernels.cu"
  "kernels/activation_kernels.cu"
  "kernels/layernorm_kernels.cu"
  "kernels/quantization/squeezellm/quant_cuda_kernel.cu"
  "kernels/quantization/gptq/q_gemm.cu"
  "kernels/quantization/compressed_tensors/int8_quant_kernels.cu"
  "kernels/quantization/fp8/common.cu"
  "kernels/cuda_utils_kernels.cu"
  "kernels/moe/align_block_size_kernel.cu"
  "kernels/prepare_inputs/advance_step.cu"
  "kernels/torch_bindings.cpp")

if(APHRODITE_GPU_LANG STREQUAL "CUDA")
  include(FetchContent)
  set(CUTLASS_ENABLE_HEADERS_ONLY ON CACHE BOOL "Enable only the CUTLASS header library")
  FetchContent_Declare(
    cutlass
    GIT_REPOSITORY https://github.com/nvidia/cutlass.git
    # CUTLASS 3.5.1
    GIT_TAG 06b21349bcf6ddf6a1686a47a137ad1446579db9
  )
  FetchContent_MakeAvailable(cutlass)
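
  # For reference only: the cutlass checkout can be redirected to a local copy
  # through FetchContent's standard override variable (path is illustrative):
  #   cmake -DFETCHCONTENT_SOURCE_DIR_CUTLASS=/path/to/cutlass ..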

  list(APPEND APHRODITE_EXT_SRC
    "kernels/quantization/fp6/fp6_linear.cu"
    "kernels/mamba/mamba_ssm/selective_scan_fwd.cu"
    "kernels/mamba/causal_conv1d/causal_conv1d.cu"
    "kernels/quantization/aqlm/gemm_kernels.cu"
    "kernels/quantization/awq/gemm_kernels.cu"
    "kernels/quantization/quip/origin_order.cu"
    "kernels/quantization/marlin/dense/marlin_cuda_kernel.cu"
    "kernels/quantization/marlin/sparse/marlin_24_cuda_kernel.cu"
    "kernels/quantization/marlin/qqq/marlin_qqq_gemm_kernel.cu"
    "kernels/quantization/gptq_marlin/gptq_marlin.cu"
    "kernels/quantization/gptq_marlin/gptq_marlin_repack.cu"
    "kernels/quantization/gguf/gguf_kernel.cu"
    "kernels/quantization/gptq_marlin/awq_marlin_repack.cu"
    "kernels/quantization/fp8/fp8_marlin.cu"
    "kernels/all_reduce/custom_all_reduce.cu"
    "kernels/quantization/cutlass_w8a8/scaled_mm_entry.cu"
    "kernels/quantization/cutlass_w8a8/scaled_mm_c2x.cu"
    "kernels/quantization/cutlass_w8a8/scaled_mm_c3x.cu")

  #
  # The CUTLASS kernels for Hopper require sm90a to be enabled.
  # This is done via the below gencode option, BUT that creates kernels for both sm90 and sm90a.
  # That adds an extra 17MB to the compiled binary, so instead we selectively enable it.
  #
  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.0)
    set_source_files_properties(
      "kernels/quantization/cutlass_w8a8/scaled_mm_c2x.cu"
      "kernels/quantization/cutlass_w8a8/scaled_mm_c3x.cu"
      PROPERTIES
      COMPILE_FLAGS
      "-gencode arch=compute_90a,code=sm_90a -Wno-psabi")
  endif()
endif()

define_gpu_extension_target(
  _C
  DESTINATION aphrodite
  LANGUAGE ${APHRODITE_GPU_LANG}
  SOURCES ${APHRODITE_EXT_SRC}
  COMPILE_FLAGS ${APHRODITE_GPU_FLAGS}
  ARCHITECTURES ${APHRODITE_GPU_ARCHES}
  INCLUDE_DIRECTORIES ${CUTLASS_INCLUDE_DIR}
  USE_SABI 3
  WITH_SOABI)

#
# _punica_C extension
#
set(APHRODITE_PUNICA_EXT_SRC
  "kernels/punica/bgmv/bgmv_bf16_bf16_bf16.cu"
  "kernels/punica/bgmv/bgmv_bf16_bf16_fp16.cu"
  "kernels/punica/bgmv/bgmv_bf16_fp16_bf16.cu"
  "kernels/punica/bgmv/bgmv_bf16_fp16_fp16.cu"
  "kernels/punica/bgmv/bgmv_bf16_fp32_bf16.cu"
  "kernels/punica/bgmv/bgmv_bf16_fp32_fp16.cu"
  "kernels/punica/bgmv/bgmv_fp16_bf16_bf16.cu"
  "kernels/punica/bgmv/bgmv_fp16_bf16_fp16.cu"
  "kernels/punica/bgmv/bgmv_fp16_fp16_bf16.cu"
  "kernels/punica/bgmv/bgmv_fp16_fp16_fp16.cu"
  "kernels/punica/bgmv/bgmv_fp16_fp32_bf16.cu"
  "kernels/punica/bgmv/bgmv_fp16_fp32_fp16.cu"
  "kernels/punica/bgmv/bgmv_fp32_bf16_bf16.cu"
  "kernels/punica/bgmv/bgmv_fp32_bf16_fp16.cu"
  "kernels/punica/bgmv/bgmv_fp32_fp16_bf16.cu"
  "kernels/punica/bgmv/bgmv_fp32_fp16_fp16.cu"
  "kernels/punica/bgmv/bgmv_fp32_fp32_bf16.cu"
  "kernels/punica/bgmv/bgmv_fp32_fp32_fp16.cu"
  "kernels/punica/punica_ops.cu"
  "kernels/punica/torch_bindings.cpp")

#
# Copy GPU compilation flags + update for punica.
#
set(APHRODITE_PUNICA_GPU_FLAGS ${APHRODITE_GPU_FLAGS})
list(REMOVE_ITEM APHRODITE_PUNICA_GPU_FLAGS
  "-D__CUDA_NO_HALF_OPERATORS__"
  "-D__CUDA_NO_HALF_CONVERSIONS__"
  "-D__CUDA_NO_BFLOAT16_CONVERSIONS__"
  "-D__CUDA_NO_HALF2_OPERATORS__")

#
# Filter out CUDA architectures < 8.0 for punica.
#
if (${APHRODITE_GPU_LANG} STREQUAL "CUDA")
  set(APHRODITE_PUNICA_GPU_ARCHES)
  foreach(ARCH ${APHRODITE_GPU_ARCHES})
    string_to_ver(CODE_VER ${ARCH})
    if (CODE_VER GREATER_EQUAL 8.0)
      list(APPEND APHRODITE_PUNICA_GPU_ARCHES ${ARCH})
    endif()
  endforeach()
  message(STATUS "Punica target arches: ${APHRODITE_PUNICA_GPU_ARCHES}")
elseif(${APHRODITE_GPU_LANG} STREQUAL "HIP")
  set(APHRODITE_PUNICA_GPU_ARCHES ${APHRODITE_GPU_ARCHES})
  message(STATUS "Punica target arches: ${APHRODITE_PUNICA_GPU_ARCHES}")
endif()
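
#
# For illustration only: with the CUDA_SUPPORTED_ARCHS defined above, a detected
# arch list of, say, "7.5;8.0;8.6;9.0" would leave punica with "8.0;8.6;9.0",
# since 7.5 is dropped by the >= 8.0 check.
#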

if (APHRODITE_PUNICA_GPU_ARCHES)
  define_gpu_extension_target(
    _punica_C
    DESTINATION aphrodite
    LANGUAGE ${APHRODITE_GPU_LANG}
    SOURCES ${APHRODITE_PUNICA_EXT_SRC}
    COMPILE_FLAGS ${APHRODITE_PUNICA_GPU_FLAGS}
    ARCHITECTURES ${APHRODITE_PUNICA_GPU_ARCHES}
    USE_SABI 3
    WITH_SOABI)
else()
  message(WARNING "Unable to create _punica_C target because none of the "
    "requested architectures (${APHRODITE_GPU_ARCHES}) are supported, i.e. >= 8.0")
endif()

#
# _moe_C extension
#
set(APHRODITE_MOE_EXT_SRC
  "kernels/moe/torch_bindings.cpp"
  "kernels/moe/softmax.cu")

define_gpu_extension_target(
  _moe_C
  DESTINATION aphrodite
  LANGUAGE ${APHRODITE_GPU_LANG}
  SOURCES ${APHRODITE_MOE_EXT_SRC}
  COMPILE_FLAGS ${APHRODITE_GPU_FLAGS}
  ARCHITECTURES ${APHRODITE_GPU_ARCHES}
  USE_SABI 3
  WITH_SOABI)

if(APHRODITE_GPU_LANG STREQUAL "CUDA" OR APHRODITE_GPU_LANG STREQUAL "HIP")
  message(STATUS "Enabling C extension.")
  add_dependencies(default _C)

  message(STATUS "Enabling moe extension.")
  add_dependencies(default _moe_C)

  # Enable punica if -DAPHRODITE_INSTALL_PUNICA_KERNELS=ON is passed or
  # APHRODITE_INSTALL_PUNICA_KERNELS is set in the environment, and there
  # are supported target arches (see the illustrative example at the end
  # of this file).
  if (APHRODITE_PUNICA_GPU_ARCHES AND
      (DEFINED ENV{APHRODITE_INSTALL_PUNICA_KERNELS} OR APHRODITE_INSTALL_PUNICA_KERNELS))
    message(STATUS "Enabling punica extension.")
    add_dependencies(default _punica_C)
  endif()
endif()
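
#
# For illustration only (not part of the build logic): punica kernels can be
# requested either at configure time or via the environment, assuming at least
# one target arch >= 8.0 survived the filter above, e.g.
#
#   cmake -DAPHRODITE_INSTALL_PUNICA_KERNELS=ON ..
#   # or
#   APHRODITE_INSTALL_PUNICA_KERNELS=1 cmake ..
#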