cmake_minimum_required(VERSION 3.26)

project(aphrodite_extensions LANGUAGES CXX)

# CUDA by default, can be overridden by using -DAPHRODITE_TARGET_DEVICE=... (used by setup.py)
set(APHRODITE_TARGET_DEVICE "cuda" CACHE STRING "Target device backend for Aphrodite")

message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
message(STATUS "Target device: ${APHRODITE_TARGET_DEVICE}")
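#
# Example (illustrative): a CPU-only configure would look like
#
#   cmake -DAPHRODITE_TARGET_DEVICE=cpu ..
#
# As noted above, setup.py normally supplies this flag itself.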
include(${CMAKE_CURRENT_LIST_DIR}/cmake/utils.cmake)

# Suppress potential warnings about unused manually-specified variables
set(ignoreMe "${APHRODITE_PYTHON_PATH}")

#
# Supported python versions. These versions will be searched in order, the
# first match will be selected. These should be kept in sync with setup.py.
#
set(PYTHON_SUPPORTED_VERSIONS "3.8" "3.9" "3.10" "3.11" "3.12")
# Supported NVIDIA architectures.
set(CUDA_SUPPORTED_ARCHS "6.0;6.1;7.0;7.5;8.0;8.6;8.9;9.0")

# Supported AMD GPU architectures.
set(HIP_SUPPORTED_ARCHS "gfx906;gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101")
#
# Supported/expected torch versions for CUDA/ROCm.
#
# Currently, having an incorrect pytorch version results in a warning
# rather than an error.
#
# Note: the CUDA torch version is derived from pyproject.toml and various
# requirements.txt files and should be kept consistent. The ROCm torch
# version is derived from Dockerfile.rocm.
#
set(TORCH_SUPPORTED_VERSION_CUDA "2.4.0")
set(TORCH_SUPPORTED_VERSION_ROCM "2.5.0")
#
# Try to find python package with an executable that exactly matches
# `APHRODITE_PYTHON_EXECUTABLE` and is one of the supported versions.
#
if (APHRODITE_PYTHON_EXECUTABLE)
  find_python_from_executable(${APHRODITE_PYTHON_EXECUTABLE} "${PYTHON_SUPPORTED_VERSIONS}")
else()
  message(FATAL_ERROR
    "Please set APHRODITE_PYTHON_EXECUTABLE to the path of the desired python version"
    " before running cmake configure.")
endif()
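#
# For direct cmake invocations the interpreter is supplied on the command
# line, e.g. (illustrative path):
#
#   cmake -DAPHRODITE_PYTHON_EXECUTABLE=/usr/bin/python3.10 ..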
#
# Update cmake's `CMAKE_PREFIX_PATH` with torch location.
#
append_cmake_prefix_path("torch" "torch.utils.cmake_prefix_path")

# Ensure the 'nvcc' command is in the PATH
find_program(NVCC_EXECUTABLE nvcc)
if (CUDA_FOUND AND NOT NVCC_EXECUTABLE)
  message(FATAL_ERROR "nvcc not found")
endif()
#
# Import torch cmake configuration.
# Torch also imports CUDA (and partially HIP) languages with some customizations,
# so there is no need to do this explicitly with check_language/enable_language,
# etc.
#
find_package(Torch REQUIRED)

if(MSVC)
  find_package(CUDA REQUIRED)
  find_package(CUDAToolkit REQUIRED)
  # Add cuBLAS to the list of libraries to link against
  list(APPEND LIBS CUDA::cublas)
  set(CMAKE_CXX_STANDARD 17)
  set(CMAKE_CXX_STANDARD_REQUIRED ON)
  set(CMAKE_CUDA_STANDARD 17)
  set(CMAKE_CUDA_STANDARD_REQUIRED ON)
  # Replace -std=c++20 with -std=c++17 in APHRODITE_GPU_FLAGS
  if(APHRODITE_GPU_LANG STREQUAL "CUDA")
    list(APPEND APHRODITE_GPU_FLAGS "--std=c++17" "-Xcompiler -Wno-return-type")
  endif()
endif()
#
# Add the `default` target which detects which extensions should be
# built based on platform/architecture. This is the same logic that
# setup.py uses to select which extensions should be built and should
# be kept in sync.
#
# The `default` target makes direct use of cmake easier since knowledge
# of which extensions are supported has been factored in, e.g.
#
# mkdir build && cd build
# cmake -G Ninja -DAPHRODITE_PYTHON_EXECUTABLE=`which python3` -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=../aphrodite ..
# cmake --build . --target default
#
add_custom_target(default)
message(STATUS "Enabling core extension.")

# Define the _core_C extension.
# It is built for (almost) every target platform (excludes TPU and Neuron).
set(APHRODITE_EXT_SRC
  "kernels/core/torch_bindings.cpp")

define_gpu_extension_target(
  _core_C
  DESTINATION aphrodite
  LANGUAGE CXX
  SOURCES ${APHRODITE_EXT_SRC}
  COMPILE_FLAGS ${CXX_COMPILE_FLAGS}
  USE_SABI 3
  WITH_SOABI)

add_dependencies(default _core_C)
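# The core extension can also be built on its own once configured, e.g.
# (illustrative):
#
#   cmake --build . --target _core_C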
#
# Forward the non-CUDA device extensions to external CMake scripts.
#
if (NOT APHRODITE_TARGET_DEVICE STREQUAL "cuda" AND
    NOT APHRODITE_TARGET_DEVICE STREQUAL "rocm")
  if (APHRODITE_TARGET_DEVICE STREQUAL "cpu")
    include(${CMAKE_CURRENT_LIST_DIR}/cmake/cpu_extension.cmake)
  else()
    return()
  endif()
  return()
endif()
#
# Set up GPU language and check the torch version and warn if it isn't
# what is expected.
#
if (NOT HIP_FOUND AND CUDA_FOUND)
  set(APHRODITE_GPU_LANG "CUDA")

  if (NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_CUDA})
    message(WARNING "Pytorch version ${TORCH_SUPPORTED_VERSION_CUDA} "
      "expected for CUDA build, saw ${Torch_VERSION} instead.")
  endif()
elseif(HIP_FOUND)
  set(APHRODITE_GPU_LANG "HIP")

  # Importing torch recognizes and sets up some HIP/ROCm configuration but does
  # not let cmake recognize .hip files. In order to get cmake to understand the
  # .hip extension automatically, HIP must be enabled explicitly.
  enable_language(HIP)

  # ROCm 5.X and 6.X
  if (ROCM_VERSION_DEV_MAJOR GREATER_EQUAL 5 AND
      NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_ROCM})
    message(WARNING "Pytorch version >= ${TORCH_SUPPORTED_VERSION_ROCM} "
      "expected for ROCm build, saw ${Torch_VERSION} instead.")
  endif()
else()
  message(FATAL_ERROR "Can't find CUDA or HIP installation.")
endif()
#
# Override the GPU architectures detected by cmake/torch and filter them by
# the supported versions for the current language.
# The final set of arches is stored in `APHRODITE_GPU_ARCHES`.
#
override_gpu_arches(APHRODITE_GPU_ARCHES
  ${APHRODITE_GPU_LANG}
  "${${APHRODITE_GPU_LANG}_SUPPORTED_ARCHS}")
#
# Query torch for additional GPU compilation flags for the given
# `APHRODITE_GPU_LANG`.
# The final set of flags is stored in `APHRODITE_GPU_FLAGS`.
#
get_torch_gpu_compiler_flags(APHRODITE_GPU_FLAGS ${APHRODITE_GPU_LANG})
#
# Set nvcc parallelism.
#
if(NVCC_THREADS AND APHRODITE_GPU_LANG STREQUAL "CUDA")
  list(APPEND APHRODITE_GPU_FLAGS "--threads=${NVCC_THREADS}")
endif()
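# Example (illustrative): enable per-file nvcc parallelism at configure time
# with
#
#   cmake -DNVCC_THREADS=8 ..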
#
# Define other extension targets
#

#
# _C extension
#
set(APHRODITE_EXT_SRC
  "kernels/cache_kernels.cu"
  "kernels/attention/attention_kernels.cu"
  "kernels/pos_encoding_kernels.cu"
  "kernels/activation_kernels.cu"
  "kernels/layernorm_kernels.cu"
  "kernels/quantization/squeezellm/quant_cuda_kernel.cu"
  "kernels/quantization/gptq/q_gemm.cu"
  "kernels/quantization/compressed_tensors/int8_quant_kernels.cu"
  "kernels/quantization/fp8/common.cu"
  "kernels/cuda_utils_kernels.cu"
  "kernels/moe/align_block_size_kernel.cu"
  "kernels/prepare_inputs/advance_step.cu"
  "kernels/torch_bindings.cpp")
if(APHRODITE_GPU_LANG STREQUAL "CUDA")
  list(APPEND APHRODITE_EXT_SRC
    "kernels/quantization/fp6/fp6_linear.cu"
    "kernels/mamba/mamba_ssm/selective_scan_fwd.cu"
    "kernels/mamba/causal_conv1d/causal_conv1d.cu"
    "kernels/quantization/aqlm/gemm_kernels.cu"
    "kernels/quantization/awq/gemm_kernels.cu"
    "kernels/quantization/quip/origin_order.cu"
    "kernels/quantization/gptq_marlin/gptq_marlin.cu"
    "kernels/quantization/gptq_marlin/gptq_marlin_repack.cu"
    "kernels/quantization/marlin/dense/marlin_cuda_kernel.cu"
    "kernels/quantization/marlin/sparse/marlin_24_cuda_kernel.cu"
    "kernels/quantization/marlin/qqq/marlin_qqq_gemm_kernel.cu"
    "kernels/quantization/gguf/gguf_kernel.cu"
    "kernels/quantization/gptq_marlin/awq_marlin_repack.cu"
    "kernels/quantization/fp8/fp8_marlin.cu"
    "kernels/all_reduce/custom_all_reduce.cu"
    "kernels/permute_cols.cu"
    "kernels/sampling/sampling.cu")
  if(NOT MSVC)
    # Include CUTLASS only when needed
    include(FetchContent)
    set(CUTLASS_ENABLE_HEADERS_ONLY ON CACHE BOOL "Enable only the header library")
    FetchContent_Declare(
      cutlass
      GIT_REPOSITORY https://github.com/nvidia/cutlass.git
      GIT_TAG v3.5.1
      GIT_PROGRESS TRUE
      # Speed up CUTLASS download by retrieving only the specified GIT_TAG instead of the history.
      # Important: If GIT_SHALLOW is enabled then GIT_TAG works only with branch names and tags.
      # So if the GIT_TAG above is updated to a commit hash, GIT_SHALLOW must be set to FALSE
      GIT_SHALLOW TRUE
    )
    FetchContent_MakeAvailable(cutlass)

    list(APPEND APHRODITE_EXT_SRC
      "kernels/quantization/cutlass_w8a8/scaled_mm_entry.cu"
      "kernels/quantization/cutlass_w8a8/scaled_mm_c2x.cu"
      "kernels/quantization/cutlass_w8a8/scaled_mm_c3x.cu")

    # Enable sm90a for Hopper CUTLASS kernels when using newer CUDA
    if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.0)
      set_source_files_properties(
        "kernels/quantization/cutlass_w8a8/scaled_mm_c2x.cu"
        "kernels/quantization/cutlass_w8a8/scaled_mm_c3x.cu"
        PROPERTIES
        COMPILE_FLAGS
        "-gencode arch=compute_90a,code=sm_90a -Wno-psabi")
    endif()
  endif()
  #
  # Machete kernels
  # The machete kernels only work on Hopper and require CUDA 12.0 or later.
  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.0)
    #
    # For the Machete kernels we automatically generate sources for various
    # preselected input type pairs and schedules.
    # Generate sources:
    execute_process(
      COMMAND ${CMAKE_COMMAND} -E env
      PYTHONPATH=${CMAKE_CURRENT_SOURCE_DIR}/kernels/cutlass_extensions/:${CUTLASS_DIR}/python/:${APHRODITE_PYTHON_PATH}:$ENV{PYTHONPATH}
      ${Python_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/kernels/quantization/machete/generate.py
      RESULT_VARIABLE machete_generation_result
      OUTPUT_VARIABLE machete_generation_output
      OUTPUT_FILE ${CMAKE_CURRENT_BINARY_DIR}/machete_generation.log
      ERROR_FILE ${CMAKE_CURRENT_BINARY_DIR}/machete_generation.log
    )

    if (NOT machete_generation_result EQUAL 0)
      message(FATAL_ERROR "Machete generation failed."
        " Result: \"${machete_generation_result}\""
        "\nCheck the log for details: "
        "${CMAKE_CURRENT_BINARY_DIR}/machete_generation.log")
    else()
      message(STATUS "Machete generation completed successfully.")
    endif()
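    # If generation fails, the same script can be run by hand for debugging,
    # mirroring the PYTHONPATH assembled above (illustrative; <cutlass> stands
    # in for the fetched CUTLASS checkout):
    #
    #   PYTHONPATH=kernels/cutlass_extensions/:<cutlass>/python/ \
    #     python kernels/quantization/machete/generate.py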
    # Add machete generated sources
    file(GLOB MACHETE_GEN_SOURCES "kernels/quantization/machete/generated/*.cu")
    list(APPEND APHRODITE_EXT_SRC ${MACHETE_GEN_SOURCES})
    message(STATUS "Machete generated sources: ${MACHETE_GEN_SOURCES}")

    set_source_files_properties(
      ${MACHETE_GEN_SOURCES}
      PROPERTIES
      COMPILE_FLAGS
      "-gencode arch=compute_90a,code=sm_90a")
  endif()

  # Add the pytorch binding for machete (added even on CUDA < 12.0 so that we
  # can raise an error telling the user that this was built with an
  # incompatible CUDA version)
  list(APPEND APHRODITE_EXT_SRC
    kernels/quantization/machete/machete_pytorch.cu)
endif()
define_gpu_extension_target(
  _C
  DESTINATION aphrodite
  LANGUAGE ${APHRODITE_GPU_LANG}
  SOURCES ${APHRODITE_EXT_SRC}
  COMPILE_FLAGS ${APHRODITE_GPU_FLAGS}
  ARCHITECTURES ${APHRODITE_GPU_ARCHES}
  INCLUDE_DIRECTORIES ${CUTLASS_INCLUDE_DIR}
  LIBRARIES ${LIBS}
  USE_SABI 3
  WITH_SOABI)

# If CUTLASS is compiled on NVCC >= 12.5, it by default uses
# cudaGetDriverEntryPointByVersion as a wrapper to avoid directly calling the
# driver API. This causes problems when linking with earlier versions of CUDA.
# Setting this variable sidesteps the issue by calling the driver directly.
target_compile_definitions(_C PRIVATE CUTLASS_ENABLE_DIRECT_CUDA_DRIVER_CALL=1)
#
# _moe_C extension
#
set(APHRODITE_MOE_EXT_SRC
  "kernels/moe/torch_bindings.cpp"
  "kernels/moe/softmax.cu")

if(APHRODITE_GPU_LANG STREQUAL "CUDA")
  list(APPEND APHRODITE_MOE_EXT_SRC
    "kernels/moe/marlin_moe_ops.cu")
endif()

define_gpu_extension_target(
  _moe_C
  DESTINATION aphrodite
  LANGUAGE ${APHRODITE_GPU_LANG}
  SOURCES ${APHRODITE_MOE_EXT_SRC}
  COMPILE_FLAGS ${APHRODITE_GPU_FLAGS}
  ARCHITECTURES ${APHRODITE_GPU_ARCHES}
  USE_SABI 3
  WITH_SOABI)
if(APHRODITE_GPU_LANG STREQUAL "HIP")
  #
  # _rocm_C extension
  #
  set(APHRODITE_ROCM_EXT_SRC
    "kernels/rocm/torch_bindings.cpp"
    "kernels/rocm/attention.cu")

  define_gpu_extension_target(
    _rocm_C
    DESTINATION aphrodite
    LANGUAGE ${APHRODITE_GPU_LANG}
    SOURCES ${APHRODITE_ROCM_EXT_SRC}
    COMPILE_FLAGS ${APHRODITE_GPU_FLAGS}
    ARCHITECTURES ${APHRODITE_GPU_ARCHES}
    USE_SABI 3
    WITH_SOABI)
endif()
if(APHRODITE_GPU_LANG STREQUAL "CUDA" OR APHRODITE_GPU_LANG STREQUAL "HIP")
  message(STATUS "Enabling C extension.")
  add_dependencies(default _C)

  message(STATUS "Enabling moe extension.")
  add_dependencies(default _moe_C)
endif()

if(APHRODITE_GPU_LANG STREQUAL "HIP")
  message(STATUS "Enabling rocm extension.")
  add_dependencies(default _rocm_C)
endif()
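# Individual extension targets can also be built directly once configuration
# has completed, e.g. (illustrative):
#
#   cmake --build . --target _moe_C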