#
# Attempt to find the Python installation whose interpreter matches
# `EXECUTABLE` and whose version is one of the `SUPPORTED_VERSIONS`.
#
macro (find_python_from_executable EXECUTABLE SUPPORTED_VERSIONS)
  file(REAL_PATH ${EXECUTABLE} EXECUTABLE)
  set(Python_EXECUTABLE ${EXECUTABLE})
  find_package(Python COMPONENTS Interpreter Development.Module)
  if (NOT Python_FOUND)
    message(FATAL_ERROR "Unable to find python matching: ${EXECUTABLE}.")
  endif()
  set(_VER "${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}")
  set(_SUPPORTED_VERSIONS_LIST ${SUPPORTED_VERSIONS} ${ARGN})
  if (NOT _VER IN_LIST _SUPPORTED_VERSIONS_LIST)
    message(FATAL_ERROR
      "Python version (${_VER}) is not one of the supported versions: "
      "${_SUPPORTED_VERSIONS_LIST}.")
  endif()
  message(STATUS "Found python matching: ${EXECUTABLE}.")
endmacro()
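
# Example usage (illustrative; the interpreter path and version list are
# assumptions, not project requirements):
#
#   find_python_from_executable(/usr/bin/python3 "3.8;3.9;3.10;3.11")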

#
# Run `EXPR` in python. The standard output of python is stored in `OUT` and
# has trailing whitespace stripped. If an error is encountered when running
# python, a fatal message `ERR_MSG` is issued.
#
function (run_python OUT EXPR ERR_MSG)
  execute_process(
    COMMAND
    "${Python_EXECUTABLE}" "-c" "${EXPR}"
    OUTPUT_VARIABLE PYTHON_OUT
    RESULT_VARIABLE PYTHON_ERROR_CODE
    ERROR_VARIABLE PYTHON_STDERR
    OUTPUT_STRIP_TRAILING_WHITESPACE)
  if(NOT PYTHON_ERROR_CODE EQUAL 0)
    message(FATAL_ERROR "${ERR_MSG}: ${PYTHON_STDERR}")
  endif()
  set(${OUT} ${PYTHON_OUT} PARENT_SCOPE)
endfunction()
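
# Example usage (illustrative; stores the interpreter's torch version in
# `TORCH_VERSION`):
#
#   run_python(TORCH_VERSION "import torch; print(torch.__version__)"
#     "Failed to determine torch version")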

# Run `EXPR` in python after importing `PKG`. Use the result of this to extend
# `CMAKE_PREFIX_PATH` so the torch cmake configuration can be imported.
macro (append_cmake_prefix_path PKG EXPR)
  run_python(_PREFIX_PATH
    "import ${PKG}; print(${EXPR})" "Failed to locate ${PKG} path")
  list(APPEND CMAKE_PREFIX_PATH ${_PREFIX_PATH})
endmacro()
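
# Example usage (illustrative; `torch.utils.cmake_prefix_path` is the usual
# expression for making `find_package(Torch)` resolve against the installed
# pip package):
#
#   append_cmake_prefix_path("torch" "torch.utils.cmake_prefix_path")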

#
# Add a target named `hipify${NAME}` that runs the hipify preprocessor on a set
# of CUDA source files. The names of the corresponding "hipified" sources are
# stored in `OUT_SRCS`.
#
function (hipify_sources_target OUT_SRCS NAME ORIG_SRCS)
  #
  # Split into C++ and non-C++ (i.e. CUDA) sources.
  #
  set(SRCS ${ORIG_SRCS})
  set(CXX_SRCS ${ORIG_SRCS})
  list(FILTER SRCS EXCLUDE REGEX "\.(cc|cpp)$")
  list(FILTER CXX_SRCS INCLUDE REGEX "\.(cc|cpp)$")

  #
  # Generate ROCm/HIP source file names from CUDA file names.
  # Since HIP files are generated code, they will appear in the build area
  # `CMAKE_CURRENT_BINARY_DIR` directory rather than the original kernels dir.
  #
  set(HIP_SRCS)
  foreach (SRC ${SRCS})
    string(REGEX REPLACE "\.cu$" "\.hip" SRC ${SRC})
    string(REGEX REPLACE "cuda" "hip" SRC ${SRC})
    list(APPEND HIP_SRCS "${CMAKE_CURRENT_BINARY_DIR}/${SRC}")
  endforeach()

  set(CSRC_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/kernels)
  add_custom_target(
    hipify${NAME}
    COMMAND ${CMAKE_SOURCE_DIR}/cmake/hipify.py -p ${CMAKE_SOURCE_DIR}/kernels -o ${CSRC_BUILD_DIR} ${SRCS}
    DEPENDS ${CMAKE_SOURCE_DIR}/cmake/hipify.py ${SRCS}
    BYPRODUCTS ${HIP_SRCS}
    COMMENT "Running hipify on ${NAME} extension source files.")

  # Swap out original extension sources with hipified sources.
  list(APPEND HIP_SRCS ${CXX_SRCS})
  set(${OUT_SRCS} ${HIP_SRCS} PARENT_SCOPE)
endfunction()
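
# Example usage (illustrative; the target suffix and source-list variable are
# placeholders):
#
#   hipify_sources_target(HIPIFIED_SRCS _ext_C "${EXT_SRCS}")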

#
# Get additional GPU compiler flags from torch.
#
function (get_torch_gpu_compiler_flags OUT_GPU_FLAGS GPU_LANG)
  if (${GPU_LANG} STREQUAL "CUDA")
    #
    # Get common NVCC flags from torch.
    #
    run_python(GPU_FLAGS
      "from torch.utils.cpp_extension import COMMON_NVCC_FLAGS; print(';'.join(COMMON_NVCC_FLAGS))"
      "Failed to determine torch nvcc compiler flags")

    if (CUDA_VERSION VERSION_GREATER_EQUAL 11.8)
      list(APPEND GPU_FLAGS "-DENABLE_FP8_E5M2")
    endif()
    if (CUDA_VERSION VERSION_GREATER_EQUAL 12.0)
      list(REMOVE_ITEM GPU_FLAGS
        "-D__CUDA_NO_HALF_OPERATORS__"
        "-D__CUDA_NO_HALF_CONVERSIONS__"
        "-D__CUDA_NO_BFLOAT16_CONVERSIONS__"
        "-D__CUDA_NO_HALF2_OPERATORS__")
    endif()

  elseif(${GPU_LANG} STREQUAL "HIP")
    #
    # Get common HIP/HIPCC flags from torch.
    #
    run_python(GPU_FLAGS
      "import torch.utils.cpp_extension as t; print(';'.join(t.COMMON_HIP_FLAGS + t.COMMON_HIPCC_FLAGS))"
      "Failed to determine torch hipcc compiler flags")

    list(APPEND GPU_FLAGS
      "-DUSE_ROCM"
      "-DENABLE_FP8_E4M3"
      "-U__HIP_NO_HALF_CONVERSIONS__"
      "-U__HIP_NO_HALF_OPERATORS__"
      "-fno-gpu-rdc")
  endif()

  set(${OUT_GPU_FLAGS} ${GPU_FLAGS} PARENT_SCOPE)
endfunction()
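
# Example usage (illustrative; `GPU_LANG` and the output variable are
# placeholders set by the surrounding build configuration):
#
#   get_torch_gpu_compiler_flags(EXT_GPU_FLAGS ${GPU_LANG})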

# Macro for converting a `gencode` version number to a cmake version number.
macro(string_to_ver OUT_VER IN_STR)
  string(REGEX REPLACE "\([0-9]+\)\([0-9]\)" "\\1.\\2" ${OUT_VER} ${IN_STR})
endmacro()
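
# Example (illustrative): `string_to_ver(_VER "90")` sets `_VER` to "9.0",
# matching the cmake-style versions used in the supported-arch lists below.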

#
# Override the GPU architectures detected by cmake/torch and filter them by
# `GPU_SUPPORTED_ARCHES`. Sets the final set of architectures in
# `GPU_ARCHES`.
#
# Note: this is defined as a macro since it updates `CMAKE_CUDA_FLAGS`.
#
macro(override_gpu_arches GPU_ARCHES GPU_LANG GPU_SUPPORTED_ARCHES)
  set(_GPU_SUPPORTED_ARCHES_LIST ${GPU_SUPPORTED_ARCHES} ${ARGN})
  message(STATUS "${GPU_LANG} supported arches: ${_GPU_SUPPORTED_ARCHES_LIST}")

  if (${GPU_LANG} STREQUAL "HIP")
    #
    # `GPU_ARCHES` controls the `--offload-arch` flags.
    # `CMAKE_HIP_ARCHITECTURES` is set up by torch and can be controlled
    # via the `PYTORCH_ROCM_ARCH` env variable.
    #
    # Find the intersection of the supported + detected architectures to
    # set the module architecture flags.
    #
    set(${GPU_ARCHES})
    foreach (_ARCH ${CMAKE_HIP_ARCHITECTURES})
      if (_ARCH IN_LIST _GPU_SUPPORTED_ARCHES_LIST)
        list(APPEND ${GPU_ARCHES} ${_ARCH})
      endif()
    endforeach()

    if(NOT ${GPU_ARCHES})
      message(FATAL_ERROR
        "None of the detected ROCm architectures (${CMAKE_HIP_ARCHITECTURES}) "
        "are supported. Supported ROCm architectures are: "
        "${_GPU_SUPPORTED_ARCHES_LIST}.")
    endif()

  elseif(${GPU_LANG} STREQUAL "CUDA")
    #
    # Setup/process CUDA arch flags.
    #
    # The torch cmake setup hardcodes the detected architecture flags in
    # `CMAKE_CUDA_FLAGS`. Since `CMAKE_CUDA_FLAGS` is a "global" variable, it
    # can't be modified on a per-target basis, e.g. for the `punica` extension.
    # So, all the `-gencode` flags need to be extracted and removed from
    # `CMAKE_CUDA_FLAGS` for processing so they can be passed by another method.
    # Since it's not possible to use `target_compile_options` for adding target
    # specific `-gencode` arguments, the target's `CUDA_ARCHITECTURES` property
    # must be used instead. This requires repackaging the architecture flags
    # into a format that cmake expects for `CUDA_ARCHITECTURES`.
    #
    # This is a bit fragile in that it depends on torch using `-gencode` as
    # opposed to one of the other nvcc options to specify architectures.
    #
    # Note: torch uses the `TORCH_CUDA_ARCH_LIST` environment variable to
    # override detected architectures.
    #
    message(DEBUG "initial CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}")

    # Extract all `-gencode` flags from `CMAKE_CUDA_FLAGS`.
    string(REGEX MATCHALL "-gencode arch=[^ ]+" _CUDA_ARCH_FLAGS
      ${CMAKE_CUDA_FLAGS})

    # Remove all `-gencode` flags from `CMAKE_CUDA_FLAGS` since they will be
    # modified and passed back via the `CUDA_ARCHITECTURES` property.
    string(REGEX REPLACE "-gencode arch=[^ ]+ *" "" CMAKE_CUDA_FLAGS
      ${CMAKE_CUDA_FLAGS})

    # If this error is triggered, it might mean that torch has changed how it
    # sets up nvcc architecture code generation flags.
    if (NOT _CUDA_ARCH_FLAGS)
      message(FATAL_ERROR
        "Could not find any architecture related code generation flags in "
        "CMAKE_CUDA_FLAGS. (${CMAKE_CUDA_FLAGS})")
    endif()

    message(DEBUG "final CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}")
    message(DEBUG "arch flags: ${_CUDA_ARCH_FLAGS}")

    # Initialize the architecture list to empty.
    set(${GPU_ARCHES})

    # Process each `gencode` flag.
    foreach(_ARCH ${_CUDA_ARCH_FLAGS})
      # For each flag, extract the version number and whether it refers to PTX
      # or native code.
      # Note: if a regex matches then `CMAKE_MATCH_1` holds the binding
      # for that match.
      string(REGEX MATCH "arch=compute_\([0-9]+a?\)" _COMPUTE ${_ARCH})
      if (_COMPUTE)
        set(_COMPUTE ${CMAKE_MATCH_1})
      endif()

      string(REGEX MATCH "code=sm_\([0-9]+a?\)" _SM ${_ARCH})
      if (_SM)
        set(_SM ${CMAKE_MATCH_1})
      endif()

      string(REGEX MATCH "code=compute_\([0-9]+a?\)" _CODE ${_ARCH})
      if (_CODE)
        set(_CODE ${CMAKE_MATCH_1})
      endif()

      # Make sure the virtual architecture can be matched.
      if (NOT _COMPUTE)
        message(FATAL_ERROR
          "Could not determine virtual architecture from: ${_ARCH}.")
      endif()

      # One of sm_ or compute_ must exist.
      if ((NOT _SM) AND (NOT _CODE))
        message(FATAL_ERROR
          "Could not determine a codegen architecture from: ${_ARCH}.")
      endif()

      if (_SM)
        # The `-real` suffix tells cmake to generate only ELF (SASS) code for
        # the kernels. This is what we want; otherwise PTX is also embedded by
        # default, which increases the binary size.
        set(_VIRT "-real")
        set(_CODE_ARCH ${_SM})
      else()
        # The `-virtual` suffix tells cmake to generate only PTX code for the
        # kernels.
        set(_VIRT "-virtual")
        set(_CODE_ARCH ${_CODE})
      endif()

      # Check if the current version is in the supported arch list.
      string_to_ver(_CODE_VER ${_CODE_ARCH})
      if (NOT _CODE_VER IN_LIST _GPU_SUPPORTED_ARCHES_LIST)
        message(STATUS "discarding unsupported CUDA arch ${_CODE_VER}.")
        continue()
      endif()

      # Add it to the arch list.
      list(APPEND ${GPU_ARCHES} "${_CODE_ARCH}${_VIRT}")
    endforeach()
  endif()

  message(STATUS "${GPU_LANG} target arches: ${${GPU_ARCHES}}")
endmacro()
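
# Example usage (illustrative; the output variable and supported-arch list are
# assumptions, not the project's actual configuration):
#
#   override_gpu_arches(EXT_GPU_ARCHES CUDA "7.0;7.5;8.0;8.6;8.9;9.0")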

#
# Define a target named `GPU_MOD_NAME` for a single extension. The
# arguments are:
#
# DESTINATION <dest>         - Module destination directory.
# LANGUAGE <lang>            - The GPU language for this module, e.g. CUDA, HIP,
#                              etc.
# SOURCES <sources>          - List of source files relative to CMakeLists.txt
#                              directory.
#
# Optional arguments:
#
# ARCHITECTURES <arches>     - A list of target GPU architectures in cmake
#                              format. Refer to the `CMAKE_CUDA_ARCHITECTURES`
#                              and `CMAKE_HIP_ARCHITECTURES` documentation for
#                              more info. If not provided, cmake's defaults
#                              are used.
# COMPILE_FLAGS <flags>      - Extra compiler flags passed to NVCC/hipcc.
# INCLUDE_DIRECTORIES <dirs> - Extra include directories.
# LIBRARIES <libraries>      - Extra link libraries.
# WITH_SOABI                 - Generate library with python SOABI suffix name.
#
# Note: optimization level/debug info is set via cmake build type.
#
function (define_gpu_extension_target GPU_MOD_NAME)
  cmake_parse_arguments(PARSE_ARGV 1
    GPU
    "WITH_SOABI"
    "DESTINATION;LANGUAGE"
    "SOURCES;ARCHITECTURES;COMPILE_FLAGS;INCLUDE_DIRECTORIES;LIBRARIES")

  # Add hipify preprocessing step when building with HIP/ROCm.
  if (GPU_LANGUAGE STREQUAL "HIP")
    hipify_sources_target(GPU_SOURCES ${GPU_MOD_NAME} "${GPU_SOURCES}")
  endif()

  if (GPU_WITH_SOABI)
    set(GPU_WITH_SOABI WITH_SOABI)
  else()
    set(GPU_WITH_SOABI)
  endif()

  Python_add_library(${GPU_MOD_NAME} MODULE "${GPU_SOURCES}" ${GPU_WITH_SOABI})

  if (GPU_LANGUAGE STREQUAL "HIP")
    # Make this target dependent on the hipify preprocessor step.
    add_dependencies(${GPU_MOD_NAME} hipify${GPU_MOD_NAME})
  endif()

  if (GPU_ARCHITECTURES)
    set_target_properties(${GPU_MOD_NAME} PROPERTIES
      ${GPU_LANGUAGE}_ARCHITECTURES "${GPU_ARCHITECTURES}")
  endif()

  set_property(TARGET ${GPU_MOD_NAME} PROPERTY CXX_STANDARD 20)

  target_compile_options(${GPU_MOD_NAME} PRIVATE
    $<$<COMPILE_LANGUAGE:${GPU_LANGUAGE}>:${GPU_COMPILE_FLAGS}>)

  target_compile_definitions(${GPU_MOD_NAME} PRIVATE
    "-DTORCH_EXTENSION_NAME=${GPU_MOD_NAME}")

  target_include_directories(${GPU_MOD_NAME} PRIVATE kernels
    ${GPU_INCLUDE_DIRECTORIES})

  target_link_libraries(${GPU_MOD_NAME} PRIVATE torch ${torch_python_LIBRARY}
    ${GPU_LIBRARIES})

  # Don't use `TORCH_LIBRARIES` for CUDA since it pulls in a bunch of
  # dependencies that are not necessary and may not be installed.
  if (GPU_LANGUAGE STREQUAL "CUDA")
    target_link_libraries(${GPU_MOD_NAME} PRIVATE ${CUDA_CUDA_LIB}
      ${CUDA_LIBRARIES})
  else()
    target_link_libraries(${GPU_MOD_NAME} PRIVATE ${TORCH_LIBRARIES})
  endif()

  install(TARGETS ${GPU_MOD_NAME} LIBRARY DESTINATION ${GPU_DESTINATION})
endfunction()
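
# Example usage (illustrative; the module name, destination, and variables
# shown are placeholders, not the project's actual configuration):
#
#   define_gpu_extension_target(
#     _ext_C
#     DESTINATION my_package
#     LANGUAGE ${GPU_LANG}
#     SOURCES ${EXT_SRCS}
#     COMPILE_FLAGS ${EXT_GPU_FLAGS}
#     ARCHITECTURES ${EXT_GPU_ARCHES}
#     WITH_SOABI)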