// utils.cpp — binds OpenMP worker threads and memory allocations to a
// user-specified CPU/NUMA-node set (originally ~2.0 KB).
  1. #include <numa.h>
  2. #include <unistd.h>
  3. #include <string>
  4. #include <sched.h>
  5. #include "cpu_types.hpp"
  6. void init_cpu_threads_env(const std::string& cpu_ids) {
  7. bitmask* omp_cpu_mask = numa_parse_cpustring(cpu_ids.c_str());
  8. TORCH_CHECK(omp_cpu_mask->size > 0);
  9. std::vector<int> omp_cpu_ids;
  10. omp_cpu_ids.reserve(omp_cpu_mask->size);
  11. constexpr int group_size = 8 * sizeof(*omp_cpu_mask->maskp);
  12. for (int offset = 0; offset < omp_cpu_mask->size; offset += group_size) {
  13. unsigned long group_mask = omp_cpu_mask->maskp[offset / group_size];
  14. int i = 0;
  15. while (group_mask) {
  16. if (group_mask & 1) {
  17. omp_cpu_ids.emplace_back(offset + i);
  18. }
  19. ++i;
  20. group_mask >>= 1;
  21. }
  22. }
  23. // Memory node binding
  24. if (numa_available() != -1) {
  25. int mem_node_id = numa_node_of_cpu(omp_cpu_ids.front());
  26. bitmask* mask = numa_parse_nodestring(std::to_string(mem_node_id).c_str());
  27. bitmask* src_mask = numa_get_membind();
  28. int pid = getpid();
  29. // move all existing pages to the specified numa node.
  30. *(src_mask->maskp) = *(src_mask->maskp) ^ *(mask->maskp);
  31. int page_num = numa_migrate_pages(pid, src_mask, mask);
  32. if (page_num == -1) {
  33. TORCH_CHECK(false,
  34. "numa_migrate_pages failed. errno: " + std::to_string(errno));
  35. }
  36. // restrict memory allocation node.
  37. numa_set_membind(mask);
  38. numa_set_strict(1);
  39. }
  40. // OMP threads binding
  41. omp_set_num_threads((int)omp_cpu_ids.size());
  42. torch::set_num_threads((int)omp_cpu_ids.size());
  43. TORCH_CHECK_EQ(omp_cpu_ids.size(), torch::get_num_threads());
  44. TORCH_CHECK_EQ(omp_cpu_ids.size(), omp_get_max_threads());
  45. #pragma omp parallel for schedule(static, 1)
  46. for (size_t i = 0; i < omp_cpu_ids.size(); ++i) {
  47. cpu_set_t* mask = CPU_ALLOC(omp_cpu_mask->size);
  48. size_t size = CPU_ALLOC_SIZE(omp_cpu_mask->size);
  49. CPU_ZERO_S(size, mask);
  50. CPU_SET_S(omp_cpu_ids[i], size, mask);
  51. sched_setaffinity(0, sizeof(cpu_set_t), mask);
  52. CPU_FREE(mask);
  53. }
  54. numa_free_nodemask(omp_cpu_mask);
  55. }