utils.py

  1. """Utils."""
  2. import enum
  3. from platform import uname
  4. import uuid
  5. import psutil
  6. import torch
  7. from aphrodite import cuda_utils


class Device(enum.Enum):
    GPU = enum.auto()
    CPU = enum.auto()


class Counter:
    """A simple monotonically increasing counter."""

    def __init__(self, start: int = 0) -> None:
        self.counter = start

    def __next__(self) -> int:
        i = self.counter
        self.counter += 1
        return i

    def reset(self) -> None:
        self.counter = 0
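
# Illustrative usage (a sketch, not part of the original module): Counter is an
# id generator driven via next(), e.g. for handing out request or sequence ids.
#
#     request_counter = Counter()
#     first_id = next(request_counter)    # 0
#     second_id = next(request_counter)   # 1
#     request_counter.reset()             # next(request_counter) yields 0 again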


def get_max_shared_memory_bytes(gpu: int = 0) -> int:
    """Returns the maximum shared memory per thread block in bytes."""
    # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html
    cudaDevAttrMaxSharedMemoryPerBlockOptin = 97  # pylint: disable=invalid-name
    max_shared_mem = cuda_utils.get_device_attribute(
        cudaDevAttrMaxSharedMemoryPerBlockOptin, gpu)
    return int(max_shared_mem)
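
# Illustrative sketch (assumed usage, not from the original file): the opt-in
# per-block limit queried above is typically used to check that a kernel's
# shared-memory requirement fits on the device.
#
#     required_smem = 48 * 1024  # hypothetical per-block requirement of a kernel
#     assert required_smem <= get_max_shared_memory_bytes(gpu=0), (
#         "kernel needs more shared memory per block than the device allows")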


def get_gpu_memory(gpu: int = 0) -> int:
    """Returns the total memory of the GPU in bytes."""
    return torch.cuda.get_device_properties(gpu).total_memory


def get_cpu_memory() -> int:
    """Returns the total CPU memory of the node in bytes."""
    return psutil.virtual_memory().total


def random_uuid() -> str:
    return str(uuid.uuid4().hex)


def in_wsl() -> bool:
    # Reference: https://github.com/microsoft/WSL/issues/4071
    return "microsoft" in " ".join(uname()).lower()