#pragma once
#include <ATen/cuda/Exceptions.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAConfig.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>

namespace at {
namespace native {

// Checked narrowing cast from int64_t to int. Raises an error if the value
// does not fit; `varname` names the value in the error message.
static inline int cuda_int_cast(int64_t value, const char* varname) {
  auto result = static_cast<int>(value);
  TORCH_CHECK(static_cast<int64_t>(result) == value,
              "cuda_int_cast: The value of ", varname, " (", (long long)value,
              ") is too large to fit into an int (", sizeof(int), " bytes)");
  return result;
}

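// Usage sketch (illustrative; the tensor `self` and the names `m`/`n` are
// hypothetical): callers typically narrow int64_t tensor sizes with this
// helper before passing them to cuBLAS/cuSOLVER routines that take plain
// ints, e.g.
//
//   int m = cuda_int_cast(self.size(-2), "m");
//   int n = cuda_int_cast(self.size(-1), "n");
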
// Creates an array of `size` elements of type T, backed by pinned memory
// wrapped in a Storage.
template <class T>
static inline Storage pin_memory(int64_t size) {
  auto* allocator = cuda::getPinnedMemoryAllocator();
  int64_t adjusted_size = size * sizeof(T);
  return Storage(
      Storage::use_byte_size_t(),
      adjusted_size,
      allocator,
      /*resizable=*/false);
}

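// Usage sketch (illustrative; `batch_size` is hypothetical): allocate pinned
// host memory for per-matrix `info` status codes that a batched GPU solver
// writes back to the host, e.g.
//
//   auto infos = pin_memory<int>(batch_size);
//   int* infos_ptr = static_cast<int*>(infos.mutable_data());
//
// Pinned (page-locked) memory can participate in asynchronous host<->device
// copies, which is why host-side scratch buffers for GPU linear algebra are
// allocated this way.
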
} // namespace native
} // namespace at