// RAII wrapper around a recycled CUDA device allocation: memory is obtained
// from the recycling allocator in the constructor and returned in the destructor.
template <typename T, std::enable_if_t<std::is_trivial<T>::value, int> = 0>
struct cuda_device_buffer {
  recycle_allocator_cuda_device<T> allocator;
  T *device_side_buffer;
  size_t number_of_elements;

  cuda_device_buffer(const size_t number_of_elements, const size_t device_id = 0)
      : allocator{device_id}, number_of_elements(number_of_elements) {
    assert(device_id < max_number_gpus);
    device_side_buffer = allocator.allocate(number_of_elements);
  }
  ~cuda_device_buffer() {
    allocator.deallocate(device_side_buffer, number_of_elements);
  }
  // not yet implemented: copy/move construction and assignment are deleted
  cuda_device_buffer(cuda_device_buffer const &other) = delete;
  cuda_device_buffer operator=(cuda_device_buffer const &other) = delete;
  cuda_device_buffer(cuda_device_buffer const &&other) = delete;
  cuda_device_buffer operator=(cuda_device_buffer const &&other) = delete;
};
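// Usage sketch (illustrative, not part of the original header): constructs a
// device buffer via the recycling allocator, copies host data onto the device
// with cudaMemcpy (CUDA runtime API), and relies on the destructor to hand the
// allocation back when the buffer goes out of scope. The element type `float`,
// the function name, and the host pointer are arbitrary example choices.
inline void example_cuda_device_buffer_usage(const float *host_data, size_t n) {
  cuda_device_buffer<float> buffer(n); // device 0 by default
  cudaMemcpy(buffer.device_side_buffer, host_data, n * sizeof(float),
             cudaMemcpyHostToDevice);
  // ... launch kernels that read/write buffer.device_side_buffer ...
} // buffer destructor returns the memory to the recycling allocator here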
// Variant for aggregated (work-aggregation) regions: the device memory is
// obtained from an externally supplied Host_Allocator instead of an allocator
// owned by the buffer itself.
template <typename T, typename Host_Allocator, std::enable_if_t<std::is_trivial<T>::value, int> = 0>
struct cuda_aggregated_device_buffer {
  T *device_side_buffer;
  size_t number_of_elements;

  cuda_aggregated_device_buffer(size_t number_of_elements, Host_Allocator &alloc)
      : number_of_elements(number_of_elements), alloc(alloc) {
    device_side_buffer = alloc.allocate(number_of_elements);
  }
  ~cuda_aggregated_device_buffer() {
    alloc.deallocate(device_side_buffer, number_of_elements);
  }
  // not yet implemented: copy/move construction and assignment are deleted
  cuda_aggregated_device_buffer(cuda_aggregated_device_buffer const &other) = delete;
  cuda_aggregated_device_buffer operator=(cuda_aggregated_device_buffer const &other) = delete;
  cuda_aggregated_device_buffer(cuda_aggregated_device_buffer const &&other) = delete;
  cuda_aggregated_device_buffer operator=(cuda_aggregated_device_buffer const &&other) = delete;

  Host_Allocator &alloc; // will stay valid for the entire aggregation region and hence
                         // for the entire lifetime of this buffer
};
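// Usage sketch (illustrative, not part of the original header): the aggregated
// buffer stores only a reference to the allocator, so the caller must keep the
// allocator alive for the whole aggregation region. `Aggregation_Allocator` is
// a placeholder name for whatever allocator type that region provides; it is
// assumed to allocate/deallocate elements of the buffer's value type.
template <typename Aggregation_Allocator>
void example_aggregated_buffer_usage(Aggregation_Allocator &alloc, size_t n) {
  // the allocator reference is captured, so `alloc` must outlive `buffer`
  cuda_aggregated_device_buffer<float, Aggregation_Allocator> buffer(n, alloc);
  // ... use buffer.device_side_buffer within the aggregation region ...
} // destructor returns the memory through the same allocator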