Unified Memory Group Allocator
Allocator.h
#ifndef GALLOCATOR_HH
#define GALLOCATOR_HH

// Group-based allocation front end: maps integer group ids to GroupAllocator
// instances that manage the underlying unified-memory allocations.

#include <cstdlib>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

#include <cuda_runtime.h>  // cudaStream_t, cudaStreamDefault, cudaCpuDeviceId

#include "GroupAllocator.h"

namespace groupallocator {

    // Registry of group id -> GroupAllocator, shared by the functions below.
    static std::mutex groupMapMutex;
    static std::unordered_map<int, std::shared_ptr<GroupAllocator>> allocator;

    // Settings applied when a group's GroupAllocator is first created.
    struct Context {
        Context() = default;
        ~Context() = default;

        std::size_t page_size = 4096;
    };

    // Allocates space of size s for *ptr in the given group, creating that
    // group's GroupAllocator on first use with ctx.page_size.
    template<typename T>
    void allocate(T **ptr, size_t s, const Context ctx, int group = -1, bool forceAligned128 = false) {
        std::shared_ptr<GroupAllocator> g;
        {
            std::lock_guard<std::mutex> lock(groupMapMutex);
            g = allocator[group];
            if (g == nullptr) {
                g = std::make_shared<GroupAllocator>(group, ctx.page_size);
                allocator[group] = g;
            }
        }
        g->allocate<T>(ptr, s, forceAligned128);
    }

    // Returns p to its group's GroupAllocator, if that group exists.
    template<typename T>
    void free(T *p, int group = -1) {
        std::shared_ptr<GroupAllocator> g;
        {
            std::lock_guard<std::mutex> lock(groupMapMutex);
            g = allocator[group];
        }
        if (g != nullptr) {
            g->free(p);
        }
    }

    // Releases every allocation in every group.
    inline void freeall() {
        std::lock_guard<std::mutex> lock(groupMapMutex);
        for (auto &elm : allocator) {
            if (elm.second != nullptr) {  // skip groups that were looked up but never created
                elm.second->freeall();
            }
        }
    }

    // Migrates the group's allocations to the given GPU, using the given stream.
    inline void moveToGPU(int group = -1, int gpuID = 0, cudaStream_t stream = cudaStreamDefault) {
        std::shared_ptr<GroupAllocator> g;
        {
            std::lock_guard<std::mutex> lock(groupMapMutex);
            g = allocator[group];
        }
        if (g != nullptr) {
            g->moveToDevice(gpuID, stream);
        }
    }

    // Migrates the group's allocations back to host memory, using the given stream.
    inline void moveToCPU(int group = -1, cudaStream_t stream = cudaStreamDefault) {
        std::shared_ptr<GroupAllocator> g;
        {
            std::lock_guard<std::mutex> lock(groupMapMutex);
            g = allocator[group];
        }
        if (g != nullptr) {
            g->moveToDevice(cudaCpuDeviceId, stream);
        }
    }

}  // namespace groupallocator
#endif  // GALLOCATOR_HH
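
The sketch below shows how host code might use this interface. It is an illustration, not taken from the project: it assumes the pointers handed back by GroupAllocator are directly readable and writable on the host (e.g. CUDA managed memory), that the size argument is a byte count, and that group 0 and the array length are arbitrary choices.

#include <cstddef>
#include <cstdio>

#include "Allocator.h"

int main() {
    groupallocator::Context ctx;               // page_size defaults to 4096
    const std::size_t n = 256;

    // Allocate n ints in group 0 (size assumed to be in bytes).
    int *data = nullptr;
    groupallocator::allocate(&data, n * sizeof(int), ctx, /*group=*/0);

    for (std::size_t i = 0; i < n; ++i) {
        data[i] = static_cast<int>(i);         // written directly from the host
    }
    std::printf("data[%zu] = %d\n", n - 1, data[n - 1]);

    // Return the single allocation to its group ...
    groupallocator::free(data, /*group=*/0);
    // ... or release every group at shutdown with groupallocator::freeall().
    return 0;
}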
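
A second sketch covers the migration helpers. Again, this is an assumption-laden illustration rather than the project's own example: it presumes the allocations are CUDA managed memory that the device can access once moveToGPU has run, and the kernel, group id 1, and GPU 0 are chosen only for demonstration. It would be compiled as CUDA C++ (e.g. with nvcc).

#include <cstdio>

#include <cuda_runtime.h>

#include "Allocator.h"

__global__ void scaleKernel(float *v, int n, float s) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

int main() {
    groupallocator::Context ctx;
    const int n = 1 << 20;

    float *v = nullptr;
    groupallocator::allocate(&v, n * sizeof(float), ctx, /*group=*/1);
    for (int i = 0; i < n; ++i) v[i] = 1.0f;

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // Hint the driver to migrate group 1 onto GPU 0 before the kernel runs.
    groupallocator::moveToGPU(/*group=*/1, /*gpuID=*/0, stream);
    scaleKernel<<<(n + 255) / 256, 256, 0, stream>>>(v, n, 2.0f);

    // Bring the group back to the host and wait for the stream to drain.
    groupallocator::moveToCPU(/*group=*/1, stream);
    cudaStreamSynchronize(stream);

    std::printf("v[0] = %.1f\n", v[0]);        // expect 2.0

    cudaStreamDestroy(stream);
    groupallocator::freeall();
    return 0;
}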