Proton  1.1.1
Make porting easy from Python to C++11
pool.hpp
#ifndef PROTON_POOL_HEADER
#define PROTON_POOL_HEADER
#include <new>
#include <cstddef>

#ifndef PROTON_POOL_DEBUG
#define PROTON_POOL_DEBUG 1
#endif

#if PROTON_POOL_DEBUG
#define PROTON_POOL_THROW_IF PROTON_THROW_IF
#else
#define PROTON_POOL_THROW_IF(cond, out)
#endif

namespace proton{

class mem_pool;
void pool_free(void* p);

namespace detail{

class pool_block;
class seg_pool;

void mmfree(void* p);
void* __mmdup(void* p);

/** header of a chunk, the basic unit of a memory block.
 */
struct chunk_header{
    chunk_header* next_free;
    pool_block* parent; ///< NULL means the chunk was malloc-ed directly
};

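// Layout sketch (inferred from pool_free()/pool_dup() below): every allocation
// is prefixed by its chunk_header, and the user receives the address just past
// the header, so the header is always recoverable from a user pointer:
//
//   [ chunk_header | user data ... ]
//                  ^-- pointer handed to the user
//
//   detail::chunk_header* ch = (detail::chunk_header*)(p) - 1; // header of p
//   void* user = (void*)(ch + 1);                              // back to p
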
/** basic list header for pool_block.
 */
class list_header {
protected:
    list_header* _prev;
    list_header* _next;

private:
    list_header(const list_header& lh); ///< disabled
public:
    list_header():_prev(NULL), _next(NULL)
    {}

    /// init as a circle
    list_header(int):_prev(this), _next(this)
    {}

    bool empty()const
    {
        return (_next==this || _next==NULL);
    }

    void erase_from_list()
    {
        if(_prev)
            _prev->_next=_next;
        if(_next)
            _next->_prev=_prev;
        _prev=NULL;
        _next=NULL;
    }

    ~list_header()
    {
        if(_prev)
            _prev->_next=_next;
        if(_next)
            _next->_prev=_prev;
    }

    list_header* next()const
    {
        return _next;
    }

    list_header* prev()const
    {
        return _prev;
    }

    void insert_after(list_header* prev)
    {
        PROTON_POOL_THROW_IF(_prev, "invalid prev");
        PROTON_POOL_THROW_IF(_next, "invalid next");

        _prev=prev;
        if(prev){
            _next=prev->_next;
            prev->_next=this;
        }
        if(_next)
            _next->_prev=this;
    }

    void insert_before(list_header* next)
    {
        PROTON_POOL_THROW_IF(_prev, "invalid prev");
        PROTON_POOL_THROW_IF(_next, "invalid next");

        _next=next;
        if(_next){
            _prev=next->_prev;
            _next->_prev=this;
        }
        if(_prev){
            _prev->_next=this;
        }
    }
};

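// A minimal sketch (not part of the original header) of how the sentinel form
// of list_header is used; `ring` and `node` are hypothetical names:
//
//   proton::detail::list_header ring(0);  // int ctor: a one-element circle
//   proton::detail::list_header node;     // detached node
//   node.insert_after(&ring);             // ring <-> node <-> ring
//   bool busy = !ring.empty();            // true while the circle has nodes
//   node.erase_from_list();               // detach; ring is empty again
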
inline pool_block* get_block(list_header* lh)
{
    return (pool_block*)(lh);
}

/** a pool block contains many chunks of one size.
 */
class pool_block:public list_header {
    friend class seg_pool;
protected:

    seg_pool* _parent;

    size_t _block_size; // 4
    size_t _chunk_size;
    size_t _chunk_cap;

    size_t _chunk_cnt;
    size_t _chunk_max; // 4

    char* _unalloc_chunk;
    chunk_header* _free_header;
    // chunk_header // 3 + 1 of mmheader
    // data
private:
    pool_block(const pool_block& a); ///< disabled
public:
    pool_block(size_t chunk_size, size_t block_size, seg_pool* parent);
    ~pool_block();

    void* malloc_one();
    void free_chunk(chunk_header* ch);

    seg_pool* parent()
    {
        return _parent;
    }
    size_t block_size()
    {
        return _block_size;
    }
    pool_block* prev_block()
    {
        return get_block(_prev);
    }
    pool_block* next_block()
    {
        return get_block(_next);
    }

    bool full()
    {
        return _chunk_cnt==_chunk_cap;
    }
    bool empty()
    {
        return _chunk_cnt==0;
    }
};

/** a seg_pool contains many pool_blocks for the same size range.
 */
class seg_pool {
    friend class pool_block;
    friend class proton::mem_pool;
    friend void proton::pool_free(void* p);
protected:
    mem_pool* _parent;

    size_t _chunk_size; ///< 0 means: new directly
    size_t _chunk_min_size; ///< _chunk_min_size <= real_size <= _chunk_size

    list_header _free_blocks;
    list_header _empty_blocks;
    list_header _full_blocks;

    size_t _total_block_size;
    size_t _min_block_size;

    pool_block* get_free_block()
    {
        list_header* p=_free_blocks.next();
        if(p==&_free_blocks)
            return NULL;
        else
            return get_block(p);
    }

    void malloc_block(); ///< get a new pool_block
    void release_block(pool_block* p);

    void reg_free_block(pool_block* p);
    void reg_full_block(pool_block* p);
    void reg_empty_block(pool_block* p);

    void free_chunk(chunk_header* ch);
    void purge_circle(list_header* lh);

private:
    seg_pool(const seg_pool& a); ///< disabled
public:
    seg_pool();
    ~seg_pool();

    void init(size_t chunk_size, size_t chunk_min_size, mem_pool* parent);
    void destroy();
    void purge();

    size_t chunk_size()const
    {
        return _chunk_size;
    }
    size_t chunk_min_size()const
    {
        return _chunk_min_size;
    }

    void* malloc(size_t size, size_t n=1);
    void* malloc_one(); ///< alloc a single chunk

    void get_info(size_t& free_cnt, size_t& free_cap, size_t& empty_cap, size_t& full_cnt);
    void print_info(bool print_null);
};

} // namespace detail

/** @addtogroup pool Smart allocator
 * A high-performance and smart allocator.
 * @{
 */

#define PROTON_META_BLOCK_MAX 128

/** the main memory pool.
 * mem_pool contains many seg_pools for different size ranges.
 */
class mem_pool {
protected:
    detail::seg_pool _segs[PROTON_META_BLOCK_MAX+1];
    size_t _seg_cnt;
    size_t _seg_linear_cnt;

    void compute_sizes(size_t max, size_t align, size_t factor);

private:
    mem_pool(const mem_pool& a); ///< disabled

public:
    mem_pool(size_t max=16*1024*sizeof(long), size_t factor=16);
    ~mem_pool();

    void destroy();
    void purge();

    void* malloc(size_t size, size_t n=1); // malloc size*n

    detail::seg_pool* get_seg(size_t size);

    size_t get_seg_cnt()const
    {
        return _seg_cnt;
    }
    size_t get_max_chunk_size()const
    {
        return _segs[_seg_cnt-1].chunk_size();
    }

    size_t get_seg_total(); ///< get total memory usage of seg pools
    size_t get_seg_free(); ///< get total free allocatable memory of seg pools

    void print_info();
};

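// A minimal usage sketch (illustrative only, not in the original header).
// mem_pool hands out chunks via malloc(), and pool_free() routes each pointer
// back to its owner through the hidden chunk_header:
//
//   proton::mem_pool mp;                 // default: max chunk 16*1024*sizeof(long)
//   void* p = mp.malloc(64);             // served by the seg_pool for size 64
//   void* q = mp.malloc(sizeof(int), 8); // allocates size*n bytes
//   proton::pool_free(p);                // no pool reference needed to free
//   proton::pool_free(q);
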
inline void pool_free(void *p)
{
    if(p){
        detail::chunk_header* ch=(detail::chunk_header*)(p)-1;
        if(ch->parent){
            ch->parent->parent()->free_chunk(ch);
        }
        else{
            detail::mmfree((void*)ch);
        }
    }
}

inline void* pool_dup(void *p)
{
    if(p){
        detail::chunk_header* ch=(detail::chunk_header*)(p)-1;
        if(ch->parent){
            // pooled chunk: allocate another chunk from the same seg_pool
            return ch->parent->parent()->malloc_one();
        }
        else{
            // directly malloc-ed block: duplicate it and fix up the header
            detail::chunk_header* q=(detail::chunk_header*)detail::__mmdup((void*)ch);
            q->parent=NULL;
            return (void*)(q+1);
        }
    }
    else
        return NULL;
}

/////////////////////////////////////////////////////
// pools

template<typename pool_tag> mem_pool* get_pool_()
{
    static mem_pool alloc;
    return &alloc;
}

struct tmp_pool {}; // temporary pool
struct per_pool {}; // persistent pool

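// Each tag type selects its own function-local singleton, so new pools can be
// added without touching this header. A hedged sketch; `stat_pool` is a
// hypothetical tag, not part of the library:
//
//   struct stat_pool {};                                  // a dedicated pool
//   void* s = proton::get_pool_<stat_pool>()->malloc(32); // its own mem_pool
//   proton::pool_free(s);
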
inline void* tmp_malloc(size_t size)
{
    return get_pool_<tmp_pool>()->malloc(size);
}

inline void* per_malloc(size_t size)
{
    return get_pool_<per_pool>()->malloc(size);
}

// arg must include its own parentheses, e.g. tmp_new(T,(a,b))
#define tmp_new(T,arg) (new(tmp_malloc(sizeof(T))) T arg)
#define per_new(T,arg) (new(per_malloc(sizeof(T))) T arg)

template<typename T> void pool_delete(T* p)
{
    p->~T();
    pool_free((void*)p);
}

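// A minimal sketch of the placement-new pair above (`point` is a hypothetical
// type used only for illustration):
//
//   struct point { int x, y; point(int a, int b):x(a),y(b){} };
//
//   point* pt = tmp_new(point, (1,2)); // construct in pooled memory
//   pool_delete(pt);                   // runs ~point(), then pool_free()
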
/** An extended allocator using the memory pool.
 * Besides the normal std::allocator interface, smart_allocator also supports
 * confiscate() and duplicate(); confiscate(), duplicate() and allocate() must be
 * static in smart_allocator.
 * confiscate() is a general free function that deallocates memory blocks without depending on T.
 * duplicate() allocates a new memory block whose size equals that of a given allocated block.
 */
template<class T, typename pool_tag=tmp_pool> class smart_allocator {
public:
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T value_type;
    template<class U> struct rebind{
        typedef smart_allocator<U, pool_tag> other;
    };

    smart_allocator()
    {
    }

    // default copy constructor
    // default assignment operator
    // default destructor

    template<class U> smart_allocator(const smart_allocator<U, pool_tag>&)
    {
    }

    static pointer address(reference x)
    {
        return &x;
    }
    static const_pointer address(const_reference x)
    {
        return &x;
    }

    static size_type max_size()
    {
        return size_type(-1);
    }

    static pointer allocate(size_type n)
    {
        // resolve the seg_pool responsible for sizeof(T) once per T/pool_tag
        static detail::seg_pool* meta=get_pool_<pool_tag>()->get_seg(sizeof(value_type));
        pointer r=(pointer)meta->malloc(sizeof(T), n);
        if(!r)
            throw std::bad_alloc();
        return r;
    }

    static pointer allocate(size_type n, const void* const)
    {
        return allocate(n);
    }

    static void deallocate(pointer p, size_type n)
    {
        if(p)
            pool_free(p);
    }

    /** Free a memory block without depending on T.
     * Unlike deallocate(), confiscate() doesn't depend on type T information.
     * confiscate() CAN safely free any pointer to memory blocks allocated by the same
     * template of allocator, no matter what T is.
     * @param p pointer to the memory block to be freed
     */
    static void confiscate(void* p)
    {
        pool_free(p);
    }

    /** Allocate a memory block sized like an existing one, without depending on T.
     * duplicate() allocates a new memory block whose size equals that of the given
     * allocated block, no matter what T is.
     * @param p pointer to an allocated memory block whose size is to be matched
     * @return pointer to the newly allocated block
     */
    static void* duplicate(void* p)
    {
        return pool_dup(p);
    }

    static void construct(pointer p, const T& val)
    {
        new (p) T(val);
    }

    static void construct(pointer p)
    {
        new (p) T();
    }

    static void destroy(pointer p)
    {
        p->~T();
    }

    bool operator==(const smart_allocator&) const
    { return true; }

    bool operator!=(const smart_allocator&) const
    { return false; }
};

template<class _Ty, class _Other, typename pool_type> inline
bool operator==(const smart_allocator<_Ty,pool_type>&, const smart_allocator<_Other,pool_type>&)
{ // test for allocator equality (always true)
    return (true);
}

template<class _Ty, class _Other, typename pool_type> inline
bool operator!=(const smart_allocator<_Ty,pool_type>&, const smart_allocator<_Other,pool_type>&)
{ // test for allocator inequality (always false)
    return (false);
}

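// A minimal sketch of plugging smart_allocator into a standard container
// (illustrative only; needs the usual <vector> include):
//
//   #include <vector>
//
//   std::vector<int, proton::smart_allocator<int>> v;                 // tmp_pool
//   std::vector<int, proton::smart_allocator<int, proton::per_pool>> w;
//   v.push_back(42); // elements live in the seg_pool for sizeof(int)
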
template<typename pool_tag> class smart_allocator<void, pool_tag>
{
public:
    typedef void* pointer;
    typedef const void* const_pointer;
    typedef void value_type;
    template<class U> struct rebind {
        typedef smart_allocator<U, pool_tag> other;
    };
};

/**
 * @}
 */

} // namespace proton

#endif // PROTON_POOL_HEADER