43 #ifndef _POOL_ALLOCATOR_H
44 #define _POOL_ALLOCATOR_H 1
// NOTE(review): extracted interior of the pool base class; the enclosing
// class header is not visible in this chunk, and numeric prefixes on
// these lines are extraction artifacts.
// Pool geometry: requests are rounded up to multiples of _S_align bytes;
// requests above _S_max_bytes bypass the pool (see allocate() below).
78 enum { _S_align = 8 };
79 enum { _S_max_bytes = 128 };
// One free list per size class: _S_max_bytes / _S_align lists.
80 enum { _S_free_list_size = (size_t)_S_max_bytes / (
size_t)_S_align };
// Free-list node members (presumably of a union _Obj — the union header
// is not visible here): a block on the list reuses its own storage for
// the link pointer; once handed out, the same bytes are client data.
84 union _Obj* _M_free_list_link;
85 char _M_client_data[1];
// Array of free-list heads, one per size class.
88 static _Obj*
volatile _S_free_list[_S_free_list_size];
// Current un-carved chunk [_S_start_free, _S_end_free) and a running
// total of bytes obtained from the system so far.
91 static char* _S_start_free;
92 static char* _S_end_free;
93 static size_t _S_heap_size;
96 _M_round_up(
size_t __bytes)
97 {
return ((__bytes + (
size_t)_S_align - 1) & ~((
size_t)_S_align - 1)); }
// Return the free-list slot for a request of __bytes
// (declaration only; return type line not visible in this extract).
100 _M_get_free_list(
size_t __bytes);
// Refill the free list for size __n (an _S_align multiple) from a fresh
// chunk and hand back one object (declaration only).
108 _M_refill(
size_t __n);
// Obtain raw memory for __nobjs objects of size __n; __nobjs is an
// in/out parameter — presumably reduced when memory is scarce; confirm
// against the definition (not in this extract).
113 _M_allocate_chunk(
size_t __n,
int& __nobjs);
121 template<
typename _Tp>
// NOTE(review): the "class __pool_alloc ..." line is missing from this
// extract; what follows is the class interior.
// Tri-state flag: 0 = not yet checked; > 0 = GLIBCXX_FORCE_NEW was set
// (bypass the pool); < 0 = env var absent (use the pool). Set once in
// allocate() below.
125 static _Atomic_word _S_force_new;
// Standard allocator member typedefs.
128 typedef size_t size_type;
129 typedef ptrdiff_t difference_type;
130 typedef _Tp* pointer;
131 typedef const _Tp* const_pointer;
132 typedef _Tp& reference;
133 typedef const _Tp& const_reference;
134 typedef _Tp value_type;
// Rebind support (allocator<_Tp> -> allocator<_Tp1>); the struct rebind
// bodies appear to have been dropped by the extraction.
136 template<
typename _Tp1>
144 template<
typename _Tp1>
// Return the address of __x; const overload below. Return types are not
// visible in this extract — presumably pointer / const_pointer.
150 address(reference __x)
const {
return &__x; }
153 address(const_reference __x)
const {
return &__x; }
// Largest __n accepted by allocate(): the number of _Tp objects that
// fit in the full size_t range.
156 max_size()
const throw()
157 {
return size_t(-1) /
sizeof(_Tp); }
// Copy-construct a _Tp at __p via placement new.
162 construct(pointer __p,
const _Tp& __val)
163 { ::new((
void *)__p) _Tp(__val); }
165 #ifdef __GXX_EXPERIMENTAL_CXX0X__
// C++0x: construct in place with perfectly-forwarded arguments.
// NOTE(review): the matching #endif is not visible in this extract.
166 template<
typename... _Args>
168 construct(pointer __p, _Args&&... __args)
169 { ::new((
void *)__p) _Tp(std::forward<_Args>(__args)...); }
// Invoke the destructor without releasing storage.
173 destroy(pointer __p) { __p->~_Tp(); }
// Allocate storage for __n objects of _Tp; the hint pointer is unused
// (declaration; definition appears later in this file).
176 allocate(size_type __n,
const void* = 0);
// Return storage for __n objects at __p to the pool or to
// ::operator delete (declaration; definition appears later).
179 deallocate(pointer __p, size_type __n);
// Comparison operators: all __pool_alloc<_Tp> instances are
// interchangeable (stateless allocator). The operator== overload body
// is not visible in this extract.
182 template<
typename _Tp>
187 template<
typename _Tp>
189 operator!=(
const __pool_alloc<_Tp>&,
const __pool_alloc<_Tp>&)
// Out-of-class definition of the per-instantiation _S_force_new flag;
// its type line (_Atomic_word) is not visible in this extract.
192 template<
typename _Tp>
194 __pool_alloc<_Tp>::_S_force_new;
196 template<
typename _Tp>
198 __pool_alloc<_Tp>::allocate(size_type __n,
const void*)
201 if (__builtin_expect(__n != 0,
true))
203 if (__builtin_expect(__n > this->max_size(),
false))
204 std::__throw_bad_alloc();
209 if (_S_force_new == 0)
211 if (std::getenv(
"GLIBCXX_FORCE_NEW"))
212 __atomic_add_dispatch(&_S_force_new, 1);
214 __atomic_add_dispatch(&_S_force_new, -1);
217 const size_t __bytes = __n *
sizeof(_Tp);
218 if (__bytes >
size_t(_S_max_bytes) || _S_force_new > 0)
219 __ret = static_cast<_Tp*>(::
operator new(__bytes));
222 _Obj*
volatile* __free_list = _M_get_free_list(__bytes);
224 __scoped_lock sentry(_M_get_mutex());
225 _Obj* __restrict__ __result = *__free_list;
226 if (__builtin_expect(__result == 0, 0))
227 __ret =
static_cast<_Tp*
>(_M_refill(_M_round_up(__bytes)));
230 *__free_list = __result->_M_free_list_link;
231 __ret =
reinterpret_cast<_Tp*
>(__result);
233 if (__builtin_expect(__ret == 0, 0))
234 std::__throw_bad_alloc();
240 template<
typename _Tp>
242 __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
244 if (__builtin_expect(__n != 0 && __p != 0,
true))
246 const size_t __bytes = __n *
sizeof(_Tp);
247 if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new > 0)
248 ::
operator delete(__p);
251 _Obj*
volatile* __free_list = _M_get_free_list(__bytes);
252 _Obj* __q =
reinterpret_cast<_Obj*
>(__p);
254 __scoped_lock sentry(_M_get_mutex());
255 __q ->_M_free_list_link = *__free_list;
261 _GLIBCXX_END_NAMESPACE
The top-level namespace for ISO C++ entities is std.
Base class for __pool_alloc.
GNU extensions for public use.
Allocator using a memory pool with a single lock.