// Allocators -*- C++ -*-

// Copyright (C) 2001-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/** @file ext/pool_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _POOL_ALLOCATOR_H
#define _POOL_ALLOCATOR_H 1

#include <bits/c++config.h>
#include <cstdlib>
#include <new>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
#include <bits/move.h>
#if __cplusplus >= 201103L
#include <type_traits>
#endif

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  using std::size_t;
  using std::ptrdiff_t;

  /**
   *  @brief  Base class for __pool_alloc.
   *
   *  Uses various allocators to fulfill underlying requests (and makes as
   *  few requests as possible when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from new
   *  1. If the clients request an object of size > _S_max_bytes, the resulting
   *     object will be obtained directly from new
   *  2. In all other cases, we allocate an object of size exactly
   *     _S_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   */
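  // Worked example (editorial note, not part of the upstream sources):
  // with _S_align == 8 and _S_max_bytes == 128 there are 16 per-size free
  // lists for blocks of 8, 16, ..., 128 bytes.  A request for 20 bytes is
  // rounded up by _M_round_up to 24 and served from the 24-byte free list,
  // while a request for 200 bytes exceeds _S_max_bytes and is passed
  // straight to ::operator new.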
  class __pool_alloc_base
  {
  protected:

    enum { _S_align = 8 };
    enum { _S_max_bytes = 128 };
    enum { _S_free_list_size = (size_t)_S_max_bytes / (size_t)_S_align };

    union _Obj
    {
      union _Obj* _M_free_list_link;
      char        _M_client_data[1];    // The client sees this.
    };

    static _Obj* volatile _S_free_list[_S_free_list_size];

    // Chunk allocation state.
    static char*  _S_start_free;
    static char*  _S_end_free;
    static size_t _S_heap_size;

    size_t
    _M_round_up(size_t __bytes)
    { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }

    _GLIBCXX_CONST _Obj* volatile*
    _M_get_free_list(size_t __bytes) throw ();

    __mutex&
    _M_get_mutex() throw ();

    // Returns an object of size __n, and optionally adds to size __n
    // free list.
    void*
    _M_refill(size_t __n);

    // Allocates a chunk for nobjs of size size.  nobjs may be reduced
    // if it is inconvenient to allocate the requested number.
    char*
    _M_allocate_chunk(size_t __n, int& __nobjs);
  };


  /**
   * @brief  Allocator using a memory pool with a single lock.
   * @ingroup allocators
   */
  template<typename _Tp>
    class __pool_alloc : private __pool_alloc_base
    {
    private:
      static _Atomic_word _S_force_new;

    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __pool_alloc<_Tp1> other; };

#if __cplusplus >= 201103L
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2103. propagate_on_container_move_assignment
      typedef std::true_type propagate_on_container_move_assignment;
#endif

      __pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      __pool_alloc(const __pool_alloc&) _GLIBCXX_USE_NOEXCEPT { }

      template<typename _Tp1>
        __pool_alloc(const __pool_alloc<_Tp1>&) _GLIBCXX_USE_NOEXCEPT { }

      ~__pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return size_t(-1) / sizeof(_Tp); }

#if __cplusplus >= 201103L
      template<typename _Up, typename... _Args>
        void
        construct(_Up* __p, _Args&&... __args)
        { ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
        void
        destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);
    };

  template<typename _Tp>
    inline bool
    operator==(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    _Atomic_word
    __pool_alloc<_Tp>::_S_force_new;
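  // Implementation note (editorial commentary, not in the upstream sources):
  // allocate() sends requests larger than _S_max_bytes, or any request at
  // all once the GLIBCXX_FORCE_NEW environment variable has been detected,
  // directly to ::operator new.  Smaller requests are rounded up to a
  // multiple of _S_align and served from the matching per-size free list
  // under a single mutex; an empty list is replenished via _M_refill().
  // deallocate() mirrors this: large blocks go back to ::operator delete,
  // while small blocks are pushed onto their free list and retained by the
  // pool rather than returned to the system.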
  template<typename _Tp>
    _Tp*
    __pool_alloc<_Tp>::allocate(size_type __n, const void*)
    {
      pointer __ret = 0;
      if (__builtin_expect(__n != 0, true))
        {
          if (__n > this->max_size())
            std::__throw_bad_alloc();

          // If there is a race through here, assume answer from getenv
          // will resolve in same direction.  Inspired by techniques
          // to efficiently support threading found in basic_string.h.
          if (_S_force_new == 0)
            {
              if (std::getenv("GLIBCXX_FORCE_NEW"))
                __atomic_add_dispatch(&_S_force_new, 1);
              else
                __atomic_add_dispatch(&_S_force_new, -1);
            }

          const size_t __bytes = __n * sizeof(_Tp);
          if (__bytes > size_t(_S_max_bytes) || _S_force_new > 0)
            __ret = static_cast<_Tp*>(::operator new(__bytes));
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);

              __scoped_lock sentry(_M_get_mutex());
              _Obj* __restrict__ __result = *__free_list;
              if (__builtin_expect(__result == 0, 0))
                __ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
              else
                {
                  *__free_list = __result->_M_free_list_link;
                  __ret = reinterpret_cast<_Tp*>(__result);
                }
              if (__ret == 0)
                std::__throw_bad_alloc();
            }
        }
      return __ret;
    }

  template<typename _Tp>
    void
    __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__n != 0 && __p != 0, true))
        {
          const size_t __bytes = __n * sizeof(_Tp);
          if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new > 0)
            ::operator delete(__p);
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);
              _Obj* __q = reinterpret_cast<_Obj*>(__p);

              __scoped_lock sentry(_M_get_mutex());
              __q->_M_free_list_link = *__free_list;
              *__free_list = __q;
            }
        }
    }

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif
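A minimal usage sketch follows; it is an editorial addition, not part of the header above. __pool_alloc can be plugged into a standard container as its allocator, and the container and loop shown here are illustrative only. The GLIBCXX_FORCE_NEW environment variable, checked in allocate() above, bypasses the pool entirely.

#include <list>
#include <ext/pool_allocator.h>

int main()
{
  // Each std::list node is allocated individually.  Nodes of at most
  // _S_max_bytes (128 bytes) are rounded up to a multiple of 8 and
  // recycled through the pool's free lists; setting GLIBCXX_FORCE_NEW
  // in the environment sends every request to ::operator new instead.
  std::list<int, __gnu_cxx::__pool_alloc<int> > numbers;
  for (int i = 0; i < 1000; ++i)
    numbers.push_back(i);
  return 0;
}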