/*
 * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
 *
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1997
 * Moscow Center for SPARC Technology
 *
 * Copyright (c) 1999
 * Boris Fomitchev
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

#include "stlport_prefix.h"

#include <memory>

#if defined (__GNUC__) && (defined (__CYGWIN__) || defined (__MINGW32__)) && (!defined (__SYMBIAN32__))
# include <malloc.h>
//# define _STLP_MALLOC_USABLE_SIZE(__buf) malloc_usable_size(__buf)
#endif

#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
# include <pthread_alloc>
# include <cerrno>
#endif

#include <stl/_threads.h>

#include "lock_free_slist.h"

#if defined(__SYMBIAN32__WSD__)
#include "libstdcppwsd.h"
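
// Background note (an assumption about the platform, not stated in this
// file): Symbian DLLs traditionally may not contain writable static data
// (WSD), so the macros below redirect every static member used in this
// file to a per-process accessor provided by libstdcppwsd.h. For example,
// each mention of __oom_handler textually expands to get_oom_handler().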

#define __oom_handler get_oom_handler()
#define _S_lock get_allocator_S_lock()
#define _S_heap_size get_S_heap_size()
#define _S_start_free get_S_start_free()
#define _S_end_free get_S_end_free()
#define _S_free_list get_S_free_list()
#define _S_chunk_allocator_lock get_S_chunk_allocator_lock()
#define _S_free_per_thread_states get_S_free_per_thread_states()
#define _S_key get_S_key()
#define _S_key_initialized get_S_key_initialized()


#endif

#if defined (__WATCOMC__)
# pragma warning 13 9
# pragma warning 367 9
# pragma warning 368 9
#endif

#if defined (_STLP_SGI_THREADS)
// We test whether threads are in use before locking.
// Perhaps this should be moved into stl_threads.h, but that
// probably makes it harder to avoid the procedure call when
// it isn't needed.
extern "C" {
  extern int __us_rsthread_malloc;
}
#endif

// Specialised debug form of malloc which does not report "false"
// memory leaks when run with debug CRT libraries.
#if defined (_STLP_MSVC) && (_STLP_MSVC >= 1020 && defined (_STLP_DEBUG_ALLOC)) && !defined (_STLP_WCE)
# include <crtdbg.h>
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
inline void __stlp_chunck_free(void* __p) { _free_dbg(__p, _CRT_BLOCK); }
#else // !_DEBUG
# ifdef _STLP_NODE_ALLOC_USE_MALLOC
#  include <cstdlib>
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
inline void __stlp_chunck_free(void* __p) { _STLP_VENDOR_CSTD::free(__p); }
# else
inline void* __stlp_chunk_malloc(size_t __bytes) { return _STLP_STD::__stl_new(__bytes); }
inline void __stlp_chunck_free(void* __p) { _STLP_STD::__stl_delete(__p); }
# endif
#endif // !_DEBUG

#define _S_FREELIST_INDEX(__bytes) ((__bytes - size_t(1)) >> (int)_ALIGN_SHIFT)

_STLP_BEGIN_NAMESPACE

class __malloc_alloc_impl {
private:
  static void* _S_oom_malloc(size_t __n) {
    __oom_handler_type __my_malloc_handler;
    void * __result;

    for (;;) {
      __my_malloc_handler = __oom_handler;
      if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
      (*__my_malloc_handler)();
      __result = malloc(__n);
      if (__result) return __result;
    }
#if defined (_STLP_NEED_UNREACHABLE_RETURN)
    return 0;
#endif
  }
#if defined(__SYMBIAN32__WSD__)
  static _STLP_STATIC_MEMBER_DECLSPEC __oom_handler_type& get_oom_handler();
#else
  static __oom_handler_type __oom_handler;
#endif
public:
  // This one is needed for proper simple_alloc wrapping.
  typedef char value_type;
  static void* allocate(size_t& __n) {
    void* __result = malloc(__n);
    if (0 == __result) {
      __result = _S_oom_malloc(__n);
    }
#if defined (_STLP_MALLOC_USABLE_SIZE)
    else {
      size_t __new_n = _STLP_MALLOC_USABLE_SIZE(__result);
      /*
      if (__n != __new_n) {
        printf("requested size %d, usable %d\n", __n, __new_n);
      }
      */
      __n = __new_n;
    }
#endif
    return __result;
  }
  static void deallocate(void* __p, size_t /* __n */) { free((char*)__p); }
  static __oom_handler_type set_malloc_handler(__oom_handler_type __f) {
    __oom_handler_type __old = __oom_handler;
    __oom_handler = __f;
    return __old;
  }
#if defined(__SYMBIAN32__WSD__)
  friend void ::stdcpp_allocators_init();
#endif
};

#if !defined(__SYMBIAN32__WSD__)
// malloc_alloc out-of-memory handling
__oom_handler_type __malloc_alloc_impl::__oom_handler = __STATIC_CAST(__oom_handler_type, 0);
#endif

void* _STLP_CALL __malloc_alloc::allocate(size_t& __n)
{ return __malloc_alloc_impl::allocate(__n); }
__oom_handler_type _STLP_CALL __malloc_alloc::set_malloc_handler(__oom_handler_type __f)
{ return __malloc_alloc_impl::set_malloc_handler(__f); }
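
// Usage sketch (illustrative only, not part of this translation unit):
// a client can install an out-of-memory handler that _S_oom_malloc will
// invoke repeatedly until malloc() succeeds, in the spirit of
// std::set_new_handler. __flush_some_cache is a hypothetical releaser.
//
//   static void __flush_some_cache();   // hypothetical memory releaser
//
//   __oom_handler_type __prev =
//       __malloc_alloc::set_malloc_handler(__flush_some_cache);
//   // ... allocations now retry after calling __flush_some_cache() ...
//   __malloc_alloc::set_malloc_handler(__prev);   // restore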

// *******************************************************
// Default node allocator.
// With a reasonable compiler, this should be roughly as fast as the
// original STL class-specific allocators, but with less fragmentation.
//
// Important implementation properties:
// 1. If the client requests an object of size > _MAX_BYTES, the resulting
//    object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
//    _S_round_up(requested_size). Thus the client has enough size
//    information that we can return the object to the proper free list
//    without permanently losing part of the object.
//

#define _STLP_NFREELISTS 16
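
// Worked example (assuming the usual configuration _ALIGN == 8,
// _ALIGN_SHIFT == 3, _MAX_BYTES == 128, i.e. 16 free lists serving sizes
// 8, 16, ..., 128):
//
//   _S_round_up(1)   == 8    ->  _S_FREELIST_INDEX(8)   == 0
//   _S_round_up(13)  == 16   ->  _S_FREELIST_INDEX(16)  == 1
//   _S_round_up(128) == 128  ->  _S_FREELIST_INDEX(128) == 15
//
// Every request size therefore maps onto exactly one of the
// _STLP_NFREELISTS free lists.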

#if defined (_STLP_LEAKS_PEDANTIC) && defined (_STLP_USE_DYNAMIC_LIB)
/*
 * We can only do cleanup of the node allocator memory pool if we are
 * sure that the STLport library is used as a shared one, as that guarantees
 * the uniqueness of the node allocator instance. Without that guarantee,
 * node allocator instances might exchange memory blocks, making the
 * implementation of a cleaning process much more complicated.
 */
# define _STLP_DO_CLEAN_NODE_ALLOC
#endif

/* When STLport is used without multi-threaded safety, we use the node
 * allocator implementation with locks, as the locks become no-ops. The
 * lock-free implementation always uses system-specific atomic operations,
 * which are slower than 'normal' ones.
 */
#if defined (_STLP_THREADS) && \
    defined (_STLP_HAS_ATOMIC_FREELIST) && defined (_STLP_ATOMIC_ADD)
/*
 * We have an implementation of the atomic freelist (_STLP_atomic_freelist)
 * for this architecture and compiler. That means we can use the non-blocking
 * implementation of the node-allocation engine.
 */
# define _STLP_USE_LOCK_FREE_IMPLEMENTATION
#endif

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
# if defined (_STLP_THREADS)

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() {
# if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
# endif
      _S_lock._M_acquire_lock();
  }

  ~_Node_Alloc_Lock() {
# if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
# endif
      _S_lock._M_release_lock();
  }
#if defined (__SYMBIAN32__WSD__)
  static _STLP_STATIC_MUTEX& get_allocator_S_lock();
#else
  static _STLP_STATIC_MUTEX _S_lock;
#endif
};

#if !defined(__SYMBIAN32__WSD__)
_STLP_STATIC_MUTEX _Node_Alloc_Lock::_S_lock _STLP_MUTEX_INITIALIZER;
#endif

# else

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() { }
  ~_Node_Alloc_Lock() { }
};

# endif

struct _Node_alloc_obj {
  _Node_alloc_obj * _M_next;
};
#endif

class __node_alloc_impl {
_STLP_PRIVATE:
  static inline size_t _STLP_CALL _S_round_up(size_t __bytes)
  { return (((__bytes) + (size_t)_ALIGN - 1) & ~((size_t)_ALIGN - 1)); }

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_atomic_freelist::item _Obj;
  typedef _STLP_atomic_freelist _Freelist;
  typedef _STLP_atomic_freelist _ChunkList;

  // Header of blocks of memory that have been allocated as part of
  // a larger chunk but have not yet been chopped up into nodes.
  struct _FreeBlockHeader : public _STLP_atomic_freelist::item {
    char* _M_end;     // pointer to end of free memory
  };
#else
  typedef _Node_alloc_obj _Obj;
  typedef _Obj* _STLP_VOLATILE _Freelist;
  typedef _Obj* _ChunkList;
#endif

private:
  // Returns an object of size __n, and optionally adds to the size __n free list.
  static _Obj* _S_refill(size_t __n);
  // Allocates a chunk for __nobjs objects of size __p_size. __nobjs may be
  // reduced if it is inconvenient to allocate the requested number.
  static char* _S_chunk_alloc(size_t __p_size, int& __nobjs);
  // Chunk allocation state.
#if defined(__SYMBIAN32__WSD__)
  static _Freelist* get_S_free_list();
#else
  static _Freelist _S_free_list[_STLP_NFREELISTS];
#endif

  // Amount of total allocated memory
#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  static _STLP_VOLATILE __stl_atomic_t _S_heap_size;
#else
#if defined(__SYMBIAN32__WSD__)
  static size_t& get_S_heap_size();
#else
  static size_t _S_heap_size;
#endif
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  // List of blocks of free memory
  static _STLP_atomic_freelist _S_free_mem_blocks;
#else
#if defined(__SYMBIAN32__WSD__)
  // Start of the current free memory buffer
  static char*& get_S_start_free();
  // End of the current free memory buffer
  static char*& get_S_end_free();
#else
  // Start of the current free memory buffer
  static char* _S_start_free;
  // End of the current free memory buffer
  static char* _S_end_free;
#endif
#endif

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
public:
  // Methods to report alloc/dealloc calls to the counter system.
# if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_VOLATILE __stl_atomic_t _AllocCounter;
# else
  typedef __stl_atomic_t _AllocCounter;
# endif
  static _AllocCounter& _STLP_CALL _S_alloc_counter();
  static void _S_alloc_call();
  static void _S_dealloc_call();

private:
  // Free all the allocated chunks of memory
  static void _S_chunk_dealloc();
  // Beginning of the linked list of allocated chunks of memory
  static _ChunkList _S_chunks;
#endif /* _STLP_DO_CLEAN_NODE_ALLOC */

public:
  /* __n must be > 0 */
  static void* _M_allocate(size_t& __n);
  /* __p may not be 0 */
  static void _M_deallocate(void *__p, size_t __n);

#if defined(__SYMBIAN32__WSD__)
  friend void ::stdcpp_allocators_init();
#endif
};

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj *__r;

  // Acquire the lock here with a constructor call.
  // This ensures that it is released on exit or during stack
  // unwinding.
  _Node_Alloc_Lock __lock_instance;

  if ( (__r = *__my_free_list) != 0 ) {
    *__my_free_list = __r->_M_next;
  } else {
    __r = _S_refill(__n);
  }
# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
# endif
  // lock is released here
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj * __pobj = __STATIC_CAST(_Obj*, __p);

  // acquire lock
  _Node_Alloc_Lock __lock_instance;
  __pobj->_M_next = *__my_free_list;
  *__my_free_list = __pobj;

# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
# endif
  // lock is released here
}

/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much.                                        */
/* We assume that size is properly aligned.                         */
/* We hold the allocation lock.                                     */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
  char* __result;
  size_t __total_bytes = _p_size * __nobjs;
  size_t __bytes_left = _S_end_free - _S_start_free;

  if (__bytes_left > 0) {
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    if (__bytes_left >= _p_size) {
      __nobjs = (int)(__bytes_left / _p_size);
      __total_bytes = _p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    // Try to make use of the left-over piece.
    _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__bytes_left);
    __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = *__my_free_list;
    *__my_free_list = __REINTERPRET_CAST(_Obj*, _S_start_free);
  }

  size_t __bytes_to_get =
    2 * __total_bytes + _S_round_up(_S_heap_size >> 4)
# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + sizeof(_Obj)
# endif
    ;

  _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  if (0 == _S_start_free) {
    _Obj* _STLP_VOLATILE* __my_free_list;
    _Obj* __p;
    // Try to make do with what we have. That can't hurt.
    // We do not try smaller requests, since that tends
    // to result in disaster on multi-process machines.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
      __p = *__my_free_list;
      if (0 != __p) {
        *__my_free_list = __p->_M_next;
        _S_start_free = __REINTERPRET_CAST(char*, __p);
        _S_end_free = _S_start_free + __i;
        return _S_chunk_alloc(_p_size, __nobjs);
        // Any leftover piece will eventually make it to the
        // right free list.
      }
    }
    _S_end_free = 0; // In case of exception.
    _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    /*
    (char*)malloc_alloc::allocate(__bytes_to_get);
    */

    // This should either throw an exception or remedy the situation.
    // Thus we assume it succeeded.
  }

  _S_heap_size += __bytes_to_get;
# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = _S_chunks;
  _S_chunks = __REINTERPRET_CAST(_Obj*, _S_start_free);
# endif
  _S_end_free = _S_start_free + __bytes_to_get;
# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_start_free += sizeof(_Obj);
# endif
  return _S_chunk_alloc(_p_size, __nobjs);
}
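
// Worked example of the growth policy above (illustrative figures, ignoring
// the optional sizeof(_Obj) bookkeeping header): a first _S_refill(64) asks
// for 20 objects, so __total_bytes == 1280. With _S_heap_size still 0,
// __bytes_to_get == 2 * 1280 + _S_round_up(0 >> 4) == 2560. After that call
// _S_heap_size == 2560, so the next time the pool is exhausted the request
// grows by an extra _S_round_up(2560 >> 4) == 160 bytes: the pool expands
// roughly in proportion to the heap already committed.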

/* Returns an object of size __n, and optionally adds to the size __n free list.*/
/* We assume that __n is properly aligned.                                      */
/* We hold the allocation lock.                                                 */
_Node_alloc_obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (1 == __nobjs) return __REINTERPRET_CAST(_Obj*, __chunk);

  _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj* __result;
  _Obj* __current_obj;
  _Obj* __next_obj;

  /* Build free list in chunk */
  __result = __REINTERPRET_CAST(_Obj*, __chunk);
  *__my_free_list = __next_obj = __REINTERPRET_CAST(_Obj*, __chunk + __n);
  for (--__nobjs; --__nobjs; ) {
    __current_obj = __next_obj;
    __next_obj = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __next_obj) + __n);
    __current_obj->_M_next = __next_obj;
  }
  __next_obj->_M_next = 0;
  return __result;
}
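
// Layout sketch for _S_refill(__n) when _S_chunk_alloc delivers all 20
// objects: the chunk is carved into 20 slots of __n bytes each; slot 0 is
// returned to the caller and slots 1..19 are threaded onto the free list:
//
//   __chunk: [returned][node 1] -> [node 2] -> ... -> [node 19] -> 0
//
// The loop body runs __nobjs - 2 times because the first node is installed
// before the loop and the last one is null-terminated after it.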

# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ ++_S_alloc_counter(); }

void __node_alloc_impl::_S_dealloc_call() {
  __stl_atomic_t &counter = _S_alloc_counter();
  if (--counter == 0)
  { _S_chunk_dealloc(); }
}

/* We deallocate all the memory chunks */
void __node_alloc_impl::_S_chunk_dealloc() {
  _Obj *__pcur = _S_chunks, *__pnext;
  while (__pcur != 0) {
    __pnext = __pcur->_M_next;
    __stlp_chunck_free(__pcur);
    __pcur = __pnext;
  }
  _S_chunks = 0;
  _S_start_free = _S_end_free = 0;
  _S_heap_size = 0;
  memset(__REINTERPRET_CAST(char*, &_S_free_list[0]), 0, _STLP_NFREELISTS * sizeof(_Obj*));
}
# endif /* _STLP_DO_CLEAN_NODE_ALLOC */

#else /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */

void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj* __r = _S_free_list[_S_FREELIST_INDEX(__n)].pop();
  if (__r == 0)
  { __r = _S_refill(__n); }

# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
# endif
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _S_free_list[_S_FREELIST_INDEX(__n)].push(__STATIC_CAST(_Obj*, __p));

# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
# endif
}

/* Returns an object of size __n, and optionally adds additional ones to */
/* the freelist of objects of size __n.                                  */
/* We assume that __n is properly aligned.                               */
__node_alloc_impl::_Obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (__nobjs <= 1)
    return __REINTERPRET_CAST(_Obj*, __chunk);

  // Push all new nodes (minus first one) onto freelist
  _Obj* __result = __REINTERPRET_CAST(_Obj*, __chunk);
  _Obj* __cur_item = __result;
  _Freelist* __my_freelist = _S_free_list + _S_FREELIST_INDEX(__n);
  for (--__nobjs; __nobjs != 0; --__nobjs) {
    __cur_item = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __cur_item) + __n);
    __my_freelist->push(__cur_item);
  }
  return __result;
}

/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much.                                        */
/* We assume that size is properly aligned.                         */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We are going to add a small memory block to keep track of the allocated
  // blocks' addresses; we need to do so respecting the memory alignment.
  // The following static assert checks that the reserved block is big
  // enough to store a pointer.
  _STLP_STATIC_ASSERT(sizeof(_Obj) <= _ALIGN)
# endif
  char* __result = 0;
  __stl_atomic_t __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;

  _FreeBlockHeader* __block = __STATIC_CAST(_FreeBlockHeader*, _S_free_mem_blocks.pop());
  if (__block != 0) {
    // We checked a block out and can now mess with it with impunity.
    // We'll put the remainder back into the list if we're done with it below.
    char* __buf_start = __REINTERPRET_CAST(char*, __block);
    __stl_atomic_t __bytes_left = __block->_M_end - __buf_start;

    if ((__bytes_left < __total_bytes) && (__bytes_left >= __STATIC_CAST(__stl_atomic_t, _p_size))) {
      // There's enough left for at least one object, but not as much as we wanted
      __result = __buf_start;
      __nobjs = (int)(__bytes_left / _p_size);
      __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
      __bytes_left -= __total_bytes;
      __buf_start += __total_bytes;
    }
    else if (__bytes_left >= __total_bytes) {
      // The block has enough left to satisfy all that was asked for
      __result = __buf_start;
      __bytes_left -= __total_bytes;
      __buf_start += __total_bytes;
    }

    if (__bytes_left != 0) {
      // There is still some memory left over in the block after we satisfied our request.
      if ((__result != 0) && (__bytes_left >= sizeof(_FreeBlockHeader))) {
        // We were able to allocate at least one object and there is still enough
        // left to put the remainder back into the list.
        _FreeBlockHeader* __newblock = __REINTERPRET_CAST(_FreeBlockHeader*, __buf_start);
        __newblock->_M_end = __block->_M_end;
        _S_free_mem_blocks.push(__newblock);
      }
      else {
        // We were not able to allocate enough for at least one object.
        // Shove it into the freelist of nearest (rounded-down!) size.
        size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
        if (__rounded_down > 0)
          _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push((_Obj*)__buf_start);
      }
    }
    if (__result != 0)
      return __result;
  }

  // We couldn't satisfy it from the list of free blocks, get new memory.
  __stl_atomic_t __bytes_to_get = 2 * __total_bytes + __STATIC_CAST(__stl_atomic_t, _S_round_up(_S_heap_size >> 4))
# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + _ALIGN
# endif
    ;

  __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  // Alignment check
  _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

  if (0 == __result) {
    // Allocation failed; try to cannibalize from the freelist of a larger object size.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      _Obj* __p = _S_free_list[_S_FREELIST_INDEX(__i)].pop();
      if (0 != __p) {
        if (__i < sizeof(_FreeBlockHeader)) {
          // Not enough to put into the list of free blocks, divvy it up here.
          // Use as much as possible for this request and shove the remainder into the freelist.
          __nobjs = (int)(__i / _p_size);
          __total_bytes = __nobjs * __STATIC_CAST(__stl_atomic_t, _p_size);
          size_t __bytes_left = __i - __total_bytes;
          size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
          if (__rounded_down > 0) {
            _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push(__REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __p) + __total_bytes));
          }
          return __REINTERPRET_CAST(char*, __p);
        }
        else {
          // Add the node to the list of available blocks and recursively allocate from it.
          _FreeBlockHeader* __newblock = (_FreeBlockHeader*)__p;
          __newblock->_M_end = __REINTERPRET_CAST(char*, __p) + __i;
          _S_free_mem_blocks.push(__newblock);
          return _S_chunk_alloc(_p_size, __nobjs);
        }
      }
    }

    // We were not able to find something in a freelist, try to allocate a smaller amount.
    __bytes_to_get = __total_bytes
# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
      + _ALIGN
# endif
      ;
    __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    // Alignment check
    _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

    // This should either throw an exception or remedy the situation.
    // Thus we assume it succeeded.
  }

  _STLP_ATOMIC_ADD(&_S_heap_size, __bytes_to_get);

# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We have to track the allocated memory chunks for release on exit.
  _S_chunks.push(__REINTERPRET_CAST(_Obj*, __result));
  __result += _ALIGN;
  __bytes_to_get -= _ALIGN;
# endif

  if (__bytes_to_get > __total_bytes) {
    // Push excess memory allocated in this chunk into the list of free memory blocks
    _FreeBlockHeader* __freeblock = __REINTERPRET_CAST(_FreeBlockHeader*, __result + __total_bytes);
    __freeblock->_M_end = __result + __bytes_to_get;
    _S_free_mem_blocks.push(__freeblock);
  }
  return __result;
}

# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ _STLP_ATOMIC_INCREMENT(&_S_alloc_counter()); }

void __node_alloc_impl::_S_dealloc_call() {
  _STLP_VOLATILE __stl_atomic_t *pcounter = &_S_alloc_counter();
  if (_STLP_ATOMIC_DECREMENT(pcounter) == 0)
    _S_chunk_dealloc();
}

/* We deallocate all the memory chunks */
void __node_alloc_impl::_S_chunk_dealloc() {
  // Note: The _Node_alloc_helper class ensures that this function
  // will only be called when the (shared) library is unloaded or the
  // process is shut down. It's thus not possible that another thread
  // is currently trying to allocate a node (we're not thread-safe here).
  //

  // Clear the free blocks and all freelists, so that no references to
  // deallocated memory remain if for some reason more memory is allocated
  // again during shutdown (it'd be really nasty to leave references to
  // deallocated memory).
  _S_free_mem_blocks.clear();
  _S_heap_size = 0;

  for (size_t __i = 0; __i < _STLP_NFREELISTS; ++__i) {
    _S_free_list[__i].clear();
  }

  // Detach the list of chunks and free them all
  _Obj* __chunk = _S_chunks.clear();
  while (__chunk != 0) {
    _Obj* __next = __chunk->_M_next;
    __stlp_chunck_free(__chunk);
    __chunk = __next;
  }
}
# endif /* _STLP_DO_CLEAN_NODE_ALLOC */

#endif /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
struct __node_alloc_cleaner {
  ~__node_alloc_cleaner()
  { __node_alloc_impl::_S_dealloc_call(); }
};

# if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t& _STLP_CALL
# else
__stl_atomic_t& _STLP_CALL
# endif
__node_alloc_impl::_S_alloc_counter() {
  static _AllocCounter _S_counter = 1;
  static __node_alloc_cleaner _S_node_alloc_cleaner;
  return _S_counter;
}
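
// Why _S_counter starts at 1: the static __node_alloc_cleaner above holds
// the extra reference. _S_alloc_call() increments the counter on every node
// allocation and _S_dealloc_call() decrements it on every deallocation, so
// the counter can only drop to 0 inside the cleaner's destructor, at program
// or library shutdown, once all outstanding nodes have been returned; only
// then does _S_chunk_dealloc() release the chunks.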
#endif

#if !defined(__SYMBIAN32__WSD__)
#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_Node_alloc_obj * _STLP_VOLATILE
__node_alloc_impl::_S_free_list[_STLP_NFREELISTS]
= {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy. Otherwise it appears to allocate too little
// space for the array.
#else
_STLP_atomic_freelist __node_alloc_impl::_S_free_list[_STLP_NFREELISTS];
_STLP_atomic_freelist __node_alloc_impl::_S_free_mem_blocks;
#endif

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
char *__node_alloc_impl::_S_start_free = 0;
char *__node_alloc_impl::_S_end_free = 0;
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t
#else
size_t
#endif
__node_alloc_impl::_S_heap_size = 0;
#endif //__SYMBIAN32__WSD__

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
# if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_atomic_freelist __node_alloc_impl::_S_chunks;
# else
_Node_alloc_obj* __node_alloc_impl::_S_chunks = 0;
# endif
#endif

_STLP_DECLSPEC void * _STLP_CALL __node_alloc::_M_allocate(size_t& __n)
{ return __node_alloc_impl::_M_allocate(__n); }

_STLP_DECLSPEC void _STLP_CALL __node_alloc::_M_deallocate(void *__p, size_t __n)
{ __node_alloc_impl::_M_deallocate(__p, __n); }

#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)

# define _STLP_DATA_ALIGNMENT 8

_STLP_MOVE_TO_PRIV_NAMESPACE

// *******************************************************
// __perthread_alloc implementation
union _Pthread_alloc_obj {
  union _Pthread_alloc_obj * __free_list_link;
  char __client_data[_STLP_DATA_ALIGNMENT];    /* The client sees this. */
};

// Pthread allocators don't appear to the client to have meaningful
// instances. We do in fact need to associate some state with each
// thread. That state is represented by _Pthread_alloc_per_thread_state.

struct _Pthread_alloc_per_thread_state {
  typedef _Pthread_alloc_obj __obj;
  enum { _S_NFREELISTS = _MAX_BYTES / _STLP_DATA_ALIGNMENT };

  // Free list link for the list of available per thread structures.
  // When one of these becomes available for reuse due to thread
  // termination, any objects in its free list remain associated
  // with it. The whole structure may then be used by a newly
  // created thread.
  _Pthread_alloc_per_thread_state() : __next(0)
  { memset((void *)__CONST_CAST(_Pthread_alloc_obj**, __free_list), 0, (size_t)_S_NFREELISTS * sizeof(__obj *)); }
  // Returns an object of size __n, and possibly adds to the size __n free list.
  void *_M_refill(size_t __n);

  _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
  _Pthread_alloc_per_thread_state *__next;
  // This data member is only to be used by per_thread_allocator,
  // which returns memory to the originating thread.
  _STLP_mutex _M_lock;
};
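
// Sizing sketch (assuming the common configuration _MAX_BYTES == 128): with
// _STLP_DATA_ALIGNMENT == 8, each per-thread state carries
// _S_NFREELISTS == 128 / 8 == 16 free lists serving the size classes
// 8, 16, ..., 128 bytes; larger requests bypass the pool entirely and go
// straight to __malloc_alloc (see allocate() below).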

// Pthread-specific allocator.
class _Pthread_alloc_impl {
public: // but only for internal use:
  typedef _Pthread_alloc_per_thread_state __state_type;
  typedef char value_type;

  // Allocates a chunk for __nobjs objects of size __size. __nobjs may be
  // reduced if it is inconvenient to allocate the requested number.
  static char *_S_chunk_alloc(size_t __size, size_t &__nobjs, __state_type*);

  enum { _S_ALIGN = _STLP_DATA_ALIGNMENT };

  static size_t _S_round_up(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) & ~((int)_S_ALIGN - 1)); }
  static size_t _S_freelist_index(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) / (int)_S_ALIGN - 1); }
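
  // For example, with _S_ALIGN == 8: _S_round_up(1) == 8 and
  // _S_freelist_index(1) == 0, while _S_round_up(13) == 16 and
  // _S_freelist_index(13) == 1 -- the same size-class mapping as the node
  // allocator above, computed with a division instead of a shift.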

private:
  // Chunk allocation state. And other shared state.
  // Protected by _S_chunk_allocator_lock.
#if defined(__SYMBIAN32__WSD__)
public:
  static void pt_wsd_init() {
    get_S_free_per_thread_states() = 0;
    get_S_key() = 0;
    get_S_chunk_allocator_lock()._M_lock.iState = _ENeedsNormalInit;
    get_S_chunk_allocator_lock()._M_lock.iPtr = 0;
    get_S_chunk_allocator_lock()._M_lock.iReentry = 0;
    get_S_key_initialized() = false;
    get_S_start_free() = 0;
    get_S_end_free() = 0;
    get_S_heap_size() = 0;
  }
private:
  static _STLP_STATIC_MUTEX& get_S_chunk_allocator_lock()
  { return get_libcpp_wsd().wsd_pt_S_chunk_allocator_lock; }
  static char*& get_S_start_free()
  { return get_libcpp_wsd().wsd_pt_S_start_free; }
  static char*& get_S_end_free()
  { return get_libcpp_wsd().wsd_pt_S_end_free; }
  static size_t& get_S_heap_size()
  { return get_libcpp_wsd().wsd_pt_S_heap_size; }
  static __state_type*& get_S_free_per_thread_states()
  { return get_libcpp_wsd().wsd_pt_S_free_per_thread_states; }
  static pthread_key_t& get_S_key()
  { return get_libcpp_wsd().wsd_pt_S_key; }
  static bool& get_S_key_initialized()
  { return get_libcpp_wsd().wsd_pt_S_key_initialized; }
#else
  static _STLP_STATIC_MUTEX _S_chunk_allocator_lock;
  static char *_S_start_free;
  static char *_S_end_free;
  static size_t _S_heap_size;
  // Allocator instances that are currently unclaimed by any thread.
  static __state_type *_S_free_per_thread_states;
  // Pthread key under which per thread state is stored.
  static pthread_key_t _S_key;
  static bool _S_key_initialized;
#endif
  // Function to be called on thread exit to reclaim per thread state.
  static void _S_destructor(void *instance);
  static __state_type *_S_new_per_thread_state();
public:
  // Return a recycled or new per thread state; ensures that the
  // current thread has an associated per thread state.
  static __state_type *_S_get_per_thread_state();
private:
  class _M_lock;
  friend class _M_lock;
  class _M_lock {
  public:
    _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
    ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
  };

public:

  /* __n must be > 0 */
  static void * allocate(size_t& __n);

  /* __p may not be 0 */
  static void deallocate(void *__p, size_t __n);

  // boris : versions for per_thread_allocator
  /* __n must be > 0 */
  static void * allocate(size_t& __n, __state_type* __a);

  /* __p may not be 0 */
  static void deallocate(void *__p, size_t __n, __state_type* __a);

  static void * reallocate(void *__p, size_t __old_sz, size_t& __new_sz);
};

/* Returns an object of size __n, and optionally adds to the size __n free list.*/
/* We assume that __n is properly aligned.                                      */
/* We hold the allocation lock.                                                 */
void *_Pthread_alloc_per_thread_state::_M_refill(size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  size_t __nobjs = 128;
  char * __chunk = _Pthread_alloc_impl::_S_chunk_alloc(__n, __nobjs, this);
  __obj * volatile * __my_free_list;
  __obj * __result;
  __obj * __current_obj, * __next_obj;
  size_t __i;

  if (1 == __nobjs) {
    return __chunk;
  }

  __my_free_list = __free_list + _Pthread_alloc_impl::_S_freelist_index(__n);

  /* Build free list in chunk */
  __result = (__obj *)__chunk;
  *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
  for (__i = 1; ; ++__i) {
    __current_obj = __next_obj;
    __next_obj = (__obj *)((char *)__next_obj + __n);
    if (__nobjs - 1 == __i) {
      __current_obj->__free_list_link = 0;
      break;
    } else {
      __current_obj->__free_list_link = __next_obj;
    }
  }
  return __result;
}

void _Pthread_alloc_impl::_S_destructor(void *__instance) {
  _M_lock __lock_instance; // Need to acquire lock here.
  _Pthread_alloc_per_thread_state* __s = (_Pthread_alloc_per_thread_state*)__instance;
  __s->__next = _S_free_per_thread_states;
  _S_free_per_thread_states = __s;
}

_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_new_per_thread_state() {
  /* lock already held here. */
  if (0 != _S_free_per_thread_states) {
    _Pthread_alloc_per_thread_state *__result = _S_free_per_thread_states;
    _S_free_per_thread_states = _S_free_per_thread_states->__next;
    return __result;
  }
  else {
    return _STLP_NEW _Pthread_alloc_per_thread_state;
  }
}

_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_get_per_thread_state() {
  int __ret_code;
  __state_type* __result;

  if (_S_key_initialized && ((__result = (__state_type*)pthread_getspecific(_S_key)) != NULL))
    return __result;

  /*REFERENCED*/
  _M_lock __lock_instance; // Need to acquire lock here.
  if (!_S_key_initialized) {
    if (pthread_key_create(&_S_key, _S_destructor)) {
      __THROW_BAD_ALLOC; // failed
    }
    _S_key_initialized = true;
  }

  __result = _S_new_per_thread_state();
  __ret_code = pthread_setspecific(_S_key, __result);
  if (__ret_code) {
    if (__ret_code == ENOMEM) {
      __THROW_BAD_ALLOC;
    } else {
      // EINVAL
      _STLP_ABORT();
    }
  }
  return __result;
}

/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much.                                        */
/* We assume that size is properly aligned.                         */
char *_Pthread_alloc_impl::_S_chunk_alloc(size_t __p_size, size_t &__nobjs, _Pthread_alloc_per_thread_state *__a) {
  typedef _Pthread_alloc_obj __obj;
  {
    char * __result;
    size_t __total_bytes;
    size_t __bytes_left;
    /*REFERENCED*/
    _M_lock __lock_instance;         // Acquire lock for this routine

    __total_bytes = __p_size * __nobjs;
    __bytes_left = _S_end_free - _S_start_free;
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else if (__bytes_left >= __p_size) {
      __nobjs = __bytes_left / __p_size;
      __total_bytes = __p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else {
      size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
      // Try to make use of the left-over piece.
      if (__bytes_left > 0) {
        __obj * volatile * __my_free_list = __a->__free_list + _S_freelist_index(__bytes_left);
        ((__obj *)_S_start_free)->__free_list_link = *__my_free_list;
        *__my_free_list = (__obj *)_S_start_free;
      }
# ifdef _SGI_SOURCE
      // Try to get memory that's aligned on something like a
      // cache line boundary, so as to avoid parceling out
      // parts of the same line to different threads and thus
      // possibly different processors.
      {
        const int __cache_line_size = 128; // probable upper bound
        __bytes_to_get &= ~(__cache_line_size - 1);
        _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
        if (0 == _S_start_free) {
          _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
        }
      }
# else /* !SGI_SOURCE */
      _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
# endif
      _S_heap_size += __bytes_to_get;
      _S_end_free = _S_start_free + __bytes_to_get;
    }
  }
  // lock is released here
  return _S_chunk_alloc(__p_size, __nobjs, __a);
}


/* __n must be > 0 */
void *_Pthread_alloc_impl::allocate(size_t& __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }

  __n = _S_round_up(__n);
  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
}

/* __p may not be 0 */
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
    __malloc_alloc::deallocate(__p, __n);
    return;
  }

  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q->__free_list_link = *__my_free_list;
  *__my_free_list = __q;
}

// boris : versions for per_thread_allocator
/* __n must be > 0 */
void *_Pthread_alloc_impl::allocate(size_t& __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }
  __n = _S_round_up(__n);

  // boris : here we have to lock the per thread state, as we may be getting
  // memory from a different thread's pool.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
}

/* __p may not be 0 */
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;

  if (__n > _MAX_BYTES) {
    __malloc_alloc::deallocate(__p, __n);
    return;
  }

  // boris : here we have to lock the per thread state, as we may be returning
  // memory to a different thread's pool.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q->__free_list_link = *__my_free_list;
  *__my_free_list = __q;
}

void *_Pthread_alloc_impl::reallocate(void *__p, size_t __old_sz, size_t& __new_sz) {
  void * __result;
  size_t __copy_sz;

  if (__old_sz > _MAX_BYTES && __new_sz > _MAX_BYTES) {
    return realloc(__p, __new_sz);
  }

  if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return __p;
  __result = allocate(__new_sz);
  __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
  memcpy(__result, __p, __copy_sz);
  deallocate(__p, __old_sz);
  return __result;
}
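
// Note on reallocate(): when the old and new sizes round up to the same size
// class, the original pointer is returned unchanged, so callers must not
// assume the block moves; otherwise a fresh node is allocated,
// min(__old_sz, __new_sz) bytes are copied across, and the old node goes
// back to its free list.
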
#if !defined(__SYMBIAN32__WSD__)
_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_free_per_thread_states = 0;
pthread_key_t _Pthread_alloc_impl::_S_key = 0;
_STLP_STATIC_MUTEX _Pthread_alloc_impl::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
bool _Pthread_alloc_impl::_S_key_initialized = false;
char *_Pthread_alloc_impl::_S_start_free = 0;
char *_Pthread_alloc_impl::_S_end_free = 0;
size_t _Pthread_alloc_impl::_S_heap_size = 0;
#else

inline __oom_handler_type& __malloc_alloc_impl::get_oom_handler()
{
  return get_libcpp_wsd().wsd__oom_handler;
}

inline __node_alloc_impl::_Freelist* __node_alloc_impl::get_S_free_list()
{
  return (__node_alloc_impl::_Freelist*)get_libcpp_wsd().wsd_S_free_list;
}

inline size_t& __node_alloc_impl::get_S_heap_size()
{
  return get_libcpp_wsd().wsd__node_alloc_impl_S_heap_size;
}

inline char*& __node_alloc_impl::get_S_start_free()
{
  return get_libcpp_wsd().wsd_S_start_free;
}

inline char*& __node_alloc_impl::get_S_end_free()
{
  return get_libcpp_wsd().wsd_S_end_free;
}

inline _STLP_STATIC_MUTEX& _Node_Alloc_Lock::get_allocator_S_lock()
{
  return get_libcpp_wsd().wsd_allocator_S_lock;
}

#endif

void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n)
{ return _Pthread_alloc_impl::allocate(__n); }
void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n)
{ _Pthread_alloc_impl::deallocate(__p, __n); }
void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n, __state_type* __a)
{ return _Pthread_alloc_impl::allocate(__n, __a); }
void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n, __state_type* __a)
{ _Pthread_alloc_impl::deallocate(__p, __n, __a); }
void * _STLP_CALL _Pthread_alloc::reallocate(void *__p, size_t __old_sz, size_t& __new_sz)
{ return _Pthread_alloc_impl::reallocate(__p, __old_sz, __new_sz); }
_Pthread_alloc_per_thread_state* _STLP_CALL _Pthread_alloc::_S_get_per_thread_state()
{ return _Pthread_alloc_impl::_S_get_per_thread_state(); }

_STLP_MOVE_TO_STD_NAMESPACE

#endif

_STLP_END_NAMESPACE


#if defined(__SYMBIAN32__WSD__)
// To be called from stdcpp init, to initialise the WSD state.
void stdcpp_allocators_init()
{
  // init oom handler
  std::__malloc_alloc_impl::get_oom_handler() = NULL;

  // lock init
  stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iState = _ENeedsNormalInit;
  stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iPtr = 0;
  stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iReentry = 0;

  // init __node_alloc_impl state
  stlp_priv::__node_alloc_impl::get_S_heap_size() = 0;
  stlp_priv::__node_alloc_impl::get_S_start_free() = 0;
  stlp_priv::__node_alloc_impl::get_S_end_free() = 0;

  // initialize the free list
  for (int count = 0; count < _STLP_NFREELISTS; count++)
    stlp_priv::__node_alloc_impl::_S_free_list[count] = 0;

  // _Pthread_alloc_impl
  stlp_priv::_Pthread_alloc_impl::pt_wsd_init();
}
#endif

#undef _S_FREELIST_INDEX