/*
 *
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1997
 * Moscow Center for SPARC Technology
 *
 * Copyright (c) 1999
 * Boris Fomitchev
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */
#ifndef _STLP_PTHREAD_ALLOC_C
#define _STLP_PTHREAD_ALLOC_C

#ifdef __WATCOMC__
#pragma warning 13 9
#pragma warning 367 9
#pragma warning 368 9
#endif

#ifndef _STLP_PTHREAD_ALLOC_H
# include <stl/_pthread_alloc.h>
#endif

# if defined (_STLP_EXPOSE_GLOBALS_IMPLEMENTATION)

# include <cerrno>

_STLP_BEGIN_NAMESPACE

template <size_t _Max_size>
void _Pthread_alloc<_Max_size>::_S_destructor(void * __instance)
{
  _M_lock __lock_instance;	// Need to acquire lock here.
  _Pthread_alloc_per_thread_state<_Max_size>* __s =
    (_Pthread_alloc_per_thread_state<_Max_size> *)__instance;
  // Recycle the exiting thread's state block on the global free list
  // rather than deleting it, so a later thread can reuse it.
  __s -> __next = _S_free_per_thread_states;
  _S_free_per_thread_states = __s;
}

template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc<_Max_size>::_S_new_per_thread_state()
{
  /* lock already held here. */
  if (0 != _S_free_per_thread_states) {
    // Reuse a state block recycled by _S_destructor.
    _Pthread_alloc_per_thread_state<_Max_size> *__result =
      _S_free_per_thread_states;
    _S_free_per_thread_states = _S_free_per_thread_states -> __next;
    return __result;
  } else {
    return (_Pthread_alloc_per_thread_state<_Max_size>*) \
      _STLP_PLACEMENT_NEW (_Pthread_alloc_per_thread_state<_Max_size>);
  }
}
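// The two routines above, together with _S_get_per_thread_state below,
// implement an object pool keyed to POSIX thread-specific data:
// pthread_key_create() registers _S_destructor, which runs at thread exit
// and returns the state block to a global free list. A minimal standalone
// sketch of that pattern follows (hypothetical __example_* names, not part
// of STLport; disabled so it does not affect this translation unit):
#if 0
#include <pthread.h>

static pthread_key_t __example_key;

struct __ExampleState { int __id; };

// Runs automatically when a thread that set the key exits.
static void __example_destructor(void *__p) {
  delete (__ExampleState*)__p;  // a real pool would relink it, as above
}

static __ExampleState *__example_get_state() {
  void *__p = pthread_getspecific(__example_key);
  if (__p == 0) {               // first call in this thread
    __p = new __ExampleState();
    pthread_setspecific(__example_key, __p);
  }
  return (__ExampleState*)__p;
}
// Once per process, before any use:
//   pthread_key_create(&__example_key, __example_destructor);
#endif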
template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc<_Max_size>::_S_get_per_thread_state()
{

  int __ret_code;
  __state_type* __result;

  // Fast path: this thread already owns a state block.
  if (_S_key_initialized && (__result = (__state_type*) pthread_getspecific(_S_key)))
    return __result;

  /*REFERENCED*/
  _M_lock __lock_instance;	// Need to acquire lock here.
  if (!_S_key_initialized) {
    if (pthread_key_create(&_S_key, _S_destructor)) {
      __THROW_BAD_ALLOC;  // failed
    }
    _S_key_initialized = true;
  }

  __result = _S_new_per_thread_state();
  __ret_code = pthread_setspecific(_S_key, __result);
  if (__ret_code) {
    if (__ret_code == ENOMEM) {
      __THROW_BAD_ALLOC;
    } else {
      // EINVAL
      _STLP_ABORT();
    }
  }
  return __result;
}

/* We allocate memory in large chunks in order to avoid fragmenting    */
/* the malloc heap too much.                                           */
/* We assume that size is properly aligned.                            */
template <size_t _Max_size>
char *_Pthread_alloc<_Max_size>
::_S_chunk_alloc(size_t __p_size, size_t &__nobjs)
{
  {
    char * __result;
    size_t __total_bytes;
    size_t __bytes_left;
    /*REFERENCED*/
    _M_lock __lock_instance;         // Acquire lock for this routine

    __total_bytes = __p_size * __nobjs;
    __bytes_left = _S_end_free - _S_start_free;
    if (__bytes_left >= __total_bytes) {
      // The current chunk covers the whole request.
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return(__result);
    } else if (__bytes_left >= __p_size) {
      // Partial satisfaction: hand out as many objects as still fit.
      __nobjs = __bytes_left/__p_size;
      __total_bytes = __p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return(__result);
    } else {
      size_t __bytes_to_get =
        2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
      // Try to make use of the left-over piece.
      if (__bytes_left > 0) {
        _Pthread_alloc_per_thread_state<_Max_size>* __a =
          (_Pthread_alloc_per_thread_state<_Max_size>*)
          pthread_getspecific(_S_key);
        __obj * volatile * __my_free_list =
          __a->__free_list + _S_freelist_index(__bytes_left);

        ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
        *__my_free_list = (__obj *)_S_start_free;
      }
#   ifdef _SGI_SOURCE
      // Try to get memory that's aligned on something like a
      // cache line boundary, so as to avoid parceling out
      // parts of the same line to different threads and thus
      // possibly different processors.
      {
        const int __cache_line_size = 128;  // probable upper bound
        __bytes_to_get &= ~(__cache_line_size-1);
        _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
        if (0 == _S_start_free) {
          _S_start_free = (char *)__malloc_alloc<0>::allocate(__bytes_to_get);
        }
      }
#   else  /* !_SGI_SOURCE */
      _S_start_free = (char *)__malloc_alloc<0>::allocate(__bytes_to_get);
#   endif
      _S_heap_size += __bytes_to_get;
      _S_end_free = _S_start_free + __bytes_to_get;
    }
  }
  // Lock is released here; recurse to retry against the refilled pool.
  return(_S_chunk_alloc(__p_size, __nobjs));
}
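// The growth arithmetic above, worked through with illustrative values:
// with __p_size == 64, __nobjs == 128 and _S_heap_size == 4096,
//   __total_bytes  = 64 * 128 = 8192
//   __bytes_to_get = 2 * 8192 + _S_round_up(4096 >> 4)
//                  = 16384 + _S_round_up(256) = 16384 + 256 = 16640
// (assuming _S_round_up leaves the already-aligned 256 unchanged).
// Each refill thus requests twice the failed allocation plus 1/16 of the
// heap obtained so far, so chunk sizes grow with total heap usage.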
/* Returns an object of size __n, and optionally adds objects of size   */
/* __n to the free list. We assume that __n is properly aligned.        */
/* We hold the allocation lock.                                         */
template <size_t _Max_size>
void *_Pthread_alloc_per_thread_state<_Max_size>
::_M_refill(size_t __n)
{
  size_t __nobjs = 128;
  char * __chunk =
    _Pthread_alloc<_Max_size>::_S_chunk_alloc(__n, __nobjs);
  __obj * volatile * __my_free_list;
  __obj * __result;
  __obj * __current_obj, * __next_obj;
  int __i;

  if (1 == __nobjs) {
    return(__chunk);
  }
  __my_free_list = __free_list
    + _Pthread_alloc<_Max_size>::_S_freelist_index(__n);

  /* Build free list in chunk: the first object goes to the caller, the */
  /* remaining __nobjs - 1 objects are chained onto the free list.      */
  __result = (__obj *)__chunk;
  *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
  for (__i = 1; ; __i++) {
    __current_obj = __next_obj;
    __next_obj = (__obj *)((char *)__next_obj + __n);
    if (__nobjs - 1 == __i) {
      __current_obj -> __free_list_link = 0;
      break;
    } else {
      __current_obj -> __free_list_link = __next_obj;
    }
  }
  return(__result);
}

template <size_t _Max_size>
void *_Pthread_alloc<_Max_size>
::reallocate(void *__p, size_t __old_sz, size_t __new_sz)
{
  void * __result;
  size_t __copy_sz;

  // Blocks larger than _Max_size came straight from malloc, so realloc
  // can resize them in place.
  if (__old_sz > _Max_size
      && __new_sz > _Max_size) {
    return(realloc(__p, __new_sz));
  }
  // Same size class: the existing block already fits.
  if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
  __result = allocate(__new_sz);
  __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
  memcpy(__result, __p, __copy_sz);
  deallocate(__p, __old_sz);
  return(__result);
}

#if defined (_STLP_STATIC_TEMPLATE_DATA) && (_STLP_STATIC_TEMPLATE_DATA > 0)

template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> * _Pthread_alloc<_Max_size>::_S_free_per_thread_states = 0;

template <size_t _Max_size>
pthread_key_t _Pthread_alloc<_Max_size>::_S_key = 0;

template <size_t _Max_size>
bool _Pthread_alloc<_Max_size>::_S_key_initialized = false;

template <size_t _Max_size>
_STLP_mutex_base _Pthread_alloc<_Max_size>::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;

template <size_t _Max_size>
char *_Pthread_alloc<_Max_size>::_S_start_free = 0;

template <size_t _Max_size>
char *_Pthread_alloc<_Max_size>::_S_end_free = 0;

template <size_t _Max_size>
size_t _Pthread_alloc<_Max_size>::_S_heap_size = 0;

# else

__DECLARE_INSTANCE(template <size_t _Max_size> _Pthread_alloc_per_thread_state<_Max_size> *, _Pthread_alloc<_Max_size>::_S_free_per_thread_states, = 0);
__DECLARE_INSTANCE(template <size_t _Max_size> pthread_key_t, _Pthread_alloc<_Max_size>::_S_key, = 0);
__DECLARE_INSTANCE(template <size_t _Max_size> bool, _Pthread_alloc<_Max_size>::_S_key_initialized, = false);
__DECLARE_INSTANCE(template <size_t _Max_size> char *, _Pthread_alloc<_Max_size>::_S_start_free, = 0);
__DECLARE_INSTANCE(template <size_t _Max_size> char *, _Pthread_alloc<_Max_size>::_S_end_free, = 0);
__DECLARE_INSTANCE(template <size_t _Max_size> size_t, _Pthread_alloc<_Max_size>::_S_heap_size, = 0);

# endif

_STLP_END_NAMESPACE

# endif /* _STLP_EXPOSE_GLOBALS_IMPLEMENTATION */
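// Hedged usage sketch: in standard STLport the companion header wraps
// _Pthread_alloc in an STL-conforming per-thread allocator, typically named
// pthread_allocator<T> and exposed via the <pthread_alloc> wrapper header.
// Assuming that wrapper exists in this build, client code would look
// roughly like this (disabled illustration, not part of this file):
#if 0
#include <vector>
#include <pthread_alloc>

void __example() {
  // Each thread draws nodes from its own free lists, so the allocation
  // fast path needs no lock; the global lock is taken only on refill.
  std::vector<int, pthread_allocator<int> > __v;
  __v.push_back(42);
}
#endif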

#endif /* _STLP_PTHREAD_ALLOC_C */

// Local Variables:
// mode:C++
// End: