epoc32/include/stdapis/stlport/stl/_pthread_alloc.c
author William Roberts <williamr@symbian.org>
Wed, 31 Mar 2010 12:33:34 +0100
branch Symbian3
changeset 4 837f303aceeb
parent 0 061f57f2323e
permissions -rw-r--r--
Current Symbian^3 public API header files (from PDK 3.0.h)
This is the epoc32/include tree with the "platform" subtrees removed, and
all but a selected few mbg and rsg files removed.
williamr@2
     1
/*
williamr@2
     2
 *
williamr@2
     3
 * Copyright (c) 1996,1997
williamr@2
     4
 * Silicon Graphics Computer Systems, Inc.
williamr@2
     5
 *
williamr@2
     6
 * Copyright (c) 1997
williamr@2
     7
 * Moscow Center for SPARC Technology
williamr@2
     8
 *
williamr@2
     9
 * Copyright (c) 1999 
williamr@2
    10
 * Boris Fomitchev
williamr@2
    11
 *
williamr@2
    12
 * This material is provided "as is", with absolutely no warranty expressed
williamr@2
    13
 * or implied. Any use is at your own risk.
williamr@2
    14
 *
williamr@2
    15
 * Permission to use or copy this software for any purpose is hereby granted 
williamr@2
    16
 * without fee, provided the above notices are retained on all copies.
williamr@2
    17
 * Permission to modify the code and to distribute modified code is granted,
williamr@2
    18
 * provided the above notices are retained, and a notice that the code was
williamr@2
    19
 * modified is included with the above copyright notice.
williamr@2
    20
 *
williamr@2
    21
 */
williamr@2
    22
#ifndef _STLP_PTHREAD_ALLOC_C
williamr@2
    23
#define _STLP_PTHREAD_ALLOC_C
williamr@2
    24
williamr@2
    25
#ifdef __WATCOMC__
williamr@2
    26
#pragma warning 13 9
williamr@2
    27
#pragma warning 367 9
williamr@2
    28
#pragma warning 368 9
williamr@2
    29
#endif
williamr@2
    30
williamr@2
    31
#ifndef _STLP_PTHREAD_ALLOC_H
williamr@2
    32
# include <stl/_pthread_alloc.h>
williamr@2
    33
#endif
williamr@2
    34
williamr@2
    35
# if defined (_STLP_EXPOSE_GLOBALS_IMPLEMENTATION)
williamr@2
    36
williamr@2
    37
# include <cerrno>
williamr@2
    38
williamr@2
    39
_STLP_BEGIN_NAMESPACE
williamr@2
    40
williamr@2
    41
template <size_t _Max_size>
williamr@2
    42
void _Pthread_alloc<_Max_size>::_S_destructor(void * __instance)
williamr@2
    43
{
williamr@2
    44
    _M_lock __lock_instance;	// Need to acquire lock here.
williamr@2
    45
    _Pthread_alloc_per_thread_state<_Max_size>* __s =
williamr@2
    46
        (_Pthread_alloc_per_thread_state<_Max_size> *)__instance;
williamr@2
    47
    __s -> __next = _S_free_per_thread_states;
williamr@2
    48
    _S_free_per_thread_states = __s;
williamr@2
    49
}
williamr@2
    50
williamr@2
    51
template <size_t _Max_size>
williamr@2
    52
_Pthread_alloc_per_thread_state<_Max_size> *
williamr@2
    53
_Pthread_alloc<_Max_size>::_S_new_per_thread_state()
williamr@2
    54
{    
williamr@2
    55
    /* lock already held here.	*/
williamr@2
    56
    if (0 != _S_free_per_thread_states) {
williamr@2
    57
        _Pthread_alloc_per_thread_state<_Max_size> *__result =
williamr@2
    58
					_S_free_per_thread_states;
williamr@2
    59
        _S_free_per_thread_states = _S_free_per_thread_states -> __next;
williamr@2
    60
        return __result;
williamr@2
    61
    } else {
williamr@2
    62
        return (_Pthread_alloc_per_thread_state<_Max_size>*) \
williamr@2
    63
                _STLP_PLACEMENT_NEW (_Pthread_alloc_per_thread_state<_Max_size>);
williamr@2
    64
    }
williamr@2
    65
}
williamr@2
    66
williamr@2
    67
template <size_t _Max_size>
williamr@2
    68
_Pthread_alloc_per_thread_state<_Max_size> *
williamr@2
    69
_Pthread_alloc<_Max_size>::_S_get_per_thread_state()
williamr@2
    70
{
williamr@2
    71
williamr@2
    72
    int __ret_code;
williamr@2
    73
    __state_type* __result;
williamr@2
    74
    
williamr@2
    75
    if (_S_key_initialized && (__result = (__state_type*) pthread_getspecific(_S_key)))
williamr@2
    76
      return __result;
williamr@2
    77
    
williamr@2
    78
    /*REFERENCED*/
williamr@2
    79
    _M_lock __lock_instance;	// Need to acquire lock here.
williamr@2
    80
    if (!_S_key_initialized) {
williamr@2
    81
      if (pthread_key_create(&_S_key, _S_destructor)) {
williamr@2
    82
	__THROW_BAD_ALLOC;  // failed
williamr@2
    83
      }
williamr@2
    84
      _S_key_initialized = true;
williamr@2
    85
    }
williamr@2
    86
williamr@2
    87
    __result = _S_new_per_thread_state();
williamr@2
    88
    __ret_code = pthread_setspecific(_S_key, __result);
williamr@2
    89
    if (__ret_code) {
williamr@2
    90
      if (__ret_code == ENOMEM) {
williamr@2
    91
	__THROW_BAD_ALLOC;
williamr@2
    92
      } else {
williamr@2
    93
	// EINVAL
williamr@2
    94
	_STLP_ABORT();
williamr@2
    95
      }
williamr@2
    96
    }
williamr@2
    97
    return __result;
williamr@2
    98
}
williamr@2
    99
williamr@2
   100
/* We allocate memory in large chunks in order to avoid fragmenting     */
williamr@2
   101
/* the malloc heap too much.                                            */
williamr@2
   102
/* We assume that size is properly aligned.                             */
williamr@2
   103
template <size_t _Max_size>
williamr@2
   104
char *_Pthread_alloc<_Max_size>
williamr@2
   105
::_S_chunk_alloc(size_t __p_size, size_t &__nobjs)
williamr@2
   106
{
williamr@2
   107
  {
williamr@2
   108
    char * __result;
williamr@2
   109
    size_t __total_bytes;
williamr@2
   110
    size_t __bytes_left;
williamr@2
   111
    /*REFERENCED*/
williamr@2
   112
    _M_lock __lock_instance;         // Acquire lock for this routine
williamr@2
   113
williamr@2
   114
    __total_bytes = __p_size * __nobjs;
williamr@2
   115
    __bytes_left = _S_end_free - _S_start_free;
williamr@2
   116
    if (__bytes_left >= __total_bytes) {
williamr@2
   117
        __result = _S_start_free;
williamr@2
   118
        _S_start_free += __total_bytes;
williamr@2
   119
        return(__result);
williamr@2
   120
    } else if (__bytes_left >= __p_size) {
williamr@2
   121
        __nobjs = __bytes_left/__p_size;
williamr@2
   122
        __total_bytes = __p_size * __nobjs;
williamr@2
   123
        __result = _S_start_free;
williamr@2
   124
        _S_start_free += __total_bytes;
williamr@2
   125
        return(__result);
williamr@2
   126
    } else {
williamr@2
   127
        size_t __bytes_to_get =
williamr@2
   128
		2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
williamr@2
   129
        // Try to make use of the left-over piece.
williamr@2
   130
        if (__bytes_left > 0) {
williamr@2
   131
            _Pthread_alloc_per_thread_state<_Max_size>* __a = 
williamr@2
   132
                (_Pthread_alloc_per_thread_state<_Max_size>*)
williamr@2
   133
			pthread_getspecific(_S_key);
williamr@2
   134
            __obj * volatile * __my_free_list =
williamr@2
   135
                        __a->__free_list + _S_freelist_index(__bytes_left);
williamr@2
   136
williamr@2
   137
            ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
williamr@2
   138
            *__my_free_list = (__obj *)_S_start_free;
williamr@2
   139
        }
williamr@2
   140
#       ifdef _SGI_SOURCE
williamr@2
   141
          // Try to get memory that's aligned on something like a
williamr@2
   142
          // cache line boundary, so as to avoid parceling out
williamr@2
   143
          // parts of the same line to different threads and thus
williamr@2
   144
          // possibly different processors.
williamr@2
   145
          {
williamr@2
   146
            const int __cache_line_size = 128;  // probable upper bound
williamr@2
   147
            __bytes_to_get &= ~(__cache_line_size-1);
williamr@2
   148
            _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get); 
williamr@2
   149
            if (0 == _S_start_free) {
williamr@2
   150
              _S_start_free = (char *)__malloc_alloc<0>::allocate(__bytes_to_get);
williamr@2
   151
            }
williamr@2
   152
          }
williamr@2
   153
#       else  /* !SGI_SOURCE */
williamr@2
   154
          _S_start_free = (char *)__malloc_alloc<0>::allocate(__bytes_to_get);
williamr@2
   155
#       endif
williamr@2
   156
        _S_heap_size += __bytes_to_get;
williamr@2
   157
        _S_end_free = _S_start_free + __bytes_to_get;
williamr@2
   158
    }
williamr@2
   159
  }
williamr@2
   160
  // lock is released here
williamr@2
   161
  return(_S_chunk_alloc(__p_size, __nobjs));
williamr@2
   162
}
williamr@2
   163
williamr@2
   164
williamr@2
   165
/* Returns an object of size n, and optionally adds to size n free list.*/
williamr@2
   166
/* We assume that n is properly aligned.                                */
williamr@2
   167
/* We hold the allocation lock.                                         */
williamr@2
   168
template <size_t _Max_size>
williamr@2
   169
void *_Pthread_alloc_per_thread_state<_Max_size>
williamr@2
   170
::_M_refill(size_t __n)
williamr@2
   171
{
williamr@2
   172
    size_t __nobjs = 128;
williamr@2
   173
    char * __chunk =
williamr@2
   174
	_Pthread_alloc<_Max_size>::_S_chunk_alloc(__n, __nobjs);
williamr@2
   175
    __obj * volatile * __my_free_list;
williamr@2
   176
    __obj * __result;
williamr@2
   177
    __obj * __current_obj, * __next_obj;
williamr@2
   178
    int __i;
williamr@2
   179
williamr@2
   180
    if (1 == __nobjs)  {
williamr@2
   181
        return(__chunk);
williamr@2
   182
    }
williamr@2
   183
    __my_free_list = __free_list
williamr@2
   184
		 + _Pthread_alloc<_Max_size>::_S_freelist_index(__n);
williamr@2
   185
williamr@2
   186
    /* Build free list in chunk */
williamr@2
   187
      __result = (__obj *)__chunk;
williamr@2
   188
      *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
williamr@2
   189
      for (__i = 1; ; __i++) {
williamr@2
   190
        __current_obj = __next_obj;
williamr@2
   191
        __next_obj = (__obj *)((char *)__next_obj + __n);
williamr@2
   192
        if (__nobjs - 1 == __i) {
williamr@2
   193
            __current_obj -> __free_list_link = 0;
williamr@2
   194
            break;
williamr@2
   195
        } else {
williamr@2
   196
            __current_obj -> __free_list_link = __next_obj;
williamr@2
   197
        }
williamr@2
   198
      }
williamr@2
   199
    return(__result);
williamr@2
   200
}
williamr@2
   201
williamr@2
   202
template <size_t _Max_size>
williamr@2
   203
void *_Pthread_alloc<_Max_size>
williamr@2
   204
::reallocate(void *__p, size_t __old_sz, size_t __new_sz)
williamr@2
   205
{
williamr@2
   206
    void * __result;
williamr@2
   207
    size_t __copy_sz;
williamr@2
   208
williamr@2
   209
    if (__old_sz > _Max_size
williamr@2
   210
	&& __new_sz > _Max_size) {
williamr@2
   211
        return(realloc(__p, __new_sz));
williamr@2
   212
    }
williamr@2
   213
    if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
williamr@2
   214
    __result = allocate(__new_sz);
williamr@2
   215
    __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
williamr@2
   216
    memcpy(__result, __p, __copy_sz);
williamr@2
   217
    deallocate(__p, __old_sz);
williamr@2
   218
    return(__result);
williamr@2
   219
}
williamr@2
   220
williamr@2
   221
#if defined (_STLP_STATIC_TEMPLATE_DATA) && (_STLP_STATIC_TEMPLATE_DATA > 0)
williamr@2
   222
williamr@2
   223
template <size_t _Max_size>
williamr@2
   224
_Pthread_alloc_per_thread_state<_Max_size> * _Pthread_alloc<_Max_size>::_S_free_per_thread_states = 0;
williamr@2
   225
williamr@2
   226
template <size_t _Max_size>
williamr@2
   227
pthread_key_t _Pthread_alloc<_Max_size>::_S_key =0;
williamr@2
   228
williamr@2
   229
template <size_t _Max_size>
williamr@2
   230
bool _Pthread_alloc<_Max_size>::_S_key_initialized = false;
williamr@2
   231
williamr@2
   232
template <size_t _Max_size>
williamr@2
   233
_STLP_mutex_base _Pthread_alloc<_Max_size>::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
williamr@2
   234
williamr@2
   235
template <size_t _Max_size>
williamr@2
   236
char *_Pthread_alloc<_Max_size>::_S_start_free = 0;
williamr@2
   237
williamr@2
   238
template <size_t _Max_size>
williamr@2
   239
char *_Pthread_alloc<_Max_size>::_S_end_free = 0;
williamr@2
   240
williamr@2
   241
template <size_t _Max_size>
williamr@2
   242
size_t _Pthread_alloc<_Max_size>::_S_heap_size = 0;
williamr@2
   243
williamr@2
   244
 # else
williamr@2
   245
 
williamr@2
   246
 __DECLARE_INSTANCE(template <size_t _Max_size> _Pthread_alloc_per_thread_state<_Max_size> *, _Pthread_alloc<_Max_size>::_S_free_per_thread_states, = 0);
williamr@2
   247
 __DECLARE_INSTANCE(template <size_t _Max_size> pthread_key_t, _Pthread_alloc<_Max_size>::_S_key, = 0);
williamr@2
   248
 __DECLARE_INSTANCE(template <size_t _Max_size> bool, _Pthread_alloc<_Max_size>::_S_key_initialized, = false);
williamr@2
   249
 __DECLARE_INSTANCE(template <size_t _Max_size> char *, _Pthread_alloc<_Max_size>::_S_start_free, = 0);
williamr@2
   250
 __DECLARE_INSTANCE(template <size_t _Max_size> char *, _Pthread_alloc<_Max_size>::_S_end_free, = 0);
williamr@2
   251
 __DECLARE_INSTANCE(template <size_t _Max_size> size_t, _Pthread_alloc<_Max_size>::_S_heap_size, = 0);
williamr@2
   252
williamr@2
   253
# endif
williamr@2
   254
williamr@2
   255
_STLP_END_NAMESPACE
williamr@2
   256
williamr@2
   257
# endif /* _STLP_EXPOSE_GLOBALS_IMPLEMENTATION */
williamr@2
   258
williamr@2
   259
#endif /*  _STLP_PTHREAD_ALLOC_C */
williamr@2
   260
williamr@2
   261
// Local Variables:
williamr@2
   262
// mode:C++
williamr@2
   263
// End: