os/kernelhwsrv/kernel/eka/compsupp/symaehabi/unwinder.c
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
/* unwinder.c
sl@0
     2
 *
sl@0
     3
 * Copyright 2002-2005 ARM Limited. All rights reserved.
sl@0
     4
 *
sl@0
     5
 * Your rights to use this code are set out in the accompanying licence
sl@0
     6
 * text file LICENCE.txt (ARM contract number LEC-ELA-00080 v1.0).
sl@0
     7
 */
sl@0
     8
sl@0
     9
/* Portions copyright Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies). */
sl@0
    10
sl@0
    11
/*
sl@0
    12
 * RCS $Revision: 92986 $
sl@0
    13
 * Checkin $Date: 2005-10-13 15:56:12 +0100 (Thu, 13 Oct 2005) $
sl@0
    14
 * Revising $Author: achapman $
sl@0
    15
 */
sl@0
    16
sl@0
    17
/* Language-independent unwinder implementation */
sl@0
    18
sl@0
    19
/* This source file is compiled automatically by ARM's make system into
sl@0
    20
 * multiple object files. The source regions constituting object file
sl@0
    21
 * xxx.o are delimited by ifdef xxx_c / endif directives.
sl@0
    22
 *
sl@0
    23
 * The source regions currently marked are:
sl@0
    24
 * unwinder_c
sl@0
    25
 * unwind_activity_c
sl@0
    26
 */
sl@0
    27
sl@0
    28
#ifndef __EPOC32__
sl@0
    29
#include <stddef.h>
sl@0
    30
#include <stdlib.h>
sl@0
    31
#else
sl@0
    32
#include <e32def.h>
sl@0
    33
#endif
sl@0
    34
/* Environment: */
sl@0
    35
#include "unwind_env.h"
sl@0
    36
/* Language-independent unwinder declarations: */
sl@0
    37
#include "unwinder.h"
sl@0
    38
sl@0
    39
#ifdef __EPOC32__
sl@0
    40
/* Symbian specific support */
sl@0
    41
#include "symbian_support.h"
sl@0
    42
#endif
sl@0
    43
sl@0
    44
/* Define UNWIND_ACTIVITY_DIAGNOSTICS for printed information from _Unwind_Activity */
sl@0
    45
/* Define VRS_DIAGNOSTICS for printed diagnostics about VRS operations */
sl@0
    46
sl@0
    47
#if defined(VRS_DIAGNOSTICS) || defined(UNWIND_ACTIVITY_DIAGNOSTICS)
sl@0
    48
#ifndef __EPOC32__
sl@0
    49
extern int printf(const char *, ...);
sl@0
    50
#endif
sl@0
    51
#endif
sl@0
    52
sl@0
    53
#ifdef SUPPORT_NESTED_EXCEPTIONS
sl@0
    54
extern _Unwind_Control_Block *AllocSavedUCB();
sl@0
    55
extern void FreeSavedUCB(_Unwind_Control_Block *context);
sl@0
    56
#endif
sl@0
    57
sl@0
    58
#ifdef unwinder_c
sl@0
    59
sl@0
    60
/* =========================                      ========================= */
sl@0
    61
/* ========================= Virtual register set ========================= */
sl@0
    62
/* =========================                      ========================= */
sl@0
    63
sl@0
    64
/* The approach taken by this implementation is to use the real machine
sl@0
    65
 * registers to hold all but the values of core (integer)
sl@0
    66
 * registers. Consequently the implementation must use only the core
sl@0
    67
 * registers except when manipulating the virtual register set. Non-core
sl@0
    68
 * registers are saved only on first use, so the single implementation can
sl@0
    69
 * cope with execution on processors which lack certain registers.  The
sl@0
    70
 * registers as they were at the start of the propagation must be preserved
sl@0
    71
 * over phase 1 so that the machine state is correct at the start of phase
sl@0
    72
 * 2. This requires a copy to be taken (which can be stack allocated). During
sl@0
    73
 * a stack unwind (phase 1 or phase 2), the "current" virtual register set is
sl@0
    74
 * implemented as core register values held in a data structure, and non-core
sl@0
    75
 * register values held in the registers themselves. To ensure that all
sl@0
    76
 * original register values are available at the beginning of phase 2, the
sl@0
    77
 * core registers are saved in a second structure at the start of phase 1 and
sl@0
    78
 * the non-core registers are demand-saved into another part of the data
sl@0
    79
 * structure that holds the current core registers during the phase 1 stack
sl@0
    80
 * unwind.
sl@0
    81
 */
sl@0
    82
/* Extent to which the access routines are implemented:
sl@0
    83
 * _Unwind_VRS_Get and _Unwind_VRS_Set implement only access to the core registers.
sl@0
    84
 * _Unwind_VRS_Pop implements only popping of core and vfp registers.
sl@0
    85
 * There is no support here for the Intel WMMX registers, but space is nevertheless
sl@0
    86
 * reserved in the virtual register set structure to indicate whether demand-saving
sl@0
    87
 * of those registers is required (as they are unsupported, it never is). The space
sl@0
    88
 * costs nothing as it is required for alignment.
sl@0
    89
 * The level of supported functionality is compliant with the requirements of the
sl@0
    90
 * Exceptions ABI.
sl@0
    91
 */
sl@0
    92
sl@0
    93
typedef unsigned char bool;                /* local boolean type (pre-C99 toolchain) */
struct core_s  { uint32_t r[16]; };        /* core integer regs */
struct vfp_s   { uint64_t d[32]; };        /* VFP registers saved in FSTMD format */

/* Phase 1 virtual register set includes demand-save areas */
/* The phase 2 virtual register set must be a prefix of the phase 1 set */
typedef struct phase1_virtual_register_set_s {
  /* demand_save flag == 1 means save the registers in the demand-save area */
  bool demand_save_vfp_low;   /* cleared by _Unwind_VRS_Pop once d0-d15 are captured in vfp */
  bool demand_save_vfp_high;  /* cleared by _Unwind_VRS_Pop once d16-d31 are captured in vfp */
  bool demand_save_wmmxd;     /* WMMX unsupported here, so never actually saved */
  bool demand_save_wmmxc;     /* present only for alignment/ABI layout */
  struct core_s core;      /* current core registers */
  struct vfp_s  vfp;       /* demand-saved vfp registers */
} phase1_virtual_register_set;
sl@0
   108
sl@0
   109
/* Phase 2 virtual register set has no demand-save areas */
sl@0
   110
/* The phase 2 virtual register set must be a prefix of the phase 1 set */
sl@0
   111
/* The assembly fragments for _Unwind_RaiseException and _Unwind_Resume create
sl@0
   112
 * a phase2_virtual_register_set_s by hand so be careful.
sl@0
   113
 */
sl@0
   114
typedef struct phase2_virtual_register_set_s {
  /* demand_save flag == 1 means save the registers in the demand-save area */
  /* Always 0 in phase 2 */
  /* NOTE: layout must remain a prefix of phase1_virtual_register_set_s,
   * and is replicated by hand in assembly fragments elsewhere — do not
   * reorder or resize these fields.
   */
  bool demand_save_vfp_low;
  bool demand_save_vfp_high;
  bool demand_save_wmmxd;
  bool demand_save_wmmxc;
  struct core_s core;      /* current core registers */
} phase2_virtual_register_set;
sl@0
   123
sl@0
   124
/* -- Helper macros for the embedded assembly */
sl@0
   125
sl@0
   126
#if defined(__TARGET_ARCH_5T)  || defined(__TARGET_ARCH_5TXM) || \
sl@0
   127
    defined(__TARGET_ARCH_5TE) || defined(__TARGET_ARCH_6) || \
sl@0
   128
    defined(__TARGET_ARCH_6T2) || defined(__TARGET_ARCH_7_A) /* || ... */
sl@0
   129
  #define ARCH_5T_OR_LATER 1
sl@0
   130
#else
sl@0
   131
  #define ARCH_5T_OR_LATER 0
sl@0
   132
#endif
sl@0
   133
sl@0
   134
#if defined(__APCS_INTERWORK) && !ARCH_5T_OR_LATER
sl@0
   135
  #define OLD_STYLE_INTERWORKING 1
sl@0
   136
#else
sl@0
   137
  #define OLD_STYLE_INTERWORKING 0
sl@0
   138
#endif
sl@0
   139
sl@0
   140
#if defined(__TARGET_ARCH_4T) || defined(__TARGET_ARCH_4TXM) || ARCH_5T_OR_LATER
sl@0
   141
  #define HAVE_BX 1
sl@0
   142
#else
sl@0
   143
  #define HAVE_BX 0
sl@0
   144
#endif
sl@0
   145
sl@0
   146
#if defined(__TARGET_ARCH_THUMBNAIL)
sl@0
   147
  #define THUMBNAIL 1
sl@0
   148
#else
sl@0
   149
  #define THUMBNAIL 0
sl@0
   150
#endif
sl@0
   151
sl@0
   152
#if HAVE_BX
sl@0
   153
  #define RET_LR bx lr
sl@0
   154
#else
sl@0
   155
  #define RET_LR mov pc,lr
sl@0
   156
#endif
sl@0
   157
sl@0
   158
/* ----- Routines: ----- */
sl@0
   159
sl@0
   160
/* ----- Helper routines, private ----- */
sl@0
   161
sl@0
   162
/* R_ARM_PREL31 is a place-relative 31-bit signed relocation.  The
sl@0
   163
 * routine takes the address of a location that was relocated by
sl@0
   164
 * R_ARM_PREL31, and returns an absolute address.
sl@0
   165
 */
sl@0
   166
static FORCEINLINE uint32_t __ARM_resolve_prel31(void *p)
{
  /* Resolve an R_ARM_PREL31 place-relative 31-bit signed offset to an
   * absolute address: sign-extend bit 30 into bit 31, then bias by the
   * address of the relocated word itself.
   *
   * The traditional ((x << 1) >> 1) idiom on a signed 32-bit value
   * left-shifts a possibly-negative int (undefined behaviour in C) and
   * depends on an arithmetic right shift (implementation-defined).
   * This unsigned formulation is fully defined and yields the same
   * result modulo 2^32.
   */
  uint32_t offset = *(uint32_t *)p;
  offset = (offset & 0x7fffffffu) | ((offset & 0x40000000u) << 1);
  return offset + (uint32_t)p;
}
sl@0
   170
sl@0
   171
/* ----- Helper routines, private but external ----- */
sl@0
   172
sl@0
   173
/* Note '%0' refers to local label '0' */
sl@0
   174
#if defined(__thumb)
sl@0
   175
#define MAYBE_SWITCH_TO_ARM_STATE SWITCH_TO_ARM_STATE
sl@0
   176
#define MAYBE_CODE16 code16
sl@0
   177
#else
sl@0
   178
#define MAYBE_SWITCH_TO_ARM_STATE /* nothing */
sl@0
   179
#define MAYBE_CODE16              /* nothing */
sl@0
   180
#endif
sl@0
   181
__asm void __ARM_Unwind_VRS_VFPpreserve_low(void *vfpp)
{
  /* Store VFP registers d0-d15 into the memory block addressed by vfpp
   * (in r0), in FSTMIAD (64-bit data) format.  The transfer is encoded
   * as a generic coprocessor store (stc to p11) so it assembles even
   * without VFP support in the toolchain.
   */
vfp_d0 CN 0;
  /* Preserve the low vfp registers in the passed memory */
#if defined(__thumb)
  /* Define the SWITCH_TO_ARM_STATE macro used via
   * MAYBE_SWITCH_TO_ARM_STATE below: align to a word boundary, then
   * bx pc / nop to drop into ARM state (the stc form used here is an
   * ARM-state instruction).  The assert checks the align inserted no
   * padding between the two local labels.
   */
  macro;
  SWITCH_TO_ARM_STATE;
1
  align 4;
2
  assert (%2 - %1) = 0;
  bx pc;
  nop;
  code32;
  mend;
#endif

  MAYBE_SWITCH_TO_ARM_STATE;
  stc   p11,vfp_d0,[r0],{0x20};  /* 0xec800b20  FSTMIAD r0,{d0-d15} */
  RET_LR;
  MAYBE_CODE16;                  /* resume Thumb encoding when built for Thumb */
}
sl@0
   203
sl@0
   204
__asm void __ARM_Unwind_VRS_VFPpreserve_high(void *vfpp)
{
  /* Store VFP registers d16-d31 into the memory block addressed by vfpp
   * (in r0), in FSTMIAD format.  The long-form stcl with coprocessor
   * register number 0 addresses the upper register bank (see comment).
   */
vfp_d16 CN 0;                      /* =16 when used with stcl */
  /* Preserve the high vfp registers in the passed memory */
  MAYBE_SWITCH_TO_ARM_STATE;
  stcl  p11,vfp_d16,[r0],{0x20};  /* 0xecc00b20  FSTMIAD r0,{d16-d31} */
  RET_LR;
  MAYBE_CODE16;
}
sl@0
   213
sl@0
   214
__asm void __ARM_Unwind_VRS_VFPrestore_low(void *vfpp)
{
  /* Restore the low vfp registers from the passed memory */
  /* Reload d0-d15 from the block at vfpp (r0); exact inverse of
   * __ARM_Unwind_VRS_VFPpreserve_low.
   */
vfp_d0 CN 0;
  MAYBE_SWITCH_TO_ARM_STATE;
  ldc   p11,vfp_d0,[r0],{0x20};  /* 0xec900b20  FLDMIAD r0,{d0-d15} */
  RET_LR;
  MAYBE_CODE16;
}
sl@0
   223
sl@0
   224
__asm void __ARM_Unwind_VRS_VFPrestore_high(void *vfpp)
{
  /* Restore the high vfp registers from the passed memory */
  /* Reload d16-d31 from the block at vfpp (r0); exact inverse of
   * __ARM_Unwind_VRS_VFPpreserve_high.
   */
vfp_d16 CN 0;                      /* =16 when used with ldcl */
  MAYBE_SWITCH_TO_ARM_STATE;
  ldcl   p11,vfp_d16,[r0],{0x20};  /* 0xecd00b20  FLDMIAD r0,{d16-d31} */
  RET_LR;
  MAYBE_CODE16;
}
sl@0
   233
sl@0
   234
sl@0
   235
__asm NORETURNDECL void __ARM_Unwind_VRS_corerestore(void *corep)
{
  /* Reload all 16 core registers (r0-r15, including sp and pc) from the
   * struct core_s at corep, transferring control to the restored pc.
   * Never returns.
   */
  /* We rely here on corep pointing to a location in the stack,
   * as we briefly assign it to sp. This allows us to safely do
   * ldmia's which restore sp (if we use a different base register,
   * the updated sp may be used by the handler of any data abort
   * that occurs during the ldmia, and the stack gets overwritten).
   * By hypothesis this is preserve8 but the load of sp means the
   * assembler can't infer that.
   */
#if THUMBNAIL
  /* Thumb-2-only build: LDM cannot target sp/pc here, so load r0-r12
   * as a block, then lr, pc (into r12) and sp individually, and bx out.
   */
  preserve8;
  mov.w   r13, r0;
  ldmia.w r13!,{r0-r12};
  ldr.w   r14, [r13, #4]   /* lr */
  ldr.w   r12, [r13, #4*2] /* pc */
  ldr.w   r13, [r13, #0]   /* sp */
  bx      r12

#else
  preserve8;
  MAYBE_SWITCH_TO_ARM_STATE;
#if OLD_STYLE_INTERWORKING
  /* Pre-v5T interworking: pc must be entered via bx, so pull it into
   * r12 before restoring sp/lr.
   */
  mov   r13, r0;
  ldmia r13!,{r0-r12};
  ldr   r12,[r13, #4*2]; /* pc */
  ldmia r13,{r13-r14};
  bx    r12;
#else

  #if __ARMCC_VERSION < 300000
  /* Older armcc: single LDM can restore the complete set including pc */
  mov   r13, r0;
  ldmia r13,{r0-r15};
  #else
  /* Newer armcc rejects ldm with both sp and pc in the list: restore
   * r0-r12, then sp, then lr+pc.
   */
  mov r14, r0;
  ldmia r14!, {r0-r12};
  ldr r13, [r14], #4;
  ldmia r14, {r14,r15};
  #endif

#endif
  MAYBE_CODE16;
#endif
}
sl@0
   279
sl@0
   280
sl@0
   281
/* ----- Development support ----- */
sl@0
   282
sl@0
   283
#ifdef VRS_DIAGNOSTICS
sl@0
   284
static void debug_print_vrs_vfp(uint32_t base, uint64_t *lp)
{
  /* Diagnostic dump of 16 consecutive double registers starting at
   * D<base>, printed two per output line.
   */
  int idx;
  for (idx = 0; idx < 16; idx++) {
    printf("D%-2d  0x%16.16llx    ", idx + base, lp[idx]);
    if ((idx & 1) == 1)
      printf("\n");
  }
}
sl@0
   297
sl@0
   298
sl@0
   299
static void debug_print_vrs(_Unwind_Context *context)
{
  /* Diagnostic dump of the whole virtual register set: the 16 core
   * registers four per line, then each VFP bank — or a note when a bank
   * has not been demand-saved yet (demand_save flag still 1).
   */
  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
  int regnum;

  printf("------------------------------------------------------------------------\n");
  for (regnum = 0; regnum < 16; regnum++) {
    printf("r%-2d  0x%8.8x    ", regnum, vrsp->core.r[regnum]);
    if ((regnum & 3) == 3)
      printf("\n");
  }

  printf("-----\n");
  if (vrsp->demand_save_vfp_low == 1)
    printf("VFP low registers not saved\n");
  else
    debug_print_vrs_vfp(0, &vrsp->vfp.d[0]);

  printf("-----\n");
  if (vrsp->demand_save_vfp_high == 1)
    printf("VFP high registers not saved\n");
  else
    debug_print_vrs_vfp(16, &vrsp->vfp.d[16]);

  printf("------------------------------------------------------------------------\n");
}
sl@0
   326
#endif
sl@0
   327
sl@0
   328
sl@0
   329
/* ----- Public routines ----- */
sl@0
   330
sl@0
   331
EXPORT_C _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *context,
                                            _Unwind_VRS_RegClass regclass,
                                            uint32_t regno,
                                            _Unwind_VRS_DataRepresentation representation,
                                            void *valuep)
{
  /* Write one 32-bit core register in the virtual register set.
   * Only _UVRSC_CORE with _UVRSD_UINT32 and regno 0..15 is supported;
   * the VFP and WMMX classes report _UVRSR_NOT_IMPLEMENTED, and any
   * other request fails.  This coverage is sufficient for the
   * Exceptions ABI (see the note above on supported functionality).
   */
  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;

  if (regclass == _UVRSC_CORE) {
    if (regno > 15 || representation != _UVRSD_UINT32)
      return _UVRSR_FAILED;
    vrsp->core.r[regno] = *(uint32_t *)valuep;
    return _UVRSR_OK;
  }

  if (regclass == _UVRSC_VFP || regclass == _UVRSC_WMMXD || regclass == _UVRSC_WMMXC)
    return _UVRSR_NOT_IMPLEMENTED;

  return _UVRSR_FAILED;
}
sl@0
   355
sl@0
   356
sl@0
   357
EXPORT_C _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *context,
                                            _Unwind_VRS_RegClass regclass,
                                            uint32_t regno,
                                            _Unwind_VRS_DataRepresentation representation,
                                            void *valuep)
{
  /* Read one 32-bit core register from the virtual register set into
   * *valuep.  Only _UVRSC_CORE with _UVRSD_UINT32 and regno 0..15 is
   * supported; the VFP and WMMX classes report _UVRSR_NOT_IMPLEMENTED,
   * and any other request fails.
   */
  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;

  if (regclass == _UVRSC_CORE) {
    if (regno > 15 || representation != _UVRSD_UINT32)
      return _UVRSR_FAILED;
    *(uint32_t *)valuep = vrsp->core.r[regno];
    return _UVRSR_OK;
  }

  if (regclass == _UVRSC_VFP || regclass == _UVRSC_WMMXD || regclass == _UVRSC_WMMXC)
    return _UVRSR_NOT_IMPLEMENTED;

  return _UVRSR_FAILED;
}
sl@0
   381
sl@0
   382
sl@0
   383
#define R_SP 13
sl@0
   384
sl@0
   385
EXPORT_C _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *context,
                                            _Unwind_VRS_RegClass regclass,
                                            uint32_t descriminator,
                                            _Unwind_VRS_DataRepresentation representation)
{
  /* Pop registers of the given class off the virtual stack (core.r[R_SP])
   * into the virtual register set.
   * - _UVRSC_CORE: descriminator's low 16 bits are a register mask
   *   (bit n => rn); representation must be _UVRSD_UINT32.
   * - _UVRSC_VFP: descriminator is (first_reg << 16) | count; the
   *   representation selects the stacked format: _UVRSD_VFPX (FSTMX,
   *   d0-d15 only, trailing format word) or _UVRSD_DOUBLE (FSTMD,
   *   up to d31).
   * - WMMX classes are not implemented; anything else fails.
   * Returns _UVRSR_OK, _UVRSR_NOT_IMPLEMENTED or _UVRSR_FAILED.
   */
  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
  switch (regclass) {
  case _UVRSC_CORE:
    {
      /* If SP is included in the mask, the loaded value is used in preference to
       * the writeback value, but only on completion of the loading.
       */
      uint32_t mask, *vsp, *rp, sp_loaded;
      if (representation != _UVRSD_UINT32)
        return _UVRSR_FAILED;
      vsp = (uint32_t *)vrsp->core.r[R_SP];
      rp = (uint32_t *)&vrsp->core;
      mask = descriminator & 0xffff;
      sp_loaded = mask & (1 << R_SP);
      /* Walk the mask from r0 upwards, loading each selected register
       * from successive stack words.
       */
      while (mask != 0) {
        if (mask & 1) {
#ifdef VRS_DIAGNOSTICS
          printf("VRS Pop r%d\n", rp - &vrsp->core.r[0]);
#endif
          *rp = *vsp++;
        }
        rp++;
        mask >>= 1;
      }
      /* SP not in the mask: write back the advanced stack pointer */
      if (!sp_loaded)
        vrsp->core.r[R_SP] = (uint32_t)vsp;
      return _UVRSR_OK;
    }
  case _UVRSC_VFP:
    {
      uint32_t start = descriminator >> 16;
      uint32_t count = descriminator & 0xffff;
      bool some_low = start < 16;           /* range touches d0-d15 */
      bool some_high = start + count > 16;  /* range touches d16-d31 */
      if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE) ||
          (representation == _UVRSD_VFPX && some_high) ||
          (representation == _UVRSD_DOUBLE && start + count > 32))
        return _UVRSR_FAILED;
      if (some_low && vrsp->demand_save_vfp_low == 1) { /* Demand-save over phase 1 */
        vrsp->demand_save_vfp_low = 0;
        __ARM_Unwind_VRS_VFPpreserve_low(&vrsp->vfp.d[0]);
      }
      if (some_high && vrsp->demand_save_vfp_high == 1) { /* Demand-save over phase 1 */
        vrsp->demand_save_vfp_high = 0;
        __ARM_Unwind_VRS_VFPpreserve_high(&vrsp->vfp.d[16]);
      }
      /* Now recover from the stack into the real machine registers.
       * Note for _UVRSD_VFPX we assume FSTMX standard format 1.
       * Do this by saving the current VFP registers to a memory area,
       * moving the in-memory values into that area, and
       * restoring from the whole area.
       * Must be careful as the 64-bit values saved by FSTMX might be
       * only 32-bit aligned.
       */
      {
        struct unaligned_vfp_reg_s { uint32_t w1; uint32_t w2; };
        struct unaligned_vfp_reg_s *vsp;
        struct vfp_s temp_vfp;
        if (some_low)
          __ARM_Unwind_VRS_VFPpreserve_low(&temp_vfp.d[0]);
        if (some_high)
          __ARM_Unwind_VRS_VFPpreserve_high(&temp_vfp.d[16]);
        vsp = (struct unaligned_vfp_reg_s *)vrsp->core.r[R_SP];
        /* Copy each popped value as two 32-bit words (alignment-safe) */
        while (count--) {
          struct unaligned_vfp_reg_s *v =
            (struct unaligned_vfp_reg_s *)&temp_vfp.d[start++];
          *v = *vsp++;
#ifdef VRS_DIAGNOSTICS
          printf("VRS Pop D%d = 0x%llx\n", start - 1, temp_vfp.d[start - 1]);
#endif
        }
        vrsp->core.r[R_SP] = (uint32_t)((uint32_t *)vsp +
                                        (representation == _UVRSD_VFPX ?
                                         1 : /* +1 to skip the format word */
                                         0));
        if (some_low)
          __ARM_Unwind_VRS_VFPrestore_low(&temp_vfp.d[0]);
        if (some_high)
          __ARM_Unwind_VRS_VFPrestore_high(&temp_vfp.d[16]);
      }
      return _UVRSR_OK;
    }
  case _UVRSC_WMMXD:
  case _UVRSC_WMMXC:
    return _UVRSR_NOT_IMPLEMENTED;
  default:
    break;
  }
  return _UVRSR_FAILED;
}
sl@0
   480
sl@0
   481
sl@0
   482
sl@0
   483
/* =========================              ========================= */
sl@0
   484
/* ========================= The unwinder ========================= */
sl@0
   485
/* =========================              ========================= */
sl@0
   486
sl@0
   487
sl@0
   488
/* This implementation uses the UCB unwinder_cache as follows:
sl@0
   489
 * reserved1 is documented in the EABI as requiring initialisation to 0.
sl@0
   490
 *  It is used to manage nested simultaneous propagation. If the value is 0,
sl@0
   491
 *  the UCB is participating in no propagations. If the value is 1, the UCB
sl@0
   492
 *  is participating in one propagation. Otherwise the value is a pointer to
sl@0
   493
 *  a structure holding saved UCB state from the next propagation out.
sl@0
   494
 *  The structure used is simply a mallocated UCB.
sl@0
   495
 * reserved2 is used to preserve the call-site address over calls to a
sl@0
   496
 *  personality routine and cleanup.
sl@0
   497
 * reserved3 is used to cache the PR address.
sl@0
   498
 * reserved4 is used by the Symbian implementation to cache the ROM exception 
sl@0
   499
 *  search table
sl@0
   500
 * reserved5 is used by the Symbian implementation to cache the 
sl@0
   501
 *  TExceptionDescriptor for the executable of the 'current' frame
sl@0
   502
 */
sl@0
   503
sl@0
   504
#define NESTED_CONTEXT      unwinder_cache.reserved1
sl@0
   505
#define SAVED_CALLSITE_ADDR unwinder_cache.reserved2
sl@0
   506
#define PR_ADDR             unwinder_cache.reserved3
sl@0
   507
sl@0
   508
/* Index table entry: */
sl@0
   509
sl@0
   510
#ifndef __EPOC32__  // Symbian OS defines this in symbian_support.h
sl@0
   511
typedef struct __EIT_entry {
  uint32_t fnoffset; /* Place-relative */
  uint32_t content;  /* EXIDX_CANTUNWIND, an inline entry (bit 31 set),
                      * or a prel31 offset to an out-of-line EHT entry */
} __EIT_entry;
sl@0
   515
#endif
sl@0
   516
sl@0
   517
/* Private defines etc: */
sl@0
   518
sl@0
   519
static const uint32_t EXIDX_CANTUNWIND = 1;
sl@0
   520
static const uint32_t uint32_highbit = 0x80000000;
sl@0
   521
sl@0
   522
/* ARM C++ personality routines: */
sl@0
   523
sl@0
   524
typedef _Unwind_Reason_Code (*personality_routine)(_Unwind_State,
sl@0
   525
                                                   _Unwind_Control_Block *,
sl@0
   526
                                                   _Unwind_Context *);
sl@0
   527
sl@0
   528
WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr0(_Unwind_State state, _Unwind_Control_Block *,
sl@0
   529
                                                    _Unwind_Context *context);
sl@0
   530
IMPORT_C WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr1(_Unwind_State state, _Unwind_Control_Block *,
sl@0
   531
                                                             _Unwind_Context *context);
sl@0
   532
IMPORT_C WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr2(_Unwind_State state, _Unwind_Control_Block *,
sl@0
   533
                                                             _Unwind_Context *context);
sl@0
   534
sl@0
   535
sl@0
   536
/* Various image symbols: */
sl@0
   537
sl@0
   538
struct ExceptionTableInfo {
  uint32_t EIT_base;   /* offset from __ARM_ETInfo to the first index entry */
  uint32_t EIT_limit;  /* offset from __ARM_ETInfo to one past the last entry */
};
sl@0
   542
sl@0
   543
#ifndef __EPOC32__
sl@0
   544
/* We define __ARM_ETInfo to allow access to some linker-generated
sl@0
   545
   names that are not legal C identifiers. __ARM_ETInfo is extern only
sl@0
   546
   because of scope limitations of the embedded assembler */
sl@0
   547
extern const struct ExceptionTableInfo __ARM_ETInfo;
sl@0
   548
#define EIT_base \
sl@0
   549
    ((const __EIT_entry *)(__ARM_ETInfo.EIT_base + (const char *)&__ARM_ETInfo))
sl@0
   550
#define EIT_limit \
sl@0
   551
    ((const __EIT_entry *)(__ARM_ETInfo.EIT_limit + (const char *)&__ARM_ETInfo))
sl@0
   552
sl@0
   553
#endif
sl@0
   554
sl@0
   555
sl@0
   556
/* ----- Index table processing ----- */
sl@0
   557
sl@0
   558
/* find_and_expand_eit_entry is a support function used in both phases to set
sl@0
   559
 * ucb.pr_cache and internal cache.
sl@0
   560
 * Call with a pointer to the ucb and the return address to look up.
sl@0
   561
 *
sl@0
   562
 * The table is contained in the half-open interval
sl@0
   563
 * [EIT_base, EIT_limit) and is an ordered array of __EIT_entrys.
sl@0
   564
 * Perform a binary search via C library routine bsearch.
sl@0
   565
 * The table contains only function start addresses (encoded as offsets), so
sl@0
   566
 * we need to special-case the end table entry in the comparison function,
sl@0
   567
 * which we do by assuming the function it describes extends to end of memory.
sl@0
   568
 * This causes us problems indirectly in that we would like to fault as
sl@0
   569
 * many attempts as possible to look up an invalid return address. There are
sl@0
   570
 * several ways an invalid return address can be obtained from a broken
sl@0
   571
 * program, such as someone corrupting the stack or broken unwind instructions
sl@0
   572
 * recovered the wrong value. It is plausible that many bad return addresses
sl@0
   573
 * will be either small integers or will point into the heap or stack, hence
sl@0
   574
 * it's desirable to get the length of that final function roughly right.
sl@0
   575
 * Here we make no attempt to do it. Code exclusively for use in toolchains
sl@0
   576
 * which define a suitable limit symbol could make use of that symbol.
sl@0
   577
 * Alternatively (QoI) a smart linker could augment the index table with a
sl@0
   578
 * dummy EXIDX_CANTUNWIND entry pointing just past the last real function.
sl@0
   579
 */
sl@0
   580
sl@0
   581
#ifndef __EPOC32__
sl@0
   582
static int EIT_comparator(const void *ck, const void *ce)
sl@0
   583
{
sl@0
   584
  uint32_t return_address = *(const uint32_t *)ck;
sl@0
   585
  const __EIT_entry *eitp = (const __EIT_entry *)ce;
sl@0
   586
  const __EIT_entry *next_eitp = eitp + 1;
sl@0
   587
  uint32_t next_fn;
sl@0
   588
  if (next_eitp != EIT_limit)
sl@0
   589
    next_fn = __ARM_resolve_prel31((void *)&next_eitp->fnoffset);
sl@0
   590
  else
sl@0
   591
    next_fn = 0xffffffffU;
sl@0
   592
  if (return_address < __ARM_resolve_prel31((void *)&eitp->fnoffset)) return -1;
sl@0
   593
  if (return_address >= next_fn) return 1;
sl@0
   594
  return 0;
sl@0
   595
}
sl@0
   596
#endif
sl@0
   597
sl@0
   598
sl@0
   599
static _Unwind_Reason_Code find_and_expand_eit_entry_V2(_Unwind_Control_Block *ucbp,
                                                     uint32_t return_address)
{
  /* Search the index table for an entry containing the specified return
   * address. Subtract the 2 from the return address, as the index table
   * contains function start addresses (a trailing noreturn BL would
   * appear to return to the first address of the next function (perhaps
   * +1 if Thumb); a leading BL would appear to return to function start
   * + instruction size (perhaps +1 if Thumb)).
   *
   * On success, caches fnstart, ehtp/additional and the personality
   * routine address in the UCB and returns _URC_OK; on any failure,
   * stores a NULL PR_ADDR and returns _URC_FAILURE.
   */

#ifndef __EPOC32__
  const __EIT_entry *base = EIT_base;
  size_t nelems = EIT_limit - EIT_base;
  __EIT_entry *eitp;

  return_address -= 2;

  eitp = (__EIT_entry *) bsearch(&return_address, base, nelems,
                                 sizeof(__EIT_entry), EIT_comparator);
#else
  const __EIT_entry *base = EIT_base(ucbp);
  size_t nelems = EIT_limit(ucbp) - base;
  __EIT_entry *eitp;

  return_address -= 2;

  // This must succeed on SymbianOS or else an error will have occured already.
  eitp = SearchEITV2(return_address, base, nelems);
#endif

  if (eitp == NULL) {
    /* The return address we have was not found in the EIT.
     * This breaks the scan and we have to indicate failure.
     */
    ucbp->PR_ADDR = NULL;
    DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_LOOKUPFAILED);
    return _URC_FAILURE;
  }

  /* Cache the function offset */

  ucbp->pr_cache.fnstart = __ARM_resolve_prel31((void *)&eitp->fnoffset);

  /* Can this frame be unwound at all? */

  if (eitp->content == EXIDX_CANTUNWIND) {
    ucbp->PR_ADDR = NULL;
    DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_NOUNWIND);
    return _URC_FAILURE;
  }

  /* Obtain the address of the "real" __EHT_Header word */

  if (eitp->content & uint32_highbit) {
    /* It is immediate data */
    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
    ucbp->pr_cache.additional = 1;
  } else {
    /* The content field is a 31-bit place-relative offset to an _Unwind_EHT_Entry structure */
    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)__ARM_resolve_prel31((void *)&eitp->content);
    ucbp->pr_cache.additional = 0;
  }

  /* Discover the personality routine address */

  if (*(uint32_t *)(ucbp->pr_cache.ehtp) & uint32_highbit) {
    /* It is immediate data - compute matching pr (bits 27:24 select the
     * compact personality routine index) */
    uint32_t idx = ((*(uint32_t *)(ucbp->pr_cache.ehtp)) >> 24) & 0xf;
    if (idx == 0) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr0;
    else if (idx == 1) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr1;
    else if (idx == 2) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr2;
    else { /* Failed */
      ucbp->PR_ADDR = NULL;
      DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_TABLECORRUPT);
      return _URC_FAILURE;
    }
  } else {
    /* It's a place-relative offset to pr */
    ucbp->PR_ADDR = __ARM_resolve_prel31((void *)(ucbp->pr_cache.ehtp));
  }
  return _URC_OK;
}
sl@0
   682
sl@0
   683
static _Unwind_Reason_Code find_and_expand_eit_entry_V1(_Unwind_Control_Block *ucbp,
                                                     uint32_t return_address)
{
  /* V1 (legacy) exception index table lookup.
   * Locates the EIT entry covering return_address and caches in ucbp:
   *   - pr_cache.fnstart    : function start address,
   *   - pr_cache.ehtp       : address of the "real" __EHT_Header word,
   *   - pr_cache.additional : 1 if the header is inline immediate data,
   *   - PR_ADDR             : the personality routine address.
   * Returns _URC_OK on success; _URC_FAILURE when the address has no
   * entry, the entry is EXIDX_CANTUNWIND, or the table is corrupt.
   * DEBUGGER_BOTTLENECK is invoked on each failure path before returning.
   */

  /* Search the index table for an entry containing the specified return
   * address. The EIT contains function offsets relative to the base of the
   * execute region so adjust the return address accordingly.
   */

#ifndef __EPOC32__
  uint32_t return_address_offset = ADDR_TO_ER_RO_OFFSET(return_address, ucbp);
  const __EIT_entry *base = EIT_base;
  size_t nelems = EIT_limit - EIT_base;

   const __EIT_entry *eitp =
     (const __EIT_entry *) bsearch(&return_address_offset, base, nelems, 
                                   sizeof(__EIT_entry), EIT_comparator);
  if (eitp == NULL) {
    /* The return address we have was not found in the EIT.
     * This breaks the scan and we have to indicate failure.
     */
    ucbp->PR_ADDR = NULL;
    DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_LOOKUPFAILED);
    return _URC_FAILURE;
  }
#else
  /* Shouldn't we subtract 2 from here just like in the V2 lookup? 
   */
  uint32_t return_address_offset = ADDR_TO_ER_RO_OFFSET(return_address, ucbp);
  const __EIT_entry *base = EIT_base(ucbp);
  size_t nelems = EIT_limit(ucbp) - base;

  // This must succeed or else an error will have occured already.
  const __EIT_entry *eitp = SearchEITV1(return_address_offset, base, nelems);

#endif


  /* Cache the function offset */

  ucbp->pr_cache.fnstart = ER_RO_OFFSET_TO_ADDR(eitp->fnoffset, ucbp);

  /* Can this frame be unwound at all? */

  if (eitp->content == EXIDX_CANTUNWIND) {
    ucbp->PR_ADDR = NULL;
    DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_NOUNWIND);
    return _URC_FAILURE;
  }

  /* Obtain the address of the "real" __EHT_Header word */
  if (eitp->content & uint32_highbit) {
    /* It is immediate data: the header lives inline in the index entry */
    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
    ucbp->pr_cache.additional = 1;
  } else {
    /* The content field is a segment relative offset to an _Unwind_EHT_Entry structure */
    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)ER_RO_OFFSET_TO_ADDR(eitp->content, ucbp);
    ucbp->pr_cache.additional = 0;
  }

  /* Discover the personality routine address */

  if (*(uint32_t *)(ucbp->pr_cache.ehtp) & uint32_highbit) {
    /* It is immediate data - compute matching pr.
     * Bits 27:24 of the header word select one of the standard AEABI
     * compact-model personality routines.
     */
    uint32_t idx = ((*(uint32_t *)(ucbp->pr_cache.ehtp)) >> 24) & 0xf;

    if (idx == 0) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr0;
    else if (idx == 1) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr1;
    else if (idx == 2) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr2;
    else { /* Failed: index outside the three defined compact PRs */
      ucbp->PR_ADDR = NULL;
      DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_TABLECORRUPT);
      return _URC_FAILURE;
    }
  } else {
    /* Execute region offset to PR */
    ucbp->PR_ADDR = ER_RO_OFFSET_TO_ADDR(*(uint32_t *)(ucbp->pr_cache.ehtp), ucbp);

  }
  return _URC_OK;
}
sl@0
   764
sl@0
   765
static _Unwind_Reason_Code find_and_expand_eit_entry(_Unwind_Control_Block *ucbp,
sl@0
   766
                                                     uint32_t return_address)
sl@0
   767
{
sl@0
   768
  ValidateExceptionDescriptor(return_address, ucbp);
sl@0
   769
  if (EHABI_V2(ucbp))
sl@0
   770
    return find_and_expand_eit_entry_V2(ucbp, return_address);
sl@0
   771
  else
sl@0
   772
    return find_and_expand_eit_entry_V1(ucbp, return_address);
sl@0
   773
}
sl@0
   774
sl@0
   775
sl@0
   776
/* ----- Unwinding: ----- */
sl@0
   777
sl@0
   778
/* Fwd decl */
sl@0
   779
static NORETURNDECL void unwind_next_frame(_Unwind_Control_Block *ucbp, phase2_virtual_register_set *vrsp);
sl@0
   780
sl@0
   781
/* Helper fn: If the demand_save flag in a phase1_virtual_register_set was
sl@0
   782
 * zeroed, the registers were demand-saved. This function restores from
sl@0
   783
 * the save area.
sl@0
   784
*/
sl@0
   785
static FORCEINLINE void restore_non_core_regs(phase1_virtual_register_set *vrsp)
{
  /* A cleared demand_save flag means the corresponding VFP bank was
   * demand-saved during phase 1; restore it from the save area.
   */
  if (!vrsp->demand_save_vfp_low)
    __ARM_Unwind_VRS_VFPrestore_low(&vrsp->vfp.d[0]);
  if (!vrsp->demand_save_vfp_high)
    __ARM_Unwind_VRS_VFPrestore_high(&vrsp->vfp.d[16]);
}
sl@0
   792
sl@0
   793
/* _Unwind_RaiseException is the external entry point to begin unwinding */
sl@0
   794
__asm _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp)
{
  /* External entry point to begin unwinding (armcc embedded assembler).
   * Builds a phase2_virtual_register_set on the stack capturing the core
   * registers as they were at the call site, then hands off to the C
   * worker __ARM_Unwind_RaiseException with r0 = ucbp, r1 = register set.
   */
  extern __ARM_Unwind_RaiseException;

#if THUMBNAIL

  /* Create a phase2_virtual_register_set on the stack */
  /* Save the core registers, carefully writing the original sp value */
  /* Note we account for the pc but do not actually write it's value here */
  str.w    r14,[sp, #-8]!;
  add.w    r14, r13, #8;
  str.w    r14,[sp, #-4]!  /* pushed 3 words => 3 words */
  stmfd.w  sp!,{r0-r12};   /* pushed 13 words => 16 words */
  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  mov.w    r1,#0;
  str.w    r1,[sp,#-4]!;   /* pushed 1 word => 17 words */
  mov.w    r1,sp;
  sub.w    sp,sp,#4;       /* preserve 8 byte alignment => 18 words */

  /* Now pass to C (with r0 still valid) to do the real work.
   * r0 = ucbp, r1 = phase2_virtual_register_set.
   * If we get control back, pop the stack and return preserving r0.
   */

  /* on arch 5T and later the linker will fix 'bl' => 'blx' as
     needed */
  bl.w     __ARM_Unwind_RaiseException;
  ldr.w    r14,[sp,#16*4];  /* reload the saved lr from the register set */
  add.w    sp,sp,#18*4;     /* drop the whole 18-word frame */
  bx lr;

#else

  MAYBE_SWITCH_TO_ARM_STATE;

  /* Create a phase2_virtual_register_set on the stack */
  /* Save the core registers, carefully writing the original sp value */
  #if __ARMCC_VERSION < 300000
  stmfd sp!,{r13-r15};  /* pushed 3 words => 3 words */
  #else
  /* Later compilers reject r13/r15 in stm register lists; store them
   * separately to get the same 3-word layout. */
  stmdb r13, {r14,r15};
  str r13, [r13,#-3*4];
  sub r13, r13, #3*4;
  #endif
  stmfd sp!,{r0-r12};   /* pushed 13 words => 16 words */
  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  mov r1,#0;
  str r1,[sp,#-4]!;     /* pushed 1 word => 17 words */
  mov r1,sp;
  sub sp,sp,#4;         /* preserve 8 byte alignment => 18 words */

  /* Now pass to C (with r0 still valid) to do the real work.
   * r0 = ucbp, r1 = phase2_virtual_register_set.
   * If we get control back, pop the stack and return preserving r0.
   */

#if OLD_STYLE_INTERWORKING
  ldr r2,Unwind_RaiseException_Offset;
  add r2,r2,pc;
  mov lr,pc;
Offset_Base
  bx    r2;
#else
  /* on arch 5T and later the linker will fix 'bl' => 'blx' as
     needed */
  bl  __ARM_Unwind_RaiseException;
#endif
  ldr r14,[sp,#16*4];   /* reload the saved lr from the register set */
  add sp,sp,#18*4;      /* drop the whole 18-word frame */
  RET_LR;
#if OLD_STYLE_INTERWORKING
Unwind_RaiseException_Offset dcd __ARM_Unwind_RaiseException - Offset_Base;
#endif
  MAYBE_CODE16;

#endif

#ifndef __EPOC32__
  /* Alternate symbol names for difficult symbols.
   * It is possible no functions included in the image require
   * a handler table. Therefore make only a weak reference to
   * the handler table base symbol, which may be absent.
   */
  align 4
  extern |.ARM.exidx$$Base|;
  extern |.ARM.exidx$$Limit|;
  extern |.ARM.extab$$Base| WEAKASMDECL;
  export __ARM_ETInfo;
  /* these are offsets for /ropi */
__ARM_ETInfo /* layout must match struct ExceptionTableInfo */
eit_base   dcd |.ARM.exidx$$Base|  - __ARM_ETInfo; /* index table base */
eit_limit  dcd |.ARM.exidx$$Limit| - __ARM_ETInfo; /* index table limit */
#endif
}
sl@0
   888
sl@0
   889
sl@0
   890
/* __ARM_Unwind_RaiseException performs phase 1 unwinding */
sl@0
   891
sl@0
   892
_Unwind_Reason_Code __ARM_Unwind_RaiseException(_Unwind_Control_Block *ucbp,
sl@0
   893
                                                phase2_virtual_register_set *entry_VRSp)
sl@0
   894
{
sl@0
   895
  phase1_virtual_register_set phase1_VRS;
sl@0
   896
sl@0
   897
  /* Is this a nested simultaneous propagation?
sl@0
   898
   * (see comments with _Unwind_Complete)
sl@0
   899
   */
sl@0
   900
  if (ucbp->NESTED_CONTEXT == 0) {
sl@0
   901
    /* No - this is only propagation */
sl@0
   902
    ucbp->NESTED_CONTEXT = 1;
sl@0
   903
  } else {
sl@0
   904
#ifdef SUPPORT_NESTED_EXCEPTIONS
sl@0
   905
    /* Yes - cache the state elsewhere and restore it when the propagation ends */
sl@0
   906
    /* This representation wastes space and uses malloc; do better?
sl@0
   907
     * On the other hand will it ever be used in practice?
sl@0
   908
     */
sl@0
   909
    _Unwind_Control_Block *saved_ucbp = AllocSavedUCB();
sl@0
   910
    if (ucbp == NULL) {
sl@0
   911
      DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_BUFFERFAILED);
sl@0
   912
      return _URC_FAILURE;
sl@0
   913
    }
sl@0
   914
    saved_ucbp->unwinder_cache = ucbp->unwinder_cache;
sl@0
   915
    saved_ucbp->barrier_cache = ucbp->barrier_cache;
sl@0
   916
    saved_ucbp->cleanup_cache = ucbp->cleanup_cache;
sl@0
   917
    ucbp->NESTED_CONTEXT = (uint32_t)saved_ucbp;
sl@0
   918
#else
sl@0
   919
    abort();
sl@0
   920
#endif
sl@0
   921
  }
sl@0
   922
sl@0
   923
  /* entry_VRSp contains the core registers as they were when
sl@0
   924
   * _Unwind_RaiseException was called.  Copy the call-site address to r15
sl@0
   925
   * then copy all the registers to phase1_VRS for the phase 1 stack scan.
sl@0
   926
   */
sl@0
   927
sl@0
   928
  entry_VRSp->core.r[15] = entry_VRSp->core.r[14];
sl@0
   929
  phase1_VRS.core = entry_VRSp->core;
sl@0
   930
sl@0
   931
  /* For phase 1 only ensure non-core registers are saved before use.
sl@0
   932
   * If WMMX registers are supported, initialise their flags here and
sl@0
   933
   * take appropriate action elsewhere.
sl@0
   934
   */
sl@0
   935
sl@0
   936
  phase1_VRS.demand_save_vfp_low = 1;
sl@0
   937
  phase1_VRS.demand_save_vfp_high = 1;
sl@0
   938
#ifdef __EPOC32__
sl@0
   939
  /* Set up Symbian specific caches in the _Unwind_Control_Block's 
sl@0
   940
     unwinder_cache. 
sl@0
   941
  */
sl@0
   942
  InitialiseSymbianSpecificUnwinderCache(phase1_VRS.core.r[15], ucbp);
sl@0
   943
#endif
sl@0
   944
sl@0
   945
sl@0
   946
  /* Now perform a virtual unwind until a propagation barrier is met, or
sl@0
   947
   * until something goes wrong.  If something does go wrong, we ought (I
sl@0
   948
   * suppose) to restore registers we may have destroyed.
sl@0
   949
   */
sl@0
   950
sl@0
   951
  while (1) {
sl@0
   952
sl@0
   953
    _Unwind_Reason_Code pr_result;
sl@0
   954
sl@0
   955
    /* Search the index table for the required entry.  Cache the index table
sl@0
   956
     * pointer, and obtain and cache the addresses of the "real" __EHT_Header
sl@0
   957
     * word and the personality routine.
sl@0
   958
     */
sl@0
   959
sl@0
   960
    if (find_and_expand_eit_entry(ucbp, phase1_VRS.core.r[15]) != _URC_OK) {
sl@0
   961
      restore_non_core_regs(&phase1_VRS);
sl@0
   962
      /* Debugger bottleneck fn called during lookup */
sl@0
   963
      return _URC_FAILURE;
sl@0
   964
    }
sl@0
   965
sl@0
   966
    /* Call the pr to decide what to do */
sl@0
   967
sl@0
   968
    pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_VIRTUAL_UNWIND_FRAME,
sl@0
   969
                                                     ucbp,
sl@0
   970
                                                     (_Unwind_Context *)&phase1_VRS);
sl@0
   971
sl@0
   972
    if (pr_result == _URC_HANDLER_FOUND) break;
sl@0
   973
    if (pr_result == _URC_CONTINUE_UNWIND) continue;
sl@0
   974
sl@0
   975
    /* If we get here some sort of failure has occurred in the
sl@0
   976
     * pr and probably the pr returned _URC_FAILURE
sl@0
   977
     */
sl@0
   978
    restore_non_core_regs(&phase1_VRS);
sl@0
   979
    return _URC_FAILURE;
sl@0
   980
  }
sl@0
   981
sl@0
   982
  /* Propagation barrier located... restore entry register state of non-core regs */
sl@0
   983
sl@0
   984
  restore_non_core_regs(&phase1_VRS);
sl@0
   985
sl@0
   986
  /* Initiate real unwinding */
sl@0
   987
  unwind_next_frame(ucbp, entry_VRSp);
sl@0
   988
  /* Unreached, but keep compiler quiet: */
sl@0
   989
  return _URC_FAILURE;
sl@0
   990
}
sl@0
   991
sl@0
   992
sl@0
   993
/* unwind_next_frame performs phase 2 unwinding */
sl@0
   994
sl@0
   995
/* unwind_next_frame performs phase 2 unwinding: for each frame, look up
 * its index-table entry and let the personality routine act on it, until
 * one of them installs a context. Never returns normally.
 */
static NORETURNDECL void unwind_next_frame(_Unwind_Control_Block *ucbp, phase2_virtual_register_set *vrsp)
{
  for (;;) {
    _Unwind_Reason_Code outcome;

    /* Search the index table for the required entry.  Cache the index table
     * pointer, and obtain and cache the addresses of the "real" __EHT_Header
     * word and the personality routine. Phase 1 already walked these
     * frames, so a lookup failure here is fatal.
     */
    if (find_and_expand_eit_entry(ucbp, vrsp->core.r[15]) != _URC_OK)
      abort();

    /* Save the call-site address (so a cleanup can later resume via
     * _Unwind_Resume) and call the pr to do whatever it wants to do on
     * this new frame.
     */
    ucbp->SAVED_CALLSITE_ADDR = vrsp->core.r[15];
    outcome = ((personality_routine)ucbp->PR_ADDR)(_US_UNWIND_FRAME_STARTING, ucbp,
                                                   (_Unwind_Context *)vrsp);

    if (outcome == _URC_CONTINUE_UNWIND)
      continue;
    if (outcome != _URC_INSTALL_CONTEXT)
      abort();

    /* Upload the virtual registers into the real machine registers */
    __ARM_Unwind_VRS_corerestore(&vrsp->core);
  }
}
sl@0
  1026
sl@0
  1027
sl@0
  1028
/* _Unwind_Resume is the external entry point called after a cleanup
sl@0
  1029
 * to resume unwinding. It tail-calls a helper function,
sl@0
  1030
 * __ARM_Unwind_Resume, which never returns.
sl@0
  1031
 */
sl@0
  1032
/* _Unwind_Resume is the external entry point called after a cleanup
 * to resume unwinding. It tail-calls a helper function,
 * __ARM_Unwind_Resume, which never returns. (armcc embedded assembler.)
 */
__asm NORETURNDECL void _Unwind_Resume(_Unwind_Control_Block *ucbp)
{
  extern __ARM_Unwind_Resume;

#if THUMBNAIL

  /* Create a phase2_virtual_register_set on the stack */
  /* Save the core registers, carefully writing the original sp value */
  /* Note we account for the pc but do not actually write it's value here */
  str.w    r14,[sp, #-8]!;
  add.w    r14, r13, #8;
  str.w    r14,[sp, #-4]!    /* pushed 3 words => 3 words */
  stmfd.w  sp!,{r0-r12};     /* pushed 13 words => 16 words */
  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  mov.w    r1,#0;
  str.w    r1,[sp,#-4]!;     /* pushed 1 word => 17 words */
  mov.w    r1,sp;
  sub.w    sp,sp,#4;         /* preserve 8 byte alignment => 18 words */

  /* Now pass to C (with r0 still valid) to do the real work.
   * r0 = ucbp, r1 = phase2_virtual_register_set.
   * This call never returns.
   */

  /* NOTE(review): r2 is not loaded anywhere above in this THUMBNAIL path;
   * presumably this should transfer control to __ARM_Unwind_Resume as the
   * ARM path below does — confirm against a Thumb-2 build before relying
   * on this configuration.
   */
  mov      pc,r2

#else

  MAYBE_SWITCH_TO_ARM_STATE;

  /* Create a phase2_virtual_register_set on the stack */
  /* Save the core registers, carefully writing the original sp value */

  #if __ARMCC_VERSION < 300000
  stmfd sp!,{r13-r15};  /* pushed 3 words => 3 words */
  #else
  /* Later compilers reject r13/r15 in stm register lists; store them
   * separately to get the same 3-word layout. */
  stmdb r13, {r14,r15};
  str r13, [r13,#-3*4];
  sub r13, r13, #3*4;
  #endif

  stmfd sp!,{r0-r12};   /* pushed 13 words => 16 words */
  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  mov r1,#0;
  str r1,[sp,#-4]!;     /* pushed 1 word => 17 words */
  mov r1,sp;
  sub sp,sp,#4;         /* preserve 8 byte alignment => 18 words */

  /* Now pass to C (with r0 still valid) to do the real work.
   * r0 = ucbp, r1 = phase2_virtual_register_set.
   * This call never returns.
   */

#ifdef __APCS_INTERWORK
  ldr r2,Unwind_Resume_Offset;
  add r2,r2,pc;
  bx    r2;
Unwind_Resume_Offset dcd __ARM_Unwind_Resume - .;
#else
  b __ARM_Unwind_Resume;
#endif
  MAYBE_CODE16;

#endif
}
sl@0
  1097
sl@0
  1098
sl@0
  1099
/* Helper function for _Unwind_Resume */
sl@0
  1100
sl@0
  1101
NORETURNDECL void __ARM_Unwind_Resume(_Unwind_Control_Block *ucbp,
sl@0
  1102
                                  phase2_virtual_register_set *entry_VRSp)
sl@0
  1103
{
sl@0
  1104
  _Unwind_Reason_Code pr_result;
sl@0
  1105
sl@0
  1106
  /* Recover saved state */
sl@0
  1107
sl@0
  1108
  entry_VRSp->core.r[15] = ucbp->SAVED_CALLSITE_ADDR;
sl@0
  1109
sl@0
  1110
  /* Call the cached PR and dispatch */
sl@0
  1111
sl@0
  1112
  pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_UNWIND_FRAME_RESUME, ucbp,
sl@0
  1113
                                                   (_Unwind_Context *)entry_VRSp);
sl@0
  1114
sl@0
  1115
  if (pr_result == _URC_INSTALL_CONTEXT) {
sl@0
  1116
   /* Upload the registers */
sl@0
  1117
    __ARM_Unwind_VRS_corerestore(&entry_VRSp->core);
sl@0
  1118
  } else if (pr_result == _URC_CONTINUE_UNWIND)
sl@0
  1119
    unwind_next_frame(ucbp, entry_VRSp);
sl@0
  1120
  else
sl@0
  1121
    abort();
sl@0
  1122
}
sl@0
  1123
sl@0
  1124
sl@0
  1125
/* _Unwind_Complete is called at the end of a propagation.
sl@0
  1126
 * If we support multiple simultaneous propagations, restore the cached state
sl@0
  1127
 * of the previous propagation here.
sl@0
  1128
 */
sl@0
  1129
sl@0
  1130
/* _Unwind_Complete is called at the end of a propagation.
 * If we support multiple simultaneous propagations, restore the cached
 * state of the previous propagation here.
 */
void _Unwind_Complete(_Unwind_Control_Block *ucbp)
{
  _Unwind_Control_Block *saved = (_Unwind_Control_Block *)ucbp->NESTED_CONTEXT;

  switch ((uint32_t)saved) {
  case 0:
    abort();  /* should be impossible */
  case 1:
    /* This was the only ongoing propagation of this object */
    ucbp->NESTED_CONTEXT = 0;
    return;
  default:
    break;    /* nested: NESTED_CONTEXT holds a saved-state pointer */
  }
#ifdef SUPPORT_NESTED_EXCEPTIONS
  /* Copy the state back from the cache structure pointed to by
   * ucbp->NESTED_CONTEXT. Copying unwinder_cache also restores
   * ucbp->NESTED_CONTEXT itself.
   */
  ucbp->unwinder_cache = saved->unwinder_cache;
  ucbp->barrier_cache = saved->barrier_cache;
  ucbp->cleanup_cache = saved->cleanup_cache;
  FreeSavedUCB(saved);
#else
  abort();
#endif
}
sl@0
  1152
sl@0
  1153
/* _Unwind_DeleteException can be used to invoke the exception_cleanup
sl@0
  1154
 * function after catching a foreign exception.
sl@0
  1155
 */
sl@0
  1156
sl@0
  1157
/* _Unwind_DeleteException can be used to invoke the exception_cleanup
 * function after catching a foreign exception. A NULL cleanup is a no-op.
 */
void _Unwind_DeleteException(_Unwind_Control_Block *ucbp)
{
  if (ucbp->exception_cleanup == NULL)
    return;
  (ucbp->exception_cleanup)(_URC_FOREIGN_EXCEPTION_CAUGHT, ucbp);
}
sl@0
  1162
sl@0
  1163
#endif /* unwinder_c */
sl@0
  1164
#ifdef unwind_activity_c
sl@0
  1165
sl@0
  1166
/* Runtime debug "bottleneck function": */
sl@0
  1167
/* (not in the current Exceptions EABI document) */
sl@0
  1168
sl@0
  1169
/* Runtime debug "bottleneck function": */
/* (not in the current Exceptions EABI document) */
/* Decodes reason into a subsystem tag (high byte) and an activity code
 * (low 24 bits) and prints a diagnostic line; compiled to a no-op unless
 * UNWIND_ACTIVITY_DIAGNOSTICS is defined.
 */
void _Unwind_Activity(_Unwind_Control_Block *ucbp, uint32_t reason, uint32_t arg)
{
#ifdef UNWIND_ACTIVITY_DIAGNOSTICS
  uint32_t subsys = reason >> 24;
  uint32_t act = reason & 0xffffff;

  printf("_Unwind_Activity: UCB=0x%8.8x Reason=(", (uint32_t)ucbp);

  /* Subsystem tag; subsystem-specific activities (>= 0x80) are printed
   * here in full.
   */
  if (subsys == _UASUBSYS_UNWINDER) {
    printf("unw,");
    if (act >= 0x80)
      printf("%x) Arg=0x%8.8x\n", act, arg);
  } else if (subsys == _UASUBSYS_CPP) {
    printf("C++,");
    if (act >= 0x80) {
      if (act == _UAACT_CPP_TYPEINFO)
        printf("typeinfo) Typeinfo=0x%8.8x\n", arg);
      else
        printf("%x) Arg=0x%8.8x\n", act, arg);
    }
  } else {
    printf("???,");
    if (act >= 0x80)
      printf("%x) Arg=0x%8.8x\n", act, arg);
  }

  /* Generic activities (< 0x80) get a symbolic name where one is known */
  if (act < 0x80) {
    if (act == _UAACT_STARTING)
      printf("starting) Typeinfo=0x%8.8x\n", arg);
    else if (act == _UAACT_ENDING)
      printf("ending) Cause=%d\n", arg);
    else if (act == _UAACT_BARRIERFOUND)
      printf("barrierfound) Pad=0x%8.8x\n", arg);
    else if (act == _UAACT_PADENTRY)
      printf("padentry) Pad=0x%8.8x\n", arg);
    else
      printf("%x) Arg=0x%8.8x\n", act, arg);
  }
#endif
}
sl@0
  1217
sl@0
  1218
#endif /* unwind_activity_c */