os/kernelhwsrv/kernel/eka/compsupp/symaehabi/unwinder.c
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/compsupp/symaehabi/unwinder.c	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,1218 @@
     1.4 +/* unwinder.c
     1.5 + *
     1.6 + * Copyright 2002-2005 ARM Limited. All rights reserved.
     1.7 + *
     1.8 + * Your rights to use this code are set out in the accompanying licence
     1.9 + * text file LICENCE.txt (ARM contract number LEC-ELA-00080 v1.0).
    1.10 + */
    1.11 +
    1.12 +/* Portions Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies). */
    1.13 +
    1.14 +/*
    1.15 + * RCS $Revision: 92986 $
    1.16 + * Checkin $Date: 2005-10-13 15:56:12 +0100 (Thu, 13 Oct 2005) $
    1.17 + * Revising $Author: achapman $
    1.18 + */
    1.19 +
    1.20 +/* Language-independent unwinder implementation */
    1.21 +
    1.22 +/* This source file is compiled automatically by ARM's make system into
    1.23 + * multiple object files. The source regions constituting object file
    1.24 + * xxx.o are delimited by ifdef xxx_c / endif directives.
    1.25 + *
    1.26 + * The source regions currently marked are:
    1.27 + * unwinder_c
    1.28 + * unwind_activity_c
    1.29 + */
    1.30 +
    1.31 +#ifndef __EPOC32__
    1.32 +#include <stddef.h>
    1.33 +#include <stdlib.h>
    1.34 +#else
    1.35 +#include <e32def.h>
    1.36 +#endif
    1.37 +/* Environment: */
    1.38 +#include "unwind_env.h"
    1.39 +/* Language-independent unwinder declarations: */
    1.40 +#include "unwinder.h"
    1.41 +
    1.42 +#ifdef __EPOC32__
    1.43 +/* Symbian specific support */
    1.44 +#include "symbian_support.h"
    1.45 +#endif
    1.46 +
    1.47 +/* Define UNWIND_ACTIVITY_DIAGNOSTICS for printed information from _Unwind_Activity */
    1.48 +/* Define VRS_DIAGNOSTICS for printed diagnostics about VRS operations */
    1.49 +
    1.50 +#if defined(VRS_DIAGNOSTICS) || defined(UNWIND_ACTIVITY_DIAGNOSTICS)
    1.51 +#ifndef __EPOC32__
    1.52 +extern int printf(const char *, ...);
    1.53 +#endif
    1.54 +#endif
    1.55 +
    1.56 +#ifdef SUPPORT_NESTED_EXCEPTIONS
    1.57 +extern _Unwind_Control_Block *AllocSavedUCB();
    1.58 +extern void FreeSavedUCB(_Unwind_Control_Block *context);
    1.59 +#endif
    1.60 +
    1.61 +#ifdef unwinder_c
    1.62 +
    1.63 +/* =========================                      ========================= */
    1.64 +/* ========================= Virtual register set ========================= */
    1.65 +/* =========================                      ========================= */
    1.66 +
    1.67 +/* The approach taken by this implementation is to use the real machine
    1.68 + * registers to hold all but the values of core (integer)
    1.69 + * registers. Consequently the implementation must use only the core
    1.70 + * registers except when manipulating the virtual register set. Non-core
    1.71 + * registers are saved only on first use, so the single implementation can
    1.72 + * cope with execution on processors which lack certain registers.  The
    1.73 + * registers as they were at the start of the propagation must be preserved
    1.74 + * over phase 1 so that the machine state is correct at the start of phase
    1.75 + * 2. This requires a copy to be taken (which can be stack allocated). During
    1.76 + * a stack unwind (phase 1 or phase 2), the "current" virtual register set is
    1.77 + * implemented as core register values held in a data structure, and non-core
    1.78 + * register values held in the registers themselves. To ensure that all
    1.79 + * original register values are available at the beginning of phase 2, the
    1.80 + * core registers are saved in a second structure at the start of phase 1 and
    1.81 + * the non-core registers are demand-saved into another part of the data
    1.82 + * structure that holds the current core registers during the phase 1 stack
    1.83 + * unwind.
    1.84 + */
    1.85 +/* Extent to which the access routines are implemented:
    1.86 + * _Unwind_VRS_Get and _Unwind_VRS_Set implement only access to the core registers.
    1.87 + * _Unwind_VRS_Pop implements only popping of core and vfp registers.
    1.88 + * There is no support here for the Intel WMMX registers, but space is nevertheless
    1.89 + * reserved in the virtual register set structure to indicate whether demand-saving
    1.90 + * of those registers is required (as they are unsupported, it never is). The space
    1.91 + * costs nothing as it is required for alignment.
    1.92 + * The level of supported functionality is compliant with the requirements of the
    1.93 + * Exceptions ABI.
    1.94 + */
    1.95 +
    1.96 +typedef unsigned char bool;
    1.97 +struct core_s  { uint32_t r[16]; };        /* core integer regs */
    1.98 +struct vfp_s   { uint64_t d[32]; };        /* VFP registers saved in FSTMD format */
    1.99 +
   1.100 +/* Phase 1 virtual register set includes demand-save areas */
   1.101 +/* The phase 2 virtual register set must be a prefix of the phase 1 set */
   1.102 +typedef struct phase1_virtual_register_set_s {
   1.103 +  /* demand_save flag == 1 means save the registers in the demand-save area */
   1.104 +  bool demand_save_vfp_low;
   1.105 +  bool demand_save_vfp_high;
   1.106 +  bool demand_save_wmmxd;
   1.107 +  bool demand_save_wmmxc;
   1.108 +  struct core_s core;      /* current core registers */
   1.109 +  struct vfp_s  vfp;       /* demand-saved vfp registers */
   1.110 +} phase1_virtual_register_set;
   1.111 +
   1.112 +/* Phase 2 virtual register set has no demand-save areas */
   1.113 +/* The phase 2 virtual register set must be a prefix of the phase 1 set */
   1.114 +/* The assembly fragments for _Unwind_RaiseException and _Unwind_Resume create
   1.115 + * a phase2_virtual_register_set_s by hand so be careful.
   1.116 + */
   1.117 +typedef struct phase2_virtual_register_set_s {
   1.118 +  /* demand_save flag == 1 means save the registers in the demand-save area */
   1.119 +  /* Always 0 in phase 2 */
   1.120 +  bool demand_save_vfp_low;
   1.121 +  bool demand_save_vfp_high;
   1.122 +  bool demand_save_wmmxd;
   1.123 +  bool demand_save_wmmxc;
   1.124 +  struct core_s core;      /* current core registers */
   1.125 +} phase2_virtual_register_set;
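         +
         +/* Editorial note: a minimal sketch, not part of the original source, of how
         + * the "phase 2 set is a prefix of the phase 1 set" requirement above could be
         + * checked at compile time (assuming offsetof is available in this build):
         + *
         + *   typedef char phase2_is_prefix_of_phase1[
         + *     (offsetof(phase1_virtual_register_set, core) ==
         + *      offsetof(phase2_virtual_register_set, core)) ? 1 : -1];
         + *
         + * The hand-written assembly in _Unwind_RaiseException and _Unwind_Resume
         + * depends on this layout, so such a check would guard against accidental edits.
         + */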
   1.126 +
   1.127 +/* -- Helper macros for the embedded assembly */
   1.128 +
   1.129 +#if defined(__TARGET_ARCH_5T)  || defined(__TARGET_ARCH_5TXM) || \
   1.130 +    defined(__TARGET_ARCH_5TE) || defined(__TARGET_ARCH_6) || \
   1.131 +    defined(__TARGET_ARCH_6T2) || defined(__TARGET_ARCH_7_A) /* || ... */
   1.132 +  #define ARCH_5T_OR_LATER 1
   1.133 +#else
   1.134 +  #define ARCH_5T_OR_LATER 0
   1.135 +#endif
   1.136 +
   1.137 +#if defined(__APCS_INTERWORK) && !ARCH_5T_OR_LATER
   1.138 +  #define OLD_STYLE_INTERWORKING 1
   1.139 +#else
   1.140 +  #define OLD_STYLE_INTERWORKING 0
   1.141 +#endif
   1.142 +
   1.143 +#if defined(__TARGET_ARCH_4T) || defined(__TARGET_ARCH_4TXM) || ARCH_5T_OR_LATER
   1.144 +  #define HAVE_BX 1
   1.145 +#else
   1.146 +  #define HAVE_BX 0
   1.147 +#endif
   1.148 +
   1.149 +#if defined(__TARGET_ARCH_THUMBNAIL)
   1.150 +  #define THUMBNAIL 1
   1.151 +#else
   1.152 +  #define THUMBNAIL 0
   1.153 +#endif
   1.154 +
   1.155 +#if HAVE_BX
   1.156 +  #define RET_LR bx lr
   1.157 +#else
   1.158 +  #define RET_LR mov pc,lr
   1.159 +#endif
   1.160 +
   1.161 +/* ----- Routines: ----- */
   1.162 +
   1.163 +/* ----- Helper routines, private ----- */
   1.164 +
   1.165 +/* R_ARM_PREL31 is a place-relative 31-bit signed relocation.  The
   1.166 + * routine takes the address of a location that was relocated by
   1.167 + * R_ARM_PREL31, and returns an absolute address.
   1.168 + */
   1.169 +static FORCEINLINE uint32_t __ARM_resolve_prel31(void *p)
   1.170 +{
   1.171 +  return (uint32_t)((((*(int32_t *)p) << 1) >> 1) + (int32_t)p);
   1.172 +}
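         +
         +/* Editorial note: an illustrative worked example, not from the original source.
         + * If a hypothetical table word at address p == 0x00008000 holds 0x7ffffffc,
         + * the shift pair discards bit 31 and sign-extends the remaining 31 bits to -4,
         + * so the resolved absolute address is 0x00008000 - 4 = 0x00007ffc.
         + */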
   1.173 +
   1.174 +/* ----- Helper routines, private but external ----- */
   1.175 +
   1.176 +/* Note '%0' refers to local label '0' */
   1.177 +#if defined(__thumb)
   1.178 +#define MAYBE_SWITCH_TO_ARM_STATE SWITCH_TO_ARM_STATE
   1.179 +#define MAYBE_CODE16 code16
   1.180 +#else
   1.181 +#define MAYBE_SWITCH_TO_ARM_STATE /* nothing */
   1.182 +#define MAYBE_CODE16              /* nothing */
   1.183 +#endif
   1.184 +__asm void __ARM_Unwind_VRS_VFPpreserve_low(void *vfpp)
   1.185 +{
   1.186 +vfp_d0 CN 0;
   1.187 +  /* Preserve the low vfp registers in the passed memory */
   1.188 +#if defined(__thumb)
   1.189 +  macro;
   1.190 +  SWITCH_TO_ARM_STATE;
   1.191 +1
   1.192 +  align 4;
   1.193 +2
   1.194 +  assert (%2 - %1) = 0;
   1.195 +  bx pc;
   1.196 +  nop;
   1.197 +  code32;
   1.198 +  mend;
   1.199 +#endif
   1.200 +
   1.201 +  MAYBE_SWITCH_TO_ARM_STATE;
   1.202 +  stc   p11,vfp_d0,[r0],{0x20};  /* 0xec800b20  FSTMIAD r0,{d0-d15} */
   1.203 +  RET_LR;
   1.204 +  MAYBE_CODE16;
   1.205 +}
   1.206 +
   1.207 +__asm void __ARM_Unwind_VRS_VFPpreserve_high(void *vfpp)
   1.208 +{
   1.209 +vfp_d16 CN 0;                      /* =16 when used with stcl */
   1.210 +  /* Preserve the high vfp registers in the passed memory */
   1.211 +  MAYBE_SWITCH_TO_ARM_STATE;
   1.212 +  stcl  p11,vfp_d16,[r0],{0x20};  /* 0xecc00b20  FSTMIAD r0,{d16-d31} */
   1.213 +  RET_LR;
   1.214 +  MAYBE_CODE16;
   1.215 +}
   1.216 +
   1.217 +__asm void __ARM_Unwind_VRS_VFPrestore_low(void *vfpp)
   1.218 +{
   1.219 +  /* Restore the low vfp registers from the passed memory */
   1.220 +vfp_d0 CN 0;
   1.221 +  MAYBE_SWITCH_TO_ARM_STATE;
   1.222 +  ldc   p11,vfp_d0,[r0],{0x20};  /* 0xec900b20  FLDMIAD r0,{d0-d15} */
   1.223 +  RET_LR;
   1.224 +  MAYBE_CODE16;
   1.225 +}
   1.226 +
   1.227 +__asm void __ARM_Unwind_VRS_VFPrestore_high(void *vfpp)
   1.228 +{
   1.229 +  /* Restore the high vfp registers from the passed memory */
   1.230 +vfp_d16 CN 0;                      /* =16 when used with ldcl */
   1.231 +  MAYBE_SWITCH_TO_ARM_STATE;
   1.232 +  ldcl   p11,vfp_d16,[r0],{0x20};  /* 0xecd00b20  FLDMIAD r0,{d16-d31} */
   1.233 +  RET_LR;
   1.234 +  MAYBE_CODE16;
   1.235 +}
   1.236 +
   1.237 +
   1.238 +__asm NORETURNDECL void __ARM_Unwind_VRS_corerestore(void *corep)
   1.239 +{
   1.240 +  /* We rely here on corep pointing to a location in the stack,
   1.241 +   * as we briefly assign it to sp. This allows us to safely do
   1.242 +   * ldmia's which restore sp (if we use a different base register,
   1.243 +   * the updated sp may be used by the handler of any data abort
   1.244 +   * that occurs during the ldmia, and the stack gets overwritten).
   1.245 +   * By hypothesis this is preserve8 but the load of sp means the
   1.246 +   * assembler can't infer that.
   1.247 +   */
   1.248 +#if THUMBNAIL
   1.249 +  preserve8;
   1.250 +  mov.w   r13, r0;
   1.251 +  ldmia.w r13!,{r0-r12};
   1.252 +  ldr.w   r14, [r13, #4]   /* lr */
   1.253 +  ldr.w   r12, [r13, #4*2] /* pc */
   1.254 +  ldr.w   r13, [r13, #0]   /* sp */
   1.255 +  bx      r12
   1.256 +  
   1.257 +#else
   1.258 +  preserve8;
   1.259 +  MAYBE_SWITCH_TO_ARM_STATE;
   1.260 +#if OLD_STYLE_INTERWORKING
   1.261 +  mov   r13, r0;
   1.262 +  ldmia r13!,{r0-r12};
   1.263 +  ldr   r12,[r13, #4*2]; /* pc */
   1.264 +  ldmia r13,{r13-r14};
   1.265 +  bx    r12;
   1.266 +#else
   1.267 +
   1.268 +  #if __ARMCC_VERSION < 300000
   1.269 +  mov   r13, r0;
   1.270 +  ldmia r13,{r0-r15};
   1.271 +  #else
   1.272 +  mov r14, r0;
   1.273 +  ldmia r14!, {r0-r12};
   1.274 +  ldr r13, [r14], #4;
   1.275 +  ldmia r14, {r14,r15};
   1.276 +  #endif
   1.277 +
   1.278 +#endif
   1.279 +  MAYBE_CODE16;
   1.280 +#endif
   1.281 +}
   1.282 +
   1.283 +
   1.284 +/* ----- Development support ----- */
   1.285 +
   1.286 +#ifdef VRS_DIAGNOSTICS
   1.287 +static void debug_print_vrs_vfp(uint32_t base, uint64_t *lp)
   1.288 +{
   1.289 +  int c = 0;
   1.290 +  int i;
   1.291 +  for (i = 0; i < 16; i++) {
   1.292 +    printf("D%-2d  0x%16.16llx    ", i + base, *lp);
   1.293 +    lp++;
   1.294 +    if (c++ == 1) {
   1.295 +      c = 0;
   1.296 +      printf("\n");
   1.297 +    }
   1.298 +  }
   1.299 +}
   1.300 +
   1.301 +
   1.302 +static void debug_print_vrs(_Unwind_Context *context)
   1.303 +{
   1.304 +  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
   1.305 +  int i;
   1.306 +  int c;
   1.307 +  printf("------------------------------------------------------------------------\n");
   1.308 +  c = 0;
   1.309 +  for (i = 0; i < 16; i++) {
   1.310 +    printf("r%-2d  0x%8.8x    ", i, vrsp->core.r[i]);
   1.311 +    if (c++ == 3) {
   1.312 +      c = 0;
   1.313 +      printf("\n");
   1.314 +    }
   1.315 +  }
   1.316 +
   1.317 +  printf("-----\n");
   1.318 +  if (vrsp->demand_save_vfp_low == 1)
   1.319 +    printf("VFP low registers not saved\n");
   1.320 +  else
   1.321 +    debug_print_vrs_vfp(0, &vrsp->vfp.d[0]);
   1.322 +  printf("-----\n");
   1.323 +  if (vrsp->demand_save_vfp_high == 1)
   1.324 +    printf("VFP high registers not saved\n");
   1.325 +  else
   1.326 +    debug_print_vrs_vfp(16, &vrsp->vfp.d[16]);
   1.327 +  printf("------------------------------------------------------------------------\n");
   1.328 +}
   1.329 +#endif
   1.330 +
   1.331 +
   1.332 +/* ----- Public routines ----- */
   1.333 +
   1.334 +EXPORT_C _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *context,
   1.335 +                                            _Unwind_VRS_RegClass regclass,
   1.336 +                                            uint32_t regno,
   1.337 +                                            _Unwind_VRS_DataRepresentation representation,
   1.338 +                                            void *valuep)
   1.339 +{
   1.340 +  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
   1.341 +  switch (regclass) {
   1.342 +  case _UVRSC_CORE:
   1.343 +    {
   1.344 +      if (representation != _UVRSD_UINT32 || regno > 15)
   1.345 +        return _UVRSR_FAILED;
   1.346 +       vrsp->core.r[regno] = *(uint32_t *)valuep;
   1.347 +       return _UVRSR_OK;
   1.348 +    }
   1.349 +  case _UVRSC_VFP:
   1.350 +  case _UVRSC_WMMXD:
   1.351 +  case _UVRSC_WMMXC:
   1.352 +    return _UVRSR_NOT_IMPLEMENTED;
   1.353 +  default:
   1.354 +    break;
   1.355 +  }
   1.356 +  return _UVRSR_FAILED;
   1.357 +}
   1.358 +
   1.359 +
   1.360 +EXPORT_C _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *context,
   1.361 +                                            _Unwind_VRS_RegClass regclass,
   1.362 +                                            uint32_t regno,
   1.363 +                                            _Unwind_VRS_DataRepresentation representation,
   1.364 +                                            void *valuep)
   1.365 +{
   1.366 +  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
   1.367 +  switch (regclass) {
   1.368 +  case _UVRSC_CORE:
   1.369 +    {
   1.370 +      if (representation != _UVRSD_UINT32 || regno > 15)
   1.371 +        return _UVRSR_FAILED;
   1.372 +      *(uint32_t *)valuep = vrsp->core.r[regno];
   1.373 +      return _UVRSR_OK;
   1.374 +    }
   1.375 +  case _UVRSC_VFP:
   1.376 +  case _UVRSC_WMMXD:
   1.377 +  case _UVRSC_WMMXC:
   1.378 +    return _UVRSR_NOT_IMPLEMENTED;
   1.379 +  default:
   1.380 +    break;
   1.381 +  }
   1.382 +  return _UVRSR_FAILED;
   1.383 +}
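         +
         +/* Editorial note: a minimal usage sketch, not part of the original source.
         + * A personality routine typically reads and writes core registers through
         + * these accessors; for example, installing a hypothetical landing-pad address
         + * "pad" as the virtual PC (core register 15) before returning
         + * _URC_INSTALL_CONTEXT:
         + *
         + *   uint32_t pad = ...;  // hypothetical handler address
         + *   _Unwind_VRS_Set(context, _UVRSC_CORE, 15, _UVRSD_UINT32, &pad);
         + */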
   1.384 +
   1.385 +
   1.386 +#define R_SP 13
   1.387 +
   1.388 +EXPORT_C _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *context,
   1.389 +                                            _Unwind_VRS_RegClass regclass,
   1.390 +                                            uint32_t discriminator,
   1.391 +                                            _Unwind_VRS_DataRepresentation representation)
   1.392 +{
   1.393 +  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
   1.394 +  switch (regclass) {
   1.395 +  case _UVRSC_CORE:
   1.396 +    {
   1.397 +      /* If SP is included in the mask, the loaded value is used in preference to
   1.398 +       * the writeback value, but only on completion of the loading.
   1.399 +       */
   1.400 +      uint32_t mask, *vsp, *rp, sp_loaded;
   1.401 +      if (representation != _UVRSD_UINT32)
   1.402 +        return _UVRSR_FAILED;
   1.403 +      vsp = (uint32_t *)vrsp->core.r[R_SP];
   1.404 +      rp = (uint32_t *)&vrsp->core;
   1.405 +      mask = discriminator & 0xffff;
   1.406 +      sp_loaded = mask & (1 << R_SP);
   1.407 +      while (mask != 0) {
   1.408 +        if (mask & 1) {
   1.409 +#ifdef VRS_DIAGNOSTICS
   1.410 +          printf("VRS Pop r%d\n", rp - &vrsp->core.r[0]);
   1.411 +#endif
   1.412 +          *rp = *vsp++;
   1.413 +        }
   1.414 +        rp++;
   1.415 +        mask >>= 1;
   1.416 +      }
   1.417 +      if (!sp_loaded)
   1.418 +        vrsp->core.r[R_SP] = (uint32_t)vsp;
   1.419 +      return _UVRSR_OK;
   1.420 +    }
   1.421 +  case _UVRSC_VFP:
   1.422 +    {
   1.423 +      uint32_t start = discriminator >> 16;
   1.424 +      uint32_t count = discriminator & 0xffff;
   1.425 +      bool some_low = start < 16;
   1.426 +      bool some_high = start + count > 16;
   1.427 +      if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE) ||
   1.428 +          (representation == _UVRSD_VFPX && some_high) ||
   1.429 +          (representation == _UVRSD_DOUBLE && start + count > 32))
   1.430 +        return _UVRSR_FAILED;
   1.431 +      if (some_low && vrsp->demand_save_vfp_low == 1) { /* Demand-save over phase 1 */
   1.432 +        vrsp->demand_save_vfp_low = 0;
   1.433 +        __ARM_Unwind_VRS_VFPpreserve_low(&vrsp->vfp.d[0]);
   1.434 +      }
   1.435 +      if (some_high && vrsp->demand_save_vfp_high == 1) { /* Demand-save over phase 1 */
   1.436 +        vrsp->demand_save_vfp_high = 0;
   1.437 +        __ARM_Unwind_VRS_VFPpreserve_high(&vrsp->vfp.d[16]);
   1.438 +      }
   1.439 +      /* Now recover from the stack into the real machine registers.
   1.440 +       * Note for _UVRSD_VFPX we assume FSTMX standard format 1.
   1.441 +       * Do this by saving the current VFP registers to a memory area,
   1.442 +       * moving the in-memory values into that area, and
   1.443 +       * restoring from the whole area.
   1.444 +       * Must be careful as the 64-bit values saved by FSTMX might be
   1.445 +       * only 32-bit aligned.
   1.446 +       */
   1.447 +      {
   1.448 +        struct unaligned_vfp_reg_s { uint32_t w1; uint32_t w2; };
   1.449 +        struct unaligned_vfp_reg_s *vsp;
   1.450 +        struct vfp_s temp_vfp;
   1.451 +        if (some_low)
   1.452 +          __ARM_Unwind_VRS_VFPpreserve_low(&temp_vfp.d[0]);
   1.453 +        if (some_high)
   1.454 +          __ARM_Unwind_VRS_VFPpreserve_high(&temp_vfp.d[16]);
   1.455 +        vsp = (struct unaligned_vfp_reg_s *)vrsp->core.r[R_SP];
   1.456 +        while (count--) {
   1.457 +          struct unaligned_vfp_reg_s *v =
   1.458 +            (struct unaligned_vfp_reg_s *)&temp_vfp.d[start++];
   1.459 +          *v = *vsp++;
   1.460 +#ifdef VRS_DIAGNOSTICS
   1.461 +          printf("VRS Pop D%d = 0x%llx\n", start - 1, temp_vfp.d[start - 1]);
   1.462 +#endif
   1.463 +        }
   1.464 +        vrsp->core.r[R_SP] = (uint32_t)((uint32_t *)vsp +
   1.465 +                                        (representation == _UVRSD_VFPX ?
   1.466 +                                         1 : /* +1 to skip the format word */
   1.467 +                                         0));
   1.468 +        if (some_low)
   1.469 +          __ARM_Unwind_VRS_VFPrestore_low(&temp_vfp.d[0]);
   1.470 +        if (some_high)
   1.471 +          __ARM_Unwind_VRS_VFPrestore_high(&temp_vfp.d[16]);
   1.472 +      }
   1.473 +      return _UVRSR_OK;
   1.474 +    }
   1.475 +  case _UVRSC_WMMXD:
   1.476 +  case _UVRSC_WMMXC:
   1.477 +    return _UVRSR_NOT_IMPLEMENTED;
   1.478 +  default:
   1.479 +    break;
   1.480 +  }
   1.481 +  return _UVRSR_FAILED;
   1.482 +}
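         +
         +/* Editorial note: a usage sketch, not part of the original source, showing how
         + * a personality routine could implement an EHABI "pop {r4-r7, r14}" unwind
         + * instruction with this routine. The low 16 bits of the discriminator form the
         + * core register mask:
         + *
         + *   uint32_t mask = (1u << 4) | (1u << 5) | (1u << 6) | (1u << 7) | (1u << 14);
         + *   _Unwind_VRS_Pop(context, _UVRSC_CORE, mask, _UVRSD_UINT32);
         + *
         + * This pops r4-r7 and r14 from the virtual stack and advances the virtual sp.
         + */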
   1.483 +
   1.484 +
   1.485 +
   1.486 +/* =========================              ========================= */
   1.487 +/* ========================= The unwinder ========================= */
   1.488 +/* =========================              ========================= */
   1.489 +
   1.490 +
   1.491 +/* This implementation uses the UCB unwinder_cache as follows:
   1.492 + * reserved1 is documented in the EABI as requiring initialisation to 0.
   1.493 + *  It is used to manage nested simultaneous propagation. If the value is 0,
   1.494 + *  the UCB is participating in no propagations. If the value is 1, the UCB
   1.495 + *  is participating in one propagation. Otherwise the value is a pointer to
   1.496 + *  a structure holding saved UCB state from the next propagation out.
   1.497 + *  The structure used is simply a mallocated UCB.
   1.498 + * reserved2 is used to preserve the call-site address over calls to a
   1.499 + *  personality routine and cleanup.
   1.500 + * reserved3 is used to cache the PR address.
   1.501 + * reserved4 is used by the Symbian implementation to cache the ROM exception
   1.502 + *  search table
   1.503 + * reserved5 is used by the Symbian implementation to cache the
   1.504 + *  TExceptionDescriptor for the executable of the 'current' frame
   1.505 + */
   1.506 +
   1.507 +#define NESTED_CONTEXT      unwinder_cache.reserved1
   1.508 +#define SAVED_CALLSITE_ADDR unwinder_cache.reserved2
   1.509 +#define PR_ADDR             unwinder_cache.reserved3
   1.510 +
   1.511 +/* Index table entry: */
   1.512 +
   1.513 +#ifndef __EPOC32__  // Symbian OS defines this in symbian_support.h
   1.514 +typedef struct __EIT_entry {
   1.515 +  uint32_t fnoffset; /* Place-relative */
   1.516 +  uint32_t content;
   1.517 +} __EIT_entry;
   1.518 +#endif
   1.519 +
   1.520 +/* Private defines etc: */
   1.521 +
   1.522 +static const uint32_t EXIDX_CANTUNWIND = 1;
   1.523 +static const uint32_t uint32_highbit = 0x80000000;
   1.524 +
   1.525 +/* ARM C++ personality routines: */
   1.526 +
   1.527 +typedef _Unwind_Reason_Code (*personality_routine)(_Unwind_State,
   1.528 +                                                   _Unwind_Control_Block *,
   1.529 +                                                   _Unwind_Context *);
   1.530 +
   1.531 +WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr0(_Unwind_State state, _Unwind_Control_Block *,
   1.532 +                                                    _Unwind_Context *context);
   1.533 +IMPORT_C WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr1(_Unwind_State state, _Unwind_Control_Block *,
   1.534 +                                                             _Unwind_Context *context);
   1.535 +IMPORT_C WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr2(_Unwind_State state, _Unwind_Control_Block *,
   1.536 +                                                             _Unwind_Context *context);
   1.537 +
   1.538 +
   1.539 +/* Various image symbols: */
   1.540 +
   1.541 +struct ExceptionTableInfo {
   1.542 +  uint32_t EIT_base;
   1.543 +  uint32_t EIT_limit;
   1.544 +};
   1.545 +
   1.546 +#ifndef __EPOC32__
   1.547 +/* We define __ARM_ETInfo to allow access to some linker-generated
   1.548 +   names that are not legal C identifiers. __ARM_ETInfo is extern only
   1.549 +   because of scope limitations of the embedded assembler */
   1.550 +extern const struct ExceptionTableInfo __ARM_ETInfo;
   1.551 +#define EIT_base \
   1.552 +    ((const __EIT_entry *)(__ARM_ETInfo.EIT_base + (const char *)&__ARM_ETInfo))
   1.553 +#define EIT_limit \
   1.554 +    ((const __EIT_entry *)(__ARM_ETInfo.EIT_limit + (const char *)&__ARM_ETInfo))
   1.555 +
   1.556 +#endif
   1.557 +
   1.558 +
   1.559 +/* ----- Index table processing ----- */
   1.560 +
   1.561 +/* find_and_expand_eit_entry is a support function used in both phases to set
   1.562 + * ucb.pr_cache and internal cache.
   1.563 + * Call with a pointer to the ucb and the return address to look up.
   1.564 + *
   1.565 + * The table is contained in the half-open interval
   1.566 + * [EIT_base, EIT_limit) and is an ordered array of __EIT_entry structures.
   1.567 + * Perform a binary search via C library routine bsearch.
   1.568 + * The table contains only function start addresses (encoded as offsets), so
   1.569 + * we need to special-case the end table entry in the comparison function,
   1.570 + * which we do by assuming the function it describes extends to end of memory.
   1.571 + * This causes us problems indirectly in that we would like to fault as
   1.572 + * many attempts as possible to look up an invalid return address. There are
   1.573 + * several ways an invalid return address can be obtained from a broken
   1.574 + * program, such as someone corrupting the stack or broken unwind instructions
   1.575 + * recovering the wrong value. It is plausible that many bad return addresses
   1.576 + * will be either small integers or will point into the heap or stack, hence
   1.577 + * it's desirable to get the length of that final function roughly right.
   1.578 + * Here we make no attempt to do it. Code exclusively for use in toolchains
   1.579 + * which define a suitable limit symbol could make use of that symbol.
   1.580 + * Alternatively (QoI) a smart linker could augment the index table with a
   1.581 + * dummy EXIDX_CANTUNWIND entry pointing just past the last real function.
   1.582 + */
   1.583 +
   1.584 +#ifndef __EPOC32__
   1.585 +static int EIT_comparator(const void *ck, const void *ce)
   1.586 +{
   1.587 +  uint32_t return_address = *(const uint32_t *)ck;
   1.588 +  const __EIT_entry *eitp = (const __EIT_entry *)ce;
   1.589 +  const __EIT_entry *next_eitp = eitp + 1;
   1.590 +  uint32_t next_fn;
   1.591 +  if (next_eitp != EIT_limit)
   1.592 +    next_fn = __ARM_resolve_prel31((void *)&next_eitp->fnoffset);
   1.593 +  else
   1.594 +    next_fn = 0xffffffffU;
   1.595 +  if (return_address < __ARM_resolve_prel31((void *)&eitp->fnoffset)) return -1;
   1.596 +  if (return_address >= next_fn) return 1;
   1.597 +  return 0;
   1.598 +}
   1.599 +#endif
   1.600 +
   1.601 +
   1.602 +static _Unwind_Reason_Code find_and_expand_eit_entry_V2(_Unwind_Control_Block *ucbp,
   1.603 +                                                     uint32_t return_address)
   1.604 +{
   1.605 +  /* Search the index table for an entry containing the specified return
   1.606 +   * address. Subtract 2 from the return address, as the index table
   1.607 +   * contains function start addresses (a trailing noreturn BL would
   1.608 +   * appear to return to the first address of the next function (perhaps
   1.609 +   * +1 if Thumb); a leading BL would appear to return to function start
   1.610 +   * + instruction size (perhaps +1 if Thumb)).
   1.611 +   */
   1.612 +
   1.613 +#ifndef __EPOC32__
   1.614 +  const __EIT_entry *base = EIT_base;
   1.615 +  size_t nelems = EIT_limit - EIT_base;
   1.616 +  __EIT_entry *eitp;
   1.617 +
   1.618 +  return_address -= 2;
   1.619 +
   1.620 +  eitp = (__EIT_entry *) bsearch(&return_address, base, nelems,
   1.621 +                                 sizeof(__EIT_entry), EIT_comparator);
   1.622 +#else
   1.623 +  const __EIT_entry *base = EIT_base(ucbp);
   1.624 +  size_t nelems = EIT_limit(ucbp) - base;
   1.625 +  __EIT_entry *eitp;
   1.626 +
   1.627 +  return_address -= 2;
   1.628 +
   1.629 +  // This must succeed on Symbian OS or else an error will have occurred already.
   1.630 +  eitp = SearchEITV2(return_address, base, nelems);
   1.631 +#endif
   1.632 +
   1.633 +  if (eitp == NULL) {
   1.634 +    /* The return address we have was not found in the EIT.
   1.635 +     * This breaks the scan and we have to indicate failure.
   1.636 +     */
   1.637 +    ucbp->PR_ADDR = NULL;
   1.638 +    DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_LOOKUPFAILED);
   1.639 +    return _URC_FAILURE;
   1.640 +  }
   1.641 +
   1.642 +  /* Cache the function offset */
   1.643 +
   1.644 +  ucbp->pr_cache.fnstart = __ARM_resolve_prel31((void *)&eitp->fnoffset);
   1.645 +
   1.646 +  /* Can this frame be unwound at all? */
   1.647 +
   1.648 +  if (eitp->content == EXIDX_CANTUNWIND) {
   1.649 +    ucbp->PR_ADDR = NULL;
   1.650 +    DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_NOUNWIND);
   1.651 +    return _URC_FAILURE;
   1.652 +  }
   1.653 +
   1.654 +  /* Obtain the address of the "real" __EHT_Header word */
   1.655 +
   1.656 +  if (eitp->content & uint32_highbit) {
   1.657 +    /* It is immediate data */
   1.658 +    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
   1.659 +    ucbp->pr_cache.additional = 1;
   1.660 +  } else {
   1.661 +    /* The content field is a 31-bit place-relative offset to an _Unwind_EHT_Entry structure */
   1.662 +    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)__ARM_resolve_prel31((void *)&eitp->content);
   1.663 +    ucbp->pr_cache.additional = 0;
   1.664 +  }
   1.665 +
   1.666 +  /* Discover the personality routine address */
   1.667 +
   1.668 +  if (*(uint32_t *)(ucbp->pr_cache.ehtp) & uint32_highbit) {
   1.669 +    /* It is immediate data - compute matching pr */
   1.670 +    uint32_t idx = ((*(uint32_t *)(ucbp->pr_cache.ehtp)) >> 24) & 0xf;
   1.671 +    if (idx == 0) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr0;
   1.672 +    else if (idx == 1) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr1;
   1.673 +    else if (idx == 2) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr2;
   1.674 +    else { /* Failed */
   1.675 +      ucbp->PR_ADDR = NULL;
   1.676 +      DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_TABLECORRUPT);
   1.677 +      return _URC_FAILURE;
   1.678 +    }
   1.679 +  } else {
   1.680 +    /* It's a place-relative offset to pr */
   1.681 +    ucbp->PR_ADDR = __ARM_resolve_prel31((void *)(ucbp->pr_cache.ehtp));
   1.682 +  }
   1.683 +  return _URC_OK;
   1.684 +}
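         +
         +/* Editorial note: an illustrative example, not from the original source, of the
         + * decoding above. A hypothetical index table entry whose content word is
         + * 0x80b0b0b0 has the high bit set, so it is an inline entry: bits 27-24 select
         + * compact personality routine 0 (__aeabi_unwind_cpp_pr0) and the remaining
         + * bytes (0xb0 means "finish") are unwind opcodes interpreted by that routine.
         + * A content word with the high bit clear is instead a prel31 offset to an
         + * out-of-line exception handling table entry.
         + */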
   1.685 +
   1.686 +static _Unwind_Reason_Code find_and_expand_eit_entry_V1(_Unwind_Control_Block *ucbp,
   1.687 +                                                     uint32_t return_address)
   1.688 +{
   1.689 +  /* Search the index table for an entry containing the specified return
   1.690 +   * address. The EIT contains function offsets relative to the base of the
   1.691 +   * execute region so adjust the return address accordingly.
   1.692 +   */
   1.693 +
   1.694 +#ifndef __EPOC32__
   1.695 +  uint32_t return_address_offset = ADDR_TO_ER_RO_OFFSET(return_address, ucbp);
   1.696 +  const __EIT_entry *base = EIT_base;
   1.697 +  size_t nelems = EIT_limit - EIT_base;
   1.698 +
   1.699 +   const __EIT_entry *eitp =
   1.700 +     (const __EIT_entry *) bsearch(&return_address_offset, base, nelems, 
   1.701 +                                   sizeof(__EIT_entry), EIT_comparator);
   1.702 +  if (eitp == NULL) {
   1.703 +    /* The return address we have was not found in the EIT.
   1.704 +     * This breaks the scan and we have to indicate failure.
   1.705 +     */
   1.706 +    ucbp->PR_ADDR = NULL;
   1.707 +    DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_LOOKUPFAILED);
   1.708 +    return _URC_FAILURE;
   1.709 +  }
   1.710 +#else
   1.711 +  /* Shouldn't we subtract 2 from here just like in the V2 lookup? 
   1.712 +   */
   1.713 +  uint32_t return_address_offset = ADDR_TO_ER_RO_OFFSET(return_address, ucbp);
   1.714 +  const __EIT_entry *base = EIT_base(ucbp);
   1.715 +  size_t nelems = EIT_limit(ucbp) - base;
   1.716 +
   1.717 +  // This must succeed or else an error will have occurred already.
   1.718 +  const __EIT_entry *eitp = SearchEITV1(return_address_offset, base, nelems);
   1.719 +
   1.720 +#endif
   1.721 +
   1.722 +
   1.723 +  /* Cache the function offset */
   1.724 +
   1.725 +  ucbp->pr_cache.fnstart = ER_RO_OFFSET_TO_ADDR(eitp->fnoffset, ucbp);
   1.726 +
   1.727 +  /* Can this frame be unwound at all? */
   1.728 +
   1.729 +  if (eitp->content == EXIDX_CANTUNWIND) {
   1.730 +    ucbp->PR_ADDR = NULL;
   1.731 +    DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_NOUNWIND);
   1.732 +    return _URC_FAILURE;
   1.733 +  }
   1.734 +
   1.735 +  /* Obtain the address of the "real" __EHT_Header word */
   1.736 +  if (eitp->content & uint32_highbit) {
   1.737 +    /* It is immediate data */
   1.738 +    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
   1.739 +    ucbp->pr_cache.additional = 1;
   1.740 +  } else {
   1.741 +    /* The content field is a segment relative offset to an _Unwind_EHT_Entry structure */
   1.742 +    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)ER_RO_OFFSET_TO_ADDR(eitp->content, ucbp);
   1.743 +    ucbp->pr_cache.additional = 0;
   1.744 +  }
   1.745 +
   1.746 +  /* Discover the personality routine address */
   1.747 +
   1.748 +  if (*(uint32_t *)(ucbp->pr_cache.ehtp) & uint32_highbit) {
   1.749 +    /* It is immediate data - compute matching pr */
   1.750 +    uint32_t idx = ((*(uint32_t *)(ucbp->pr_cache.ehtp)) >> 24) & 0xf;
   1.751 +
   1.752 +    if (idx == 0) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr0;
   1.753 +    else if (idx == 1) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr1;
   1.754 +    else if (idx == 2) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr2;
   1.755 +    else { /* Failed */
   1.756 +      ucbp->PR_ADDR = NULL;
   1.757 +      DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_TABLECORRUPT);
   1.758 +      return _URC_FAILURE;
   1.759 +    }
   1.760 +  } else {
   1.761 +    /* Execute region offset to PR */
   1.762 +    ucbp->PR_ADDR = ER_RO_OFFSET_TO_ADDR(*(uint32_t *)(ucbp->pr_cache.ehtp), ucbp);
   1.763 +
   1.764 +  }
   1.765 +  return _URC_OK;
   1.766 +}
   1.767 +
   1.768 +static _Unwind_Reason_Code find_and_expand_eit_entry(_Unwind_Control_Block *ucbp,
   1.769 +                                                     uint32_t return_address)
   1.770 +{
   1.771 +  ValidateExceptionDescriptor(return_address, ucbp);
   1.772 +  if (EHABI_V2(ucbp))
   1.773 +    return find_and_expand_eit_entry_V2(ucbp, return_address);
   1.774 +  else
   1.775 +    return find_and_expand_eit_entry_V1(ucbp, return_address);
   1.776 +}
   1.777 +
   1.778 +
   1.779 +/* ----- Unwinding: ----- */
   1.780 +
   1.781 +/* Fwd decl */
   1.782 +static NORETURNDECL void unwind_next_frame(_Unwind_Control_Block *ucbp, phase2_virtual_register_set *vrsp);
   1.783 +
   1.784 +/* Helper fn: If the demand_save flag in a phase1_virtual_register_set was
   1.785 + * zeroed, the registers were demand-saved. This function restores from
   1.786 + * the save area.
   1.787 +*/
   1.788 +static FORCEINLINE void restore_non_core_regs(phase1_virtual_register_set *vrsp)
   1.789 +{
   1.790 +  if (vrsp->demand_save_vfp_low == 0)
   1.791 +    __ARM_Unwind_VRS_VFPrestore_low(&vrsp->vfp.d[0]);
   1.792 +  if (vrsp->demand_save_vfp_high == 0)
   1.793 +    __ARM_Unwind_VRS_VFPrestore_high(&vrsp->vfp.d[16]);
   1.794 +}
   1.795 +
   1.796 +/* _Unwind_RaiseException is the external entry point to begin unwinding */
   1.797 +__asm _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp)
   1.798 +{
   1.799 +  extern __ARM_Unwind_RaiseException;
   1.800 +
   1.801 +#if THUMBNAIL
   1.802 +
   1.803 +  /* Create a phase2_virtual_register_set on the stack */
   1.804 +  /* Save the core registers, carefully writing the original sp value */
   1.805 +  /* Note we account for the pc but do not actually write its value here */
   1.806 +  str.w    r14,[sp, #-8]!;
   1.807 +  add.w    r14, r13, #8;
   1.808 +  str.w    r14,[sp, #-4]!  /* pushed 3 words => 3 words */
   1.809 +  stmfd.w  sp!,{r0-r12};   /* pushed 13 words => 16 words */
   1.810 +  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
   1.811 +  mov.w    r1,#0;
   1.812 +  str.w    r1,[sp,#-4]!;   /* pushed 1 word => 17 words */
   1.813 +  mov.w    r1,sp;
   1.814 +  sub.w    sp,sp,#4;       /* preserve 8 byte alignment => 18 words */
   1.815 +
   1.816 +  /* Now pass to C (with r0 still valid) to do the real work.
   1.817 +   * r0 = ucbp, r1 = phase2_virtual_register_set.
   1.818 +   * If we get control back, pop the stack and return preserving r0.
   1.819 +   */
   1.820 +
   1.821 +  /* on arch 5T and later the linker will fix 'bl' => 'blx' as
   1.822 +     needed */
   1.823 +  bl.w     __ARM_Unwind_RaiseException;
   1.824 +  ldr.w    r14,[sp,#16*4];
   1.825 +  add.w    sp,sp,#18*4;
   1.826 +  bx lr;
   1.827 +
   1.828 +#else
   1.829 +
   1.830 +  MAYBE_SWITCH_TO_ARM_STATE;
   1.831 +
   1.832 +  /* Create a phase2_virtual_register_set on the stack */
   1.833 +  /* Save the core registers, carefully writing the original sp value */
   1.834 +  #if __ARMCC_VERSION < 300000
   1.835 +  stmfd sp!,{r13-r15};  /* pushed 3 words => 3 words */
   1.836 +  #else
   1.837 +  stmdb r13, {r14,r15};
   1.838 +  str r13, [r13,#-3*4];
   1.839 +  sub r13, r13, #3*4;
   1.840 +  #endif
   1.841 +  stmfd sp!,{r0-r12};   /* pushed 13 words => 16 words */
   1.842 +  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
   1.843 +  mov r1,#0;
   1.844 +  str r1,[sp,#-4]!;     /* pushed 1 word => 17 words */
   1.845 +  mov r1,sp;
   1.846 +  sub sp,sp,#4;         /* preserve 8 byte alignment => 18 words */
   1.847 +
   1.848 +  /* Now pass to C (with r0 still valid) to do the real work.
   1.849 +   * r0 = ucbp, r1 = phase2_virtual_register_set.
   1.850 +   * If we get control back, pop the stack and return preserving r0.
   1.851 +   */
   1.852 +
   1.853 +#if OLD_STYLE_INTERWORKING
   1.854 +  ldr r2,Unwind_RaiseException_Offset;
   1.855 +  add r2,r2,pc;
   1.856 +  mov lr,pc;
   1.857 +Offset_Base
   1.858 +  bx    r2;
   1.859 +#else
   1.860 +  /* on arch 5T and later the linker will fix 'bl' => 'blx' as
   1.861 +     needed */
   1.862 +  bl  __ARM_Unwind_RaiseException;
   1.863 +#endif
   1.864 +  ldr r14,[sp,#16*4];
   1.865 +  add sp,sp,#18*4;
   1.866 +  RET_LR;
   1.867 +#if OLD_STYLE_INTERWORKING
   1.868 +Unwind_RaiseException_Offset dcd __ARM_Unwind_RaiseException - Offset_Base;
   1.869 +#endif
   1.870 +  MAYBE_CODE16;
   1.871 +
   1.872 +#endif
   1.873 +
   1.874 +#ifndef __EPOC32__
   1.875 +  /* Alternate symbol names for difficult symbols.
   1.876 +   * It is possible no functions included in the image require
   1.877 +   * a handler table. Therefore make only a weak reference to
   1.878 +   * the handler table base symbol, which may be absent.
   1.879 +   */
   1.880 +  align 4
   1.881 +  extern |.ARM.exidx$$Base|;
   1.882 +  extern |.ARM.exidx$$Limit|;
   1.883 +  extern |.ARM.extab$$Base| WEAKASMDECL;
   1.884 +  export __ARM_ETInfo;
   1.885 +  /* these are offsets for /ropi */
   1.886 +__ARM_ETInfo /* layout must match struct ExceptionTableInfo */
   1.887 +eit_base   dcd |.ARM.exidx$$Base|  - __ARM_ETInfo; /* index table base */
   1.888 +eit_limit  dcd |.ARM.exidx$$Limit| - __ARM_ETInfo; /* index table limit */
   1.889 +#endif
   1.890 +}
   1.891 +
   1.892 +
   1.893 +/* __ARM_Unwind_RaiseException performs phase 1 unwinding */
   1.894 +
   1.895 +_Unwind_Reason_Code __ARM_Unwind_RaiseException(_Unwind_Control_Block *ucbp,
   1.896 +                                                phase2_virtual_register_set *entry_VRSp)
   1.897 +{
   1.898 +  phase1_virtual_register_set phase1_VRS;
   1.899 +
   1.900 +  /* Is this a nested simultaneous propagation?
   1.901 +   * (see comments with _Unwind_Complete)
   1.902 +   */
   1.903 +  if (ucbp->NESTED_CONTEXT == 0) {
   1.904 +    /* No - this is the only propagation */
   1.905 +    ucbp->NESTED_CONTEXT = 1;
   1.906 +  } else {
   1.907 +#ifdef SUPPORT_NESTED_EXCEPTIONS
   1.908 +    /* Yes - cache the state elsewhere and restore it when the propagation ends */
   1.909 +    /* This representation wastes space and uses malloc; do better?
   1.910 +     * On the other hand will it ever be used in practice?
   1.911 +     */
   1.912 +    _Unwind_Control_Block *saved_ucbp = AllocSavedUCB();
   1.913 +    if (saved_ucbp == NULL) {  /* check the newly allocated block, not the live UCB */
   1.914 +      DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_BUFFERFAILED);
   1.915 +      return _URC_FAILURE;
   1.916 +    }
   1.917 +    saved_ucbp->unwinder_cache = ucbp->unwinder_cache;
   1.918 +    saved_ucbp->barrier_cache = ucbp->barrier_cache;
   1.919 +    saved_ucbp->cleanup_cache = ucbp->cleanup_cache;
   1.920 +    ucbp->NESTED_CONTEXT = (uint32_t)saved_ucbp;
   1.921 +#else
   1.922 +    abort();
   1.923 +#endif
   1.924 +  }
   1.925 +
   1.926 +  /* entry_VRSp contains the core registers as they were when
   1.927 +   * _Unwind_RaiseException was called.  Copy the call-site address to r15
   1.928 +   * then copy all the registers to phase1_VRS for the phase 1 stack scan.
   1.929 +   */
   1.930 +
   1.931 +  entry_VRSp->core.r[15] = entry_VRSp->core.r[14];
   1.932 +  phase1_VRS.core = entry_VRSp->core;
   1.933 +
   1.934 +  /* For phase 1 only, ensure non-core registers are saved before use.
   1.935 +   * If WMMX registers are supported, initialise their flags here and
   1.936 +   * take appropriate action elsewhere.
   1.937 +   */
   1.938 +
   1.939 +  phase1_VRS.demand_save_vfp_low = 1;
   1.940 +  phase1_VRS.demand_save_vfp_high = 1;
   1.941 +#ifdef __EPOC32__
   1.942 +  /* Set up Symbian specific caches in the _Unwind_Control_Block's 
   1.943 +     unwinder_cache. 
   1.944 +  */
   1.945 +  InitialiseSymbianSpecificUnwinderCache(phase1_VRS.core.r[15], ucbp);
   1.946 +#endif
   1.947 +
   1.948 +
   1.949 +  /* Now perform a virtual unwind until a propagation barrier is met, or
   1.950 +   * until something goes wrong.  If something does go wrong, we ought (I
   1.951 +   * suppose) to restore registers we may have destroyed.
   1.952 +   */
   1.953 +
   1.954 +  while (1) {
   1.955 +
   1.956 +    _Unwind_Reason_Code pr_result;
   1.957 +
   1.958 +    /* Search the index table for the required entry.  Cache the index table
   1.959 +     * pointer, and obtain and cache the addresses of the "real" __EHT_Header
   1.960 +     * word and the personality routine.
   1.961 +     */
   1.962 +
   1.963 +    if (find_and_expand_eit_entry(ucbp, phase1_VRS.core.r[15]) != _URC_OK) {
   1.964 +      restore_non_core_regs(&phase1_VRS);
   1.965 +      /* Debugger bottleneck fn called during lookup */
   1.966 +      return _URC_FAILURE;
   1.967 +    }
   1.968 +
   1.969 +    /* Call the pr to decide what to do */
   1.970 +
   1.971 +    pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_VIRTUAL_UNWIND_FRAME,
   1.972 +                                                     ucbp,
   1.973 +                                                     (_Unwind_Context *)&phase1_VRS);
   1.974 +
   1.975 +    if (pr_result == _URC_HANDLER_FOUND) break;
   1.976 +    if (pr_result == _URC_CONTINUE_UNWIND) continue;
   1.977 +
   1.978 +    /* If we get here some sort of failure has occurred in the
   1.979 +     * pr and probably the pr returned _URC_FAILURE
   1.980 +     */
   1.981 +    restore_non_core_regs(&phase1_VRS);
   1.982 +    return _URC_FAILURE;
   1.983 +  }
   1.984 +
   1.985 +  /* Propagation barrier located... restore entry register state of non-core regs */
   1.986 +
   1.987 +  restore_non_core_regs(&phase1_VRS);
   1.988 +
   1.989 +  /* Initiate real unwinding */
   1.990 +  unwind_next_frame(ucbp, entry_VRSp);
   1.991 +  /* Unreached, but keep compiler quiet: */
   1.992 +  return _URC_FAILURE;
   1.993 +}
   1.994 +
   1.995 +
   1.996 +/* unwind_next_frame performs phase 2 unwinding */
   1.997 +
   1.998 +static NORETURNDECL void unwind_next_frame(_Unwind_Control_Block *ucbp, phase2_virtual_register_set *vrsp)
   1.999 +{
  1.1000 +  while (1) {
  1.1001 +
  1.1002 +    _Unwind_Reason_Code pr_result;
  1.1003 +
  1.1004 +    /* Search the index table for the required entry.  Cache the index table
  1.1005 +     * pointer, and obtain and cache the addresses of the "real" __EHT_Header
  1.1006 +     * word and the personality routine.
  1.1007 +     */
  1.1008 +
  1.1009 +    if (find_and_expand_eit_entry(ucbp, vrsp->core.r[15]) != _URC_OK)
  1.1010 +      abort();
  1.1011 +
  1.1012 +    /* Save the call-site address and call the pr to do whatever it
  1.1013 +     * wants to do on this new frame.
  1.1014 +     */
  1.1015 +
  1.1016 +    ucbp->SAVED_CALLSITE_ADDR = vrsp->core.r[15];
  1.1017 +    pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_UNWIND_FRAME_STARTING, ucbp,
  1.1018 +                                                     (_Unwind_Context *)vrsp);
  1.1019 +
  1.1020 +    if (pr_result == _URC_INSTALL_CONTEXT) {
  1.1021 +      /* Upload the registers */
  1.1022 +      __ARM_Unwind_VRS_corerestore(&vrsp->core);
  1.1023 +    } else if (pr_result == _URC_CONTINUE_UNWIND)
  1.1024 +      continue;
  1.1025 +    else
  1.1026 +      abort();
  1.1027 +  }
  1.1028 +}
  1.1029 +
  1.1030 +
  1.1031 +/* _Unwind_Resume is the external entry point called after a cleanup
  1.1032 + * to resume unwinding. It tail-calls a helper function,
  1.1033 + * __ARM_Unwind_Resume, which never returns.
  1.1034 + */
  1.1035 +__asm NORETURNDECL void _Unwind_Resume(_Unwind_Control_Block *ucbp)
  1.1036 +{
  1.1037 +  extern __ARM_Unwind_Resume;
  1.1038 +
  1.1039 +#if THUMBNAIL
  1.1040 +
  1.1041 +  /* Create a phase2_virtual_register_set on the stack */
  1.1042 +  /* Save the core registers, carefully writing the original sp value */
  1.1043 +  /* Note we account for the pc but do not actually write its value here */
  1.1044 +  str.w    r14,[sp, #-8]!;
  1.1045 +  add.w    r14, r13, #8;
  1.1046 +  str.w    r14,[sp, #-4]!    /* pushed 3 words => 3 words */
  1.1047 +  stmfd.w  sp!,{r0-r12};     /* pushed 13 words => 16 words */
  1.1048 +  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  1.1049 +  mov.w    r1,#0;
  1.1050 +  str.w    r1,[sp,#-4]!;     /* pushed 1 word => 17 words */
  1.1051 +  mov.w    r1,sp;
  1.1052 +  sub.w    sp,sp,#4;         /* preserve 8 byte alignment => 18 words */
  1.1053 +
  1.1054 +  /* Now pass to C (with r0 still valid) to do the real work.
  1.1055 +   * r0 = ucbp, r1 = phase2_virtual_register_set.
  1.1056 +   * This call never returns.
  1.1057 +   */
  1.1058 +
  1.1059 +  b.w      __ARM_Unwind_Resume;  /* r2 is never loaded here, so branch directly as in the ARM-state path */
  1.1060 +
  1.1061 +#else
  1.1062 +
  1.1063 +  MAYBE_SWITCH_TO_ARM_STATE;
  1.1064 +
  1.1065 +  /* Create a phase2_virtual_register_set on the stack */
  1.1066 +  /* Save the core registers, carefully writing the original sp value */
  1.1067 +
  1.1068 +  #if __ARMCC_VERSION < 300000
  1.1069 +  stmfd sp!,{r13-r15};  /* pushed 3 words => 3 words */
  1.1070 +  #else
  1.1071 +  stmdb r13, {r14,r15};
  1.1072 +  str r13, [r13,#-3*4];
  1.1073 +  sub r13, r13, #3*4;
  1.1074 +  #endif
  1.1075 +
  1.1076 +  stmfd sp!,{r0-r12};   /* pushed 13 words => 16 words */
  1.1077 +  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  1.1078 +  mov r1,#0;
  1.1079 +  str r1,[sp,#-4]!;     /* pushed 1 word => 17 words */
  1.1080 +  mov r1,sp;
  1.1081 +  sub sp,sp,#4;         /* preserve 8 byte alignment => 18 words */
  1.1082 +
  1.1083 +  /* Now pass to C (with r0 still valid) to do the real work.
  1.1084 +   * r0 = ucbp, r1 = phase2_virtual_register_set.
  1.1085 +   * This call never returns.
  1.1086 +   */
  1.1087 +
  1.1088 +#ifdef __APCS_INTERWORK
  1.1089 +  ldr r2,Unwind_Resume_Offset;
  1.1090 +  add r2,r2,pc;
  1.1091 +  bx    r2;
  1.1092 +Unwind_Resume_Offset dcd __ARM_Unwind_Resume - .;
  1.1093 +#else
  1.1094 +  b __ARM_Unwind_Resume;
  1.1095 +#endif
  1.1096 +  MAYBE_CODE16;
  1.1097 +
  1.1098 +#endif
  1.1099 +}
  1.1100 +
  1.1101 +
  1.1102 +/* Helper function for _Unwind_Resume */
  1.1103 +
  1.1104 +NORETURNDECL void __ARM_Unwind_Resume(_Unwind_Control_Block *ucbp,
  1.1105 +                                  phase2_virtual_register_set *entry_VRSp)
  1.1106 +{
  1.1107 +  _Unwind_Reason_Code pr_result;
  1.1108 +
  1.1109 +  /* Recover saved state */
  1.1110 +
  1.1111 +  entry_VRSp->core.r[15] = ucbp->SAVED_CALLSITE_ADDR;
  1.1112 +
  1.1113 +  /* Call the cached PR and dispatch */
  1.1114 +
  1.1115 +  pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_UNWIND_FRAME_RESUME, ucbp,
  1.1116 +                                                   (_Unwind_Context *)entry_VRSp);
  1.1117 +
  1.1118 +  if (pr_result == _URC_INSTALL_CONTEXT) {
  1.1119 +   /* Upload the registers */
  1.1120 +    __ARM_Unwind_VRS_corerestore(&entry_VRSp->core);
  1.1121 +  } else if (pr_result == _URC_CONTINUE_UNWIND)
  1.1122 +    unwind_next_frame(ucbp, entry_VRSp);
  1.1123 +  else
  1.1124 +    abort();
  1.1125 +}
  1.1126 +
  1.1127 +
  1.1128 +/* _Unwind_Complete is called at the end of a propagation.
  1.1129 + * If we support multiple simultaneous propagations, restore the cached state
  1.1130 + * of the previous propagation here.
  1.1131 + */
  1.1132 +
  1.1133 +void _Unwind_Complete(_Unwind_Control_Block *ucbp)
  1.1134 +{
  1.1135 +  _Unwind_Control_Block *context = (_Unwind_Control_Block *)ucbp->NESTED_CONTEXT;
  1.1136 +  if ((uint32_t)context == 0) abort();  /* should be impossible */
  1.1137 +  if ((uint32_t)context == 1) {
  1.1138 +    /* This was the only ongoing propagation of this object */
  1.1139 +    ucbp->NESTED_CONTEXT--;
  1.1140 +    return;
  1.1141 +  }
  1.1142 +#ifdef SUPPORT_NESTED_EXCEPTIONS
  1.1143 +  /* Otherwise we copy the state back from the cache structure pointed to
  1.1144 +   * by ucbp->NESTED_CONTEXT.
  1.1145 +   */
  1.1146 +  /* This first one updates ucbp->NESTED_CONTEXT */
  1.1147 +  ucbp->unwinder_cache = context->unwinder_cache;
  1.1148 +  ucbp->barrier_cache = context->barrier_cache;
  1.1149 +  ucbp->cleanup_cache = context->cleanup_cache;
  1.1150 +  FreeSavedUCB(context);
  1.1151 +#else
  1.1152 +  abort();
  1.1153 +#endif
  1.1154 +}
  1.1155 +
  1.1156 +/* _Unwind_DeleteException can be used to invoke the exception_cleanup
  1.1157 + * function after catching a foreign exception.
  1.1158 + */
  1.1159 +
  1.1160 +void _Unwind_DeleteException(_Unwind_Control_Block *ucbp)
  1.1161 +{
  1.1162 +  if (ucbp->exception_cleanup != NULL)
  1.1163 +    (ucbp->exception_cleanup)(_URC_FOREIGN_EXCEPTION_CAUGHT, ucbp);
  1.1164 +}
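         +
         +/* Editorial note: a minimal usage sketch, not part of the original source.
         + * A runtime that has caught an exception object raised by a different language
         + * runtime is expected to release it through this entry point rather than
         + * freeing it directly, e.g.:
         + *
         + *   if (caught_exception_is_foreign)   // hypothetical condition in the catcher
         + *     _Unwind_DeleteException(ucbp);
         + */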
  1.1165 +
  1.1166 +#endif /* unwinder_c */
  1.1167 +#ifdef unwind_activity_c
  1.1168 +
  1.1169 +/* Runtime debug "bottleneck function": */
  1.1170 +/* (not in the current Exceptions EABI document) */
  1.1171 +
  1.1172 +void _Unwind_Activity(_Unwind_Control_Block *ucbp, uint32_t reason, uint32_t arg)
  1.1173 +{
  1.1174 +#ifdef UNWIND_ACTIVITY_DIAGNOSTICS
  1.1175 +  uint32_t who = reason >> 24;
  1.1176 +  uint32_t activity = reason & 0xffffff;
  1.1177 +  printf("_Unwind_Activity: UCB=0x%8.8x Reason=(", (uint32_t)ucbp);
  1.1178 +  switch (who) {
  1.1179 +  case _UASUBSYS_UNWINDER:
  1.1180 +    printf("unw,");
  1.1181 +    if (activity >= 0x80)
  1.1182 +      printf("%x) Arg=0x%8.8x\n", activity, arg);
  1.1183 +    break;
  1.1184 +  case _UASUBSYS_CPP:
  1.1185 +    printf("C++,");
  1.1186 +    if (activity >= 0x80) {
  1.1187 +      if (activity == _UAACT_CPP_TYPEINFO)
  1.1188 +        printf("typeinfo) Typeinfo=0x%8.8x\n", arg);
  1.1189 +      else
  1.1190 +        printf("%x) Arg=0x%8.8x\n", activity, arg);
  1.1191 +    }
  1.1192 +    break;
  1.1193 +  default:
  1.1194 +    printf("???,");
  1.1195 +    if (activity >= 0x80)
  1.1196 +      printf("%x) Arg=0x%8.8x\n", activity, arg);
  1.1197 +    break;
  1.1198 +  }
  1.1199 +  if (activity < 0x80) {
  1.1200 +    switch (activity) {
  1.1201 +    case _UAACT_STARTING:
  1.1202 +      printf("starting) Typeinfo=0x%8.8x\n", arg);
  1.1203 +      break;
  1.1204 +    case _UAACT_ENDING:
  1.1205 +      printf("ending) Cause=%d\n", arg);
  1.1206 +      break;
  1.1207 +    case _UAACT_BARRIERFOUND:
  1.1208 +      printf("barrierfound) Pad=0x%8.8x\n", arg);
  1.1209 +      break;
  1.1210 +    case _UAACT_PADENTRY:
  1.1211 +      printf("padentry) Pad=0x%8.8x\n", arg);
  1.1212 +      break;
  1.1213 +    default:
  1.1214 +      printf("%x) Arg=0x%8.8x\n", activity, arg);
  1.1215 +      break;
  1.1216 +    }
  1.1217 +  }
  1.1218 +#endif
  1.1219 +}
  1.1220 +
  1.1221 +#endif /* unwind_activity_c */