1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/compsupp/aehabi/unwinder.c Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,1034 @@
1.4 +/* unwinder.c
1.5 + *
1.6 + * Copyright 2002-2003 ARM Limited.
1.7 + */
1.8 +/*
1.9 + Licence
1.10 +
1.11 + 1. Subject to the provisions of clause 2, ARM hereby grants to LICENSEE a
1.12 + perpetual, non-exclusive, nontransferable, royalty free, worldwide licence
1.13 + to use this Example Implementation of Exception Handling solely for the
1.14 + purpose of developing, having developed, manufacturing, having
1.15 + manufactured, offering to sell, selling, supplying or otherwise
1.16 + distributing products which comply with the Exception Handling ABI for the
1.17 + ARM Architecture specification. All other rights are reserved to ARM or its
1.18 + licensors.
1.19 +
1.20 + 2. THIS EXAMPLE IMPLEMENTATION OF EXCEPTION HANDLING IS PROVIDED "AS IS"
1.21 + WITH NO WARRANTIES EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED
1.22 + TO ANY WARRANTY OF SATISFACTORY QUALITY, MERCHANTABILITY, NONINFRINGEMENT
1.23 + OR FITNESS FOR A PARTICULAR PURPOSE.
1.24 +*/
1.25 +/*
1.26 + * RCS $Revision: 1.16 $
1.27 + * Checkin $Date: 2003/10/23 13:57:39 $
1.28 + * Revising $Author: agrant $
1.29 + */
1.30 +
1.31 +/* Language-independent unwinder implementation */
1.32 +
1.33 +/* This source file is compiled automatically by ARM's make system into
1.34 + * multiple object files. The source regions constituting object file
1.35 + * xxx.o are delimited by ifdef xxx_c / endif directives.
1.36 + *
1.37 + * The source regions currently marked are:
1.38 + * unwinder_c
1.39 + * unwind_activity_c
1.40 + */
1.41 +
1.42 +#include <stddef.h>
1.43 +#include <stdlib.h>
1.44 +/* Environment: */
1.45 +#include "unwind_env.h"
1.46 +/* Language-independent unwinder declarations: */
1.47 +#include "unwinder.h"
1.48 +
1.49 +/* Define UNWIND_ACTIVITY_DIAGNOSTICS for printed information from _Unwind_Activity */
1.50 +/* Define VRS_DIAGNOSTICS for printed diagnostics about VRS operations */
1.51 +
1.52 +#if defined(VRS_DIAGNOSTICS) || defined(UNWIND_ACTIVITY_DIAGNOSTICS)
1.53 +extern int printf(const char *, ...);
1.54 +#endif
1.55 +
1.56 +
1.57 +#ifdef unwinder_c
1.58 +
1.59 +/* ========================= ========================= */
1.60 +/* ========================= Virtual register set ========================= */
1.61 +/* ========================= ========================= */
1.62 +
1.63 +/* The approach taken by this implementation is to use the real machine
1.64 + * registers to hold all but the values of core (integer)
1.65 + * registers. Consequently the implementation must use only the core
1.66 + * registers except when manipulating the virtual register set. Non-core
1.67 + * registers are saved only on first use, so the single implementation can
1.68 + * cope with execution on processors which lack certain registers. The
1.69 + * registers as they were at the start of the propagation must be preserved
1.70 + * over phase 1 so that the machine state is correct at the start of phase
1.71 + * 2. This requires a copy to be taken (which can be stack allocated). During
1.72 + * a stack unwind (phase 1 or phase 2), the "current" virtual register set is
1.73 + * implemented as core register values held in a data structure, and non-core
1.74 + * register values held in the registers themselves. To ensure that all
1.75 + * original register values are available at the beginning of phase 2, the
1.76 + * core registers are saved in a second structure at the start of phase 1 and
1.77 + * the non-core registers are demand-saved into another part of the data
1.78 + * structure that holds the current core registers during the phase 1 stack
1.79 + * unwind.
1.80 + */
1.81 +/* Extent to which the access routines are implemented:
1.82 + * _Unwind_VRS_Get and _Unwind_VRS_Set implement only access to the core registers.
1.83 + * _Unwind_VRS_Pop implements only popping of core, vfp and fpa registers.
1.84 + * There is no support here for the Intel WMMX registers, but space is nevertheless
1.85 + * reserved in the virtual register set structure to indicate whether demand-saving
1.86 + * of those registers is required (as they are unsupported, it never is). The space
1.87 + * costs nothing as it is required for alignment.
1.88 + * The level of supported functionality is compliant with the requirements of the
1.89 + * Exceptions ABI.
1.90 + */
1.91 +
1.92 +typedef unsigned char bool;
1.93 +struct core_s { uint32_t r[16]; }; /* core integer regs */
1.94 +struct vfp_s { uint64_t vfp[16+1]; }; /* VFP registers saved in FSTMX format */
1.95 + /* Extra 2 words for the format word + unused */
1.96 +struct fpa_reg { uint32_t word[3]; };
1.97 +struct fpa_s { struct fpa_reg fpa[8]; }; /* FPA registers saved in SFM format */
1.98 +
1.99 +/* Phase 1 virtual register set includes demand-save areas */
1.100 +/* The phase 2 virtual register set must be a prefix of the phase 1 set */
1.101 +typedef struct phase1_virtual_register_set_s {
1.102 + /* demand_save flag == 1 means save the registers in the demand-save area */
1.103 + bool demand_save_vfp;
1.104 + bool demand_save_fpa;
1.105 + bool demand_save_wmmxd;
1.106 + bool demand_save_wmmxc;
1.107 + struct core_s core; /* current core registers */
1.108 + struct vfp_s vfp; /* demand-saved vfp registers */
1.109 + struct fpa_s fpa; /* demand-saved fpa registers */
1.110 +} phase1_virtual_register_set;
1.111 +
1.112 +/* Phase 2 virtual register set has no demand-save areas */
1.113 +/* The phase 2 virtual register set must be a prefix of the phase 1 set */
1.114 +/* The assembly fragments for _Unwind_RaiseException and _Unwind_Resume create
1.115 + * a phase2_virtual_register_set_s by hand so be careful.
1.116 + */
1.117 +typedef struct phase2_virtual_register_set_s {
1.118 + /* demand_save flag == 1 means save the registers in the demand-save area */
1.119 + /* Always 0 in phase 2 */
1.120 + bool demand_save_vfp;
1.121 + bool demand_save_fpa;
1.122 + bool demand_save_wmmxd;
1.123 + bool demand_save_wmmxc;
1.124 + struct core_s core; /* current core registers */
1.125 +} phase2_virtual_register_set;
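+
+/* Illustrative sketch (not part of the original source): the prefix
+ * requirement above can be made a compile-time check. The typedef name is
+ * hypothetical; the array size becomes -1, and compilation fails, if the
+ * core registers do not sit at the same offset in both structures.
+ */
+typedef char phase2_is_prefix_of_phase1_check
+    [offsetof(phase1_virtual_register_set, core) ==
+     offsetof(phase2_virtual_register_set, core) ? 1 : -1];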
1.126 +
1.127 +/* -- Helper macros for the embedded assembly */
1.128 +
1.129 +#if defined(__TARGET_ARCH_5T) || defined(__TARGET_ARCH_5TXM) || defined(__TARGET_ARCH_5TE) || \
1.130 + defined(__TARGET_ARCH_6) /* || ... */
1.131 + #define ARCH_5T_OR_LATER 1
1.132 +#else
1.133 + #define ARCH_5T_OR_LATER 0
1.134 +#endif
1.135 +
1.136 +#if defined(__APCS_INTERWORK) && !ARCH_5T_OR_LATER
1.137 + #define OLD_STYLE_INTERWORKING 1
1.138 +#else
1.139 + #define OLD_STYLE_INTERWORKING 0
1.140 +#endif
1.141 +
1.142 +#if defined(__TARGET_ARCH_4T) || defined(__TARGET_ARCH_4TXM) || ARCH_5T_OR_LATER
1.143 + #define HAVE_BX 1
1.144 +#else
1.145 + #define HAVE_BX 0
1.146 +#endif
1.147 +
1.148 +#if HAVE_BX
1.149 + #define RET_LR bx lr
1.150 +#else
1.151 + #define RET_LR mov pc,lr
1.152 +#endif
1.153 +
1.154 +/* ----- Routines: ----- */
1.155 +
1.156 +/* ----- Helper routines, private but external ----- */
1.157 +/* Note '%0' refers to local label '0' */
1.158 +
1.159 +__asm void __ARM_Unwind_VRS_VFPpreserve(void *vfpp)
1.160 +{
1.161 + /* Preserve the vfp registers in the passed memory */
1.162 +#ifdef __thumb
1.163 + #define MAYBE_SWITCH_TO_ARM_STATE SWITCH_TO_ARM_STATE
1.164 + #define MAYBE_CODE16 code16
1.165 + macro;
1.166 + SWITCH_TO_ARM_STATE;
1.167 +1
1.168 + align 4;
1.169 +2
1.170 + assert (%2 - %1) = 0;
1.171 + bx pc;
1.172 + nop;
1.173 + code32;
1.174 + mend;
1.175 +#else
1.176 + #define MAYBE_SWITCH_TO_ARM_STATE /* nothing */
1.177 + #define MAYBE_CODE16 /* nothing */
1.178 +#endif
1.179 +
1.180 +vfp_d0 CN 0;
1.181 + MAYBE_SWITCH_TO_ARM_STATE;
1.182 + stc p11,vfp_d0,[r0],{0x21}; /* 0xec800b21 FSTMIAX r0,{d0-d15} */
1.183 + RET_LR;
1.184 + MAYBE_CODE16;
1.185 +}
1.186 +
1.187 +__asm void __ARM_Unwind_VRS_VFPrestore(void *vfpp)
1.188 +{
1.189 + /* Restore the vfp registers from the passed memory */
1.190 +vfp_d0 CN 0;
1.191 + MAYBE_SWITCH_TO_ARM_STATE;
1.192 + ldc p11,vfp_d0,[r0],{0x21}; /* 0xec900b21 FLDMIAX r0,{d0-d15} */
1.193 + RET_LR;
1.194 + MAYBE_CODE16;
1.195 +}
1.196 +
1.197 +__asm void __ARM_Unwind_VRS_FPApreserve(void *vfpp)
1.198 +{
1.199 + /* Preserve the fpa registers in the passed memory */
1.200 +fpa_f0 CN 0;
1.201 +fpa_f4 CN 4;
1.202 + MAYBE_SWITCH_TO_ARM_STATE;
1.203 + stc p2, fpa_f0, [r0]; /* 0xed800200 SFM f0,4,[r0,#0] */
1.204 + stc p2, fpa_f4, [r0, #48]; /* 0xed80420c SFM f4,4,[r0,#0x30] */
1.205 + RET_LR;
1.206 + MAYBE_CODE16;
1.207 +}
1.208 +
1.209 +__asm void __ARM_Unwind_VRS_FPArestore(void *vfpp)
1.210 +{
1.211 + /* Restore the fpa registers from the passed memory */
1.212 +fpa_f0 CN 0;
1.213 +fpa_f4 CN 4;
1.214 + MAYBE_SWITCH_TO_ARM_STATE;
1.215 + ldc p2, fpa_f0, [r0]; /* 0xed900200 LFM f0,4,[r0,#0] */
1.216 + ldc p2, fpa_f4, [r0, #48]; /* 0xed90420c LFM f4,4,[r0,#0x30] */
1.217 + RET_LR;
1.218 + MAYBE_CODE16;
1.219 +}
1.220 +
1.221 +__asm NORETURNDECL void __ARM_Unwind_VRS_corerestore(void *corep)
1.222 +{
1.223 + /* By hypothesis this is preserve8 but the load of sp means the
1.224 + * assembler can't infer that.
1.225 + */
1.226 + preserve8;
1.227 + MAYBE_SWITCH_TO_ARM_STATE;
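+ /* On pre-v5T interworking builds the saved pc must be loaded into a
+ * scratch register and returned to with bx, since an ldm into pc does not
+ * switch ARM/Thumb state on those cores; otherwise a single ldmia that
+ * includes pc restores all sixteen registers at once.
+ */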
1.228 +#if OLD_STYLE_INTERWORKING
1.229 + mov r14, r0;
1.230 + ldmia r14!,{r0-r12};
1.231 + ldr r12,[r14, #4*2]; /* pc */
1.232 + ldmia r14,{r13-r14};
1.233 + bx r12;
1.234 +#else
1.235 + ldmia r0,{r0-r15};
1.236 +#endif
1.237 + MAYBE_CODE16;
1.238 +}
1.239 +
1.240 +
1.241 +/* ----- Development support ----- */
1.242 +
1.243 +#ifdef VRS_DIAGNOSTICS
1.244 +static void debug_print_vrs_vfp(struct vfp_s *vfpp)
1.245 +{
1.246 + uint64_t *lp = (uint64_t *)vfpp;
1.247 + int c = 0;
1.248 + int i;
1.249 + for (i = 0; i < 16; i++) {
1.250 + printf("D%-2d 0x%16.16llx ", i, *lp);
1.251 + lp++;
1.252 + if (c++ == 1) {
1.253 + c = 0;
1.254 + printf("\n");
1.255 + }
1.256 + }
1.257 +}
1.258 +
1.259 +static void debug_print_vrs_fpa(struct fpa_s *fpap)
1.260 +{
1.261 + uint32_t *lp = (uint32_t *)fpap;
1.262 + int c = 0;
1.263 + int i;
1.264 + for (i = 0; i < 8; i++) {
1.265 + printf("F%-2d 0x%8.8x%8.8x%8.8x ", i, *lp, *(lp+1), *(lp+2));
1.266 + lp+=3;
1.267 + if (c++ == 1) {
1.268 + c = 0;
1.269 + printf("\n");
1.270 + }
1.271 + }
1.272 +}
1.273 +
1.274 +static void debug_print_vrs(_Unwind_Context *context)
1.275 +{
1.276 + phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
1.277 + int i;
1.278 + int c;
1.279 + printf("------------------------------------------------------------------------\n");
1.280 + c = 0;
1.281 + for (i = 0; i < 16; i++) {
1.282 + printf("r%-2d 0x%8.8x ", i, vrsp->core.r[i]);
1.283 + if (c++ == 3) {
1.284 + c = 0;
1.285 + printf("\n");
1.286 + }
1.287 + }
1.288 +
1.289 + printf("-----\n");
1.290 + if (vrsp->demand_save_vfp == 1)
1.291 + printf("VFP is not saved\n");
1.292 + else
1.293 + debug_print_vrs_vfp(&vrsp->vfp);
1.294 + printf("-----\n");
1.295 + if (vrsp->demand_save_fpa == 1)
1.296 + printf("FPA is not saved\n");
1.297 + else
1.298 + debug_print_vrs_fpa(&vrsp->fpa);
1.299 + printf("------------------------------------------------------------------------\n");
1.300 +}
1.301 +#endif
1.302 +
1.303 +
1.304 +/* ----- Public routines ----- */
1.305 +
1.306 +_Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *context,
1.307 + _Unwind_VRS_RegClass regclass,
1.308 + uint32_t regno,
1.309 + _Unwind_VRS_DataRepresentation representation,
1.310 + void *valuep)
1.311 +{
1.312 + phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
1.313 + switch (regclass) {
1.314 + case _UVRSC_CORE:
1.315 + {
1.316 + if (representation != _UVRSD_UINT32 || regno > 15)
1.317 + return _UVRSR_FAILED;
1.318 + vrsp->core.r[regno] = *(uint32_t *)valuep;
1.319 + return _UVRSR_OK;
1.320 + }
1.321 + case _UVRSC_VFP:
1.322 + case _UVRSC_FPA:
1.323 + case _UVRSC_WMMXD:
1.324 + case _UVRSC_WMMXC:
1.325 + return _UVRSR_NOT_IMPLEMENTED;
1.326 + default:
1.327 + break;
1.328 + }
1.329 + return _UVRSR_FAILED;
1.330 +}
1.331 +
1.332 +
1.333 +_Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *context,
1.334 + _Unwind_VRS_RegClass regclass,
1.335 + uint32_t regno,
1.336 + _Unwind_VRS_DataRepresentation representation,
1.337 + void *valuep)
1.338 +{
1.339 + phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
1.340 + switch (regclass) {
1.341 + case _UVRSC_CORE:
1.342 + {
1.343 + if (representation != _UVRSD_UINT32 || regno > 15)
1.344 + return _UVRSR_FAILED;
1.345 + *(uint32_t *)valuep = vrsp->core.r[regno];
1.346 + return _UVRSR_OK;
1.347 + }
1.348 + case _UVRSC_VFP:
1.349 + case _UVRSC_FPA:
1.350 + case _UVRSC_WMMXD:
1.351 + case _UVRSC_WMMXC:
1.352 + return _UVRSR_NOT_IMPLEMENTED;
1.353 + default:
1.354 + break;
1.355 + }
1.356 + return _UVRSR_FAILED;
1.357 +}
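+
+/* Illustrative sketch (not part of the original source): typical use of the
+ * access routines above by a personality routine, which places the UCB
+ * address in the virtual r0 and the landing pad address in the virtual pc
+ * before returning _URC_INSTALL_CONTEXT. The helper name and parameters are
+ * examples only; _Unwind_VRS_Get is used symmetrically to read registers.
+ */
+static void example_install_landing_pad(_Unwind_Context *context,
+                                        _Unwind_Control_Block *ucbp,
+                                        uint32_t landing_pad)
+{
+  uint32_t ucb_addr = (uint32_t)ucbp;
+  (void)_Unwind_VRS_Set(context, _UVRSC_CORE, 0, _UVRSD_UINT32, &ucb_addr);
+  (void)_Unwind_VRS_Set(context, _UVRSC_CORE, 15, _UVRSD_UINT32, &landing_pad);
+}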
1.358 +
1.359 +
1.360 +#define R_SP 13
1.361 +
1.362 +_Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *context,
1.363 + _Unwind_VRS_RegClass regclass,
1.364 + uint32_t discriminator,
1.365 + _Unwind_VRS_DataRepresentation representation)
1.366 +{
1.367 + phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
1.368 + switch (regclass) {
1.369 + case _UVRSC_CORE:
1.370 + {
1.371 + /* If SP is included in the mask, the loaded value is used in preference to
1.372 + * the writeback value, but only on completion of the loading.
1.373 + */
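+ /* Example (illustrative mask only): discriminator 0x2010 selects r4 and
+ * r13 (SP); r4 receives the first word popped and SP the second, and
+ * because SP was in the mask the final SP is that loaded value rather
+ * than the incremented vsp.
+ */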
1.374 + uint32_t mask, *vsp, *rp, sp_loaded;
1.375 + if (representation != _UVRSD_UINT32)
1.376 + return _UVRSR_FAILED;
1.377 + vsp = (uint32_t *)vrsp->core.r[R_SP];
1.378 + rp = (uint32_t *)&vrsp->core;
1.379 + mask = discriminator & 0xffff;
1.380 + sp_loaded = mask & (1 << R_SP);
1.381 + while (mask != 0) {
1.382 + if (mask & 1) {
1.383 +#ifdef VRS_DIAGNOSTICS
1.384 + printf("VRS Pop r%d\n", rp - &vrsp->core.r[0]);
1.385 +#endif
1.386 + *rp = *vsp++;
1.387 + }
1.388 + rp++;
1.389 + mask >>= 1;
1.390 + }
1.391 + if (!sp_loaded)
1.392 + vrsp->core.r[R_SP] = (uint32_t)vsp;
1.393 + return _UVRSR_OK;
1.394 + }
1.395 + case _UVRSC_VFP:
1.396 + {
1.397 + uint32_t start = discriminator >> 16;
1.398 + uint32_t count = discriminator & 0xffff;
1.399 + if (representation != _UVRSD_VFPX || start + count > 16)
1.400 + return _UVRSR_FAILED;
1.401 + if (vrsp->demand_save_vfp == 1) { /* Demand-save over phase 1 */
1.402 + vrsp->demand_save_vfp = 0;
1.403 + __ARM_Unwind_VRS_VFPpreserve(&vrsp->vfp);
1.404 + }
1.405 + /* Now recover from the stack into the real machine registers.
1.406 + * Note we assume FSTMX standard format 1.
1.407 + * Do this by saving the current VFP registers to a memory area,
1.408 + * moving the in-memory values over that area, and
1.409 + * restoring from the whole area.
1.410 + */
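+ /* On the stack the area being popped is laid out as 'count' doublewords
+ * followed by a single format word, which is why one extra word is
+ * skipped when the virtual SP is advanced below.
+ */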
1.411 + {
1.412 + struct vfp_s temp_vfp;
1.413 + uint64_t *vsp;
1.414 + __ARM_Unwind_VRS_VFPpreserve(&temp_vfp);
1.415 + vsp = (uint64_t *)vrsp->core.r[R_SP];
1.416 + while (count--) {
1.417 +#ifdef VRS_DIAGNOSTICS
1.418 + printf("VRS Pop D%d = 0x%llx\n", start, *vsp);
1.419 +#endif
1.420 + temp_vfp.vfp[start++] = *vsp++;
1.421 + }
1.422 + vrsp->core.r[R_SP] = (uint32_t)((uint32_t *)vsp + 1); /* +1 to skip the format word */
1.423 + __ARM_Unwind_VRS_VFPrestore(&temp_vfp);
1.424 + }
1.425 + return _UVRSR_OK;
1.426 + }
1.427 + case _UVRSC_FPA:
1.428 + {
1.429 + uint32_t start = discriminator >> 16;
1.430 + uint32_t count = discriminator & 0xffff;
1.431 + if (representation != _UVRSD_FPAX || start > 7 || count > 4)
1.432 + return _UVRSR_FAILED;
1.433 + if (vrsp->demand_save_fpa == 1) { /* Demand-save over phase 1 */
1.434 + vrsp->demand_save_fpa = 0;
1.435 + __ARM_Unwind_VRS_FPApreserve(&vrsp->fpa);
1.436 + }
1.437 + /* Now recover from the stack into the real machine registers.
1.438 + * Do this by saving the current FPA registers to a memory area,
1.439 + * moving the in-memory values over that area, and
1.440 + * restoring from the whole area.
1.441 + * Unlike VFP, here the range is allowed to wrap round.
1.442 + */
1.443 + {
1.444 + struct fpa_s temp_fpa;
1.445 + struct fpa_reg *vsp;
1.446 + __ARM_Unwind_VRS_FPApreserve(&temp_fpa);
1.447 + vsp = (struct fpa_reg *)vrsp->core.r[R_SP];
1.448 + while (count--) {
1.449 +#ifdef VRS_DIAGNOSTICS
1.450 + printf("VRS Pop F%d = 0x%-8.8x%-8.8x%-8.8x\n", start, *(uint32_t *)vsp,
1.451 + *((uint32_t *)vsp + 1), *((uint32_t *)vsp + 2));
1.452 +#endif
1.453 + temp_fpa.fpa[start++] = *vsp++;
1.454 + start &= 7;
1.455 + }
1.456 + vrsp->core.r[R_SP] = (uint32_t)vsp;
1.457 + __ARM_Unwind_VRS_FPArestore(&temp_fpa);
1.458 + }
1.459 + return _UVRSR_OK;
1.460 + }
1.461 + case _UVRSC_WMMXD:
1.462 + case _UVRSC_WMMXC:
1.463 + return _UVRSR_NOT_IMPLEMENTED;
1.464 + default:
1.465 + break;
1.466 + }
1.467 + return _UVRSR_FAILED;
1.468 +}
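+
+/* Illustrative sketch (not part of the original source): the way a
+ * personality routine typically drives _Unwind_VRS_Pop while executing a
+ * frame's unwind instructions, here popping the callee-saved core registers
+ * r4-r11 together with r14. The helper name and register mask are examples
+ * only.
+ */
+static _Unwind_VRS_Result example_pop_callee_saved_core(_Unwind_Context *context)
+{
+  /* Bits 4-11 and 14 of the discriminator select r4-r11 and r14 (lr) */
+  return _Unwind_VRS_Pop(context, _UVRSC_CORE, 0x4ff0, _UVRSD_UINT32);
+}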
1.469 +
1.470 +
1.471 +
1.472 +/* ========================= ========================= */
1.473 +/* ========================= The unwinder ========================= */
1.474 +/* ========================= ========================= */
1.475 +
1.476 +
1.477 +/* This implementation uses the UCB unwinder_cache as follows:
1.478 + * reserved1 is documented in the EABI as requiring initialisation to 0.
1.479 + * It is used to manage nested simultaneous propagation. If the value is 0,
1.480 + * the UCB is participating in no propagations. If the value is 1, the UCB
1.481 + * is participating in one propagation. Otherwise the value is a pointer to
1.482 + * a structure holding saved UCB state from the next propagation out.
1.483 + * The structure used is simply a mallocated UCB.
1.484 + * reserved2 is used to preserve the call-site address over calls to a
1.485 + * personality routine and cleanup.
1.486 + * reserved3 is used to cache the PR address.
1.487 + * reserved4 is not used.
1.488 + * reserved5 is not used.
1.489 + */
1.490 +
1.491 +#define NESTED_CONTEXT unwinder_cache.reserved1
1.492 +#define SAVED_CALLSITE_ADDR unwinder_cache.reserved2
1.493 +#define PR_ADDR unwinder_cache.reserved3
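+
+/* Illustrative sketch (not part of the original source): how the reserved1
+ * encoding described above may be interpreted - 0 means idle, 1 means one
+ * propagation in progress, and any other value is a pointer to the saved
+ * state of an outer propagation. The helper name is an example only.
+ */
+static int example_propagation_is_nested(_Unwind_Control_Block *ucbp)
+{
+  return ucbp->NESTED_CONTEXT != 0 && ucbp->NESTED_CONTEXT != 1;
+}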
1.494 +
1.495 +/* Index table entry: */
1.496 +
1.497 +typedef struct __EIT_entry {
1.498 + uint32_t fnoffset; /* Relative to base of execution region */
1.499 + uint32_t content;
1.500 +} __EIT_entry;
1.501 +
1.502 +
1.503 +/* Private defines etc: */
1.504 +
1.505 +static const uint32_t EXIDX_CANTUNWIND = 1;
1.506 +static const uint32_t uint32_highbit = 0x80000000;
1.507 +
1.508 +/* ARM C++ personality routines: */
1.509 +
1.510 +typedef _Unwind_Reason_Code (*personality_routine)(_Unwind_State,
1.511 + _Unwind_Control_Block *,
1.512 + _Unwind_Context *);
1.513 +
1.514 +WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr0(_Unwind_State state, _Unwind_Control_Block *,
1.515 + _Unwind_Context *context);
1.516 +WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr1(_Unwind_State state, _Unwind_Control_Block *,
1.517 + _Unwind_Context *context);
1.518 +WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr2(_Unwind_State state, _Unwind_Control_Block *,
1.519 + _Unwind_Context *context);
1.520 +
1.521 +
1.522 +/* Various image symbols: */
1.523 +
1.524 +struct ExceptionTableInfo {
1.525 + uint32_t EIT_base;
1.526 + uint32_t EIT_limit;
1.527 +};
1.528 +/* We define __ARM_ETInfo to allow access to some linker-generated
1.529 + names that are not legal C identifiers. __ARM_ETInfo is extern only
1.530 + because of scope limitations of the embedded assembler */
1.531 +extern const struct ExceptionTableInfo __ARM_ETInfo;
1.532 +#define EIT_base \
1.533 + ((const __EIT_entry *)(__ARM_ETInfo.EIT_base + (const char *)&__ARM_ETInfo))
1.534 +#define EIT_limit \
1.535 + ((const __EIT_entry *)(__ARM_ETInfo.EIT_limit + (const char *)&__ARM_ETInfo))
1.536 +
1.537 +
1.538 +/* ----- Address manipulation: ----- */
1.539 +
1.540 +/* The following helper function is never called and is present simply
1.541 + * for ease of packaging. The constant word within is used by
1.542 + * ER_RO_offset_to_addr to compute the RO segment base.
1.543 + * The zero word named W is relocated relative to the base B of the
1.544 + * segment which includes it, hence B is recoverable at runtime by
1.545 + * computing &W - W.
1.546 + */
1.547 +
1.548 +extern const uint32_t __ARM_unwind_ROSegBase_SelfOffset;
1.549 +
1.550 +__asm void __ARM_unwind_basehelper(void)
1.551 +{
1.552 + export __ARM_unwind_ROSegBase_SelfOffset;
1.553 +R_ARM_ROSEGREL32 EQU 39
1.554 +__ARM_unwind_ROSegBase_SelfOffset;
1.555 + dcd 0;
1.556 + __RELOC R_ARM_ROSEGREL32,__ARM_unwind_ROSegBase_SelfOffset;
1.557 +}
1.558 +
1.559 +#define ER_RO_SegBase ((uint32_t)&__ARM_unwind_ROSegBase_SelfOffset - \
1.560 + __ARM_unwind_ROSegBase_SelfOffset)
1.561 +
1.562 +/* And now functions used to convert between segment-relative offsets
1.563 + * and absolute addresses.
1.564 + */
1.565 +
1.566 +static __inline uint32_t addr_to_ER_RO_offset(uint32_t addr)
1.567 +{
1.568 + return addr - ER_RO_SegBase;
1.569 +}
1.570 +
1.571 +static __inline uint32_t ER_RO_offset_to_addr(uint32_t offset)
1.572 +{
1.573 + extern const uint32_t __ARM_unwind_ROSegBase_SelfOffset;
1.574 + return offset + ER_RO_SegBase;
1.575 +}
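+
+/* Worked example (illustrative numbers only): if the RO segment is loaded at
+ * 0x00018000 and __ARM_unwind_ROSegBase_SelfOffset sits 0x40 bytes into it,
+ * the R_ARM_ROSEGREL32 relocation stores 0x40 in the word itself, so
+ * ER_RO_SegBase = 0x00018040 - 0x40 = 0x00018000 and
+ * ER_RO_offset_to_addr(addr_to_ER_RO_offset(x)) == x for any RO address x.
+ */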
1.576 +
1.577 +
1.578 +/* ----- Index table processing ----- */
1.579 +
1.580 +/* find_and_expand_eit_entry is a support function used in both phases to set
1.581 + * ucb.pr_cache and internal cache.
1.582 + * Call with a pointer to the ucb and the return address to look up.
1.583 + *
1.584 + * The table is contained in the half-open interval
1.585 + * [EIT_base, EIT_limit) and is an ordered array of __EIT_entrys.
1.586 + * Perform a binary search via C library routine bsearch.
1.587 + * The table contains only function start addresses (encoded as offsets), so
1.588 + * we need to special-case the end table entry in the comparison function,
1.589 + * which we do by assuming the function it describes extends to end of memory.
1.590 + * This causes us problems indirectly in that we would like to fault as
1.591 + * many attempts as possible to look up an invalid return address. There are
1.592 + * several ways an invalid return address can be obtained from a broken
1.593 + * program, such as someone corrupting the stack or broken unwind instructions
1.594 + * recovering the wrong value. It is plausible that many bad return addresses
1.595 + * will be either small integers or will point into the heap or stack, hence
1.596 + * it's desirable to get the length of that final function roughly right.
1.597 + * Here we make no attempt to do it. Code exclusively for use in toolchains
1.598 + * which define a suitable limit symbol could make use of that symbol.
1.599 + * Alternatively (QoI) a smart linker could augment the index table with a
1.600 + * dummy EXIDX_CANTUNWIND entry pointing just past the last real function.
1.601 + */
1.602 +
1.603 +static int EIT_comparator(const void *ck, const void *ce)
1.604 +{
1.605 + uint32_t return_address_offset = *(const uint32_t *)ck;
1.606 + const __EIT_entry *eitp = (const __EIT_entry *)ce;
1.607 + const __EIT_entry *next_eitp = eitp + 1;
1.608 + uint32_t next_fn;
1.609 + if (next_eitp != EIT_limit)
1.610 + next_fn = next_eitp->fnoffset;
1.611 + else
1.612 + next_fn = addr_to_ER_RO_offset(0); /* address 0 is 'just past' the end of memory */
1.613 + if (return_address_offset < eitp->fnoffset) return -1;
1.614 + if (return_address_offset >= next_fn) return 1;
1.615 + return 0;
1.616 +}
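+
+/* Example (illustrative offsets only): with entries at fnoffsets 0x100, 0x200
+ * and 0x300, a key of 0x250 compares as 1 against the first entry (it lies
+ * at or beyond the next function), 0 against the second (0x200 <= 0x250 <
+ * 0x300) and -1 against the third, so bsearch selects the entry for the
+ * function starting at offset 0x200.
+ */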
1.617 +
1.618 +
1.619 +static _Unwind_Reason_Code find_and_expand_eit_entry(_Unwind_Control_Block *ucbp,
1.620 + uint32_t return_address)
1.621 +{
1.622 + /* Search the index table for an entry containing the specified return
1.623 + * address. The EIT contains function offsets relative to the base of the
1.624 + * execute region so adjust the return address accordingly.
1.625 + */
1.626 +
1.627 + uint32_t return_address_offset = addr_to_ER_RO_offset(return_address);
1.628 + const __EIT_entry *base = EIT_base;
1.629 + size_t nelems = EIT_limit - EIT_base;
1.630 +
1.631 + const __EIT_entry *eitp =
1.632 + (const __EIT_entry *) bsearch(&return_address_offset, base, nelems,
1.633 + sizeof(__EIT_entry), EIT_comparator);
1.634 +
1.635 + if (eitp == NULL) {
1.636 + /* The return address we have was not found in the EIT.
1.637 + * This breaks the scan and we have to indicate failure.
1.638 + */
1.639 + ucbp->PR_ADDR = NULL;
1.640 + DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_LOOKUPFAILED);
1.641 + return _URC_FAILURE;
1.642 + }
1.643 +
1.644 + /* Cache the function offset */
1.645 +
1.646 + ucbp->pr_cache.fnstart = ER_RO_offset_to_addr(eitp->fnoffset);
1.647 +
1.648 + /* Can this frame be unwound at all? */
1.649 +
1.650 + if (eitp->content == EXIDX_CANTUNWIND) {
1.651 + ucbp->PR_ADDR = NULL;
1.652 + DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_NOUNWIND);
1.653 + return _URC_FAILURE;
1.654 + }
1.655 +
1.656 + /* Obtain the address of the "real" __EHT_Header word */
1.657 +
1.658 + if (eitp->content & uint32_highbit) {
1.659 + /* It is immediate data */
1.660 + ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
1.661 + ucbp->pr_cache.additional = 1;
1.662 + } else {
1.663 + /* The content field is a segment relative offset to an _Unwind_EHT_Entry structure */
1.664 + ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)ER_RO_offset_to_addr(eitp->content);
1.665 + ucbp->pr_cache.additional = 0;
1.666 + }
1.667 +
1.668 + /* Discover the personality routine address */
1.669 +
1.670 + if (*(uint32_t *)(ucbp->pr_cache.ehtp) & uint32_highbit) {
1.671 + /* It is immediate data - compute matching pr */
1.672 + uint32_t idx = ((*(uint32_t *)(ucbp->pr_cache.ehtp)) >> 24) & 0xf;
1.673 + if (idx == 0) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr0;
1.674 + else if (idx == 1) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr1;
1.675 + else if (idx == 2) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr2;
1.676 + else { /* Failed */
1.677 + ucbp->PR_ADDR = NULL;
1.678 + DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_TABLECORRUPT);
1.679 + return _URC_FAILURE;
1.680 + }
1.681 + } else {
1.682 + /* Execute region offset to PR */
1.683 + ucbp->PR_ADDR = ER_RO_offset_to_addr(*(uint32_t *)(ucbp->pr_cache.ehtp));
1.684 + }
1.685 + return _URC_OK;
1.686 +}
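+
+/* Example (illustrative values only): a content word of 0x80b0b0b0 has the
+ * high bit set, so the table entry itself is the EHT header, bits 27-24
+ * select __aeabi_unwind_cpp_pr0 and the remaining bytes are inline unwind
+ * opcodes; a content word of 0x00001234 is instead a segment-relative offset,
+ * so the EHT header is read from ER_RO_offset_to_addr(0x1234), and its first
+ * word then either names a pr in the same way (high bit set) or holds a
+ * segment-relative offset to a custom personality routine.
+ */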
1.687 +
1.688 +
1.689 +
1.690 +
1.691 +/* ----- Unwinding: ----- */
1.692 +
1.693 +/* Fwd decl */
1.694 +static NORETURNDECL void unwind_next_frame(_Unwind_Control_Block *ucbp, phase2_virtual_register_set *vrsp);
1.695 +
1.696 +/* Helper fn: If the demand_save flag in a phase1_virtual_register_set was
1.697 + * zeroed, the registers were demand-saved. This function restores from
1.698 + * the save area.
1.699 +*/
1.700 +static void restore_non_core_regs(phase1_virtual_register_set *vrsp)
1.701 +{
1.702 + if (vrsp->demand_save_vfp == 0)
1.703 + __ARM_Unwind_VRS_VFPrestore(&vrsp->vfp);
1.704 + if (vrsp->demand_save_fpa == 0)
1.705 + __ARM_Unwind_VRS_FPArestore(&vrsp->fpa);
1.706 +}
1.707 +
1.708 +/* _Unwind_RaiseException is the external entry point to begin unwinding */
1.709 +
1.710 +__asm _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp)
1.711 +{
1.712 + extern __ARM_Unwind_RaiseException;
1.713 +
1.714 + MAYBE_SWITCH_TO_ARM_STATE;
1.715 +
1.716 + /* Create a phase2_virtual_register_set on the stack */
1.717 + /* Save the core registers, carefully writing the original sp value */
1.718 + stmfd sp!,{r13-r15}; /* pushed 3 words => 3 words */
1.719 + stmfd sp!,{r0-r12}; /* pushed 13 words => 16 words */
1.720 + /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
1.721 + mov r1,#0;
1.722 + str r1,[sp,#-4]!; /* pushed 1 word => 17 words */
1.723 + mov r1,sp;
1.724 + sub sp,sp,#4; /* preserve 8 byte alignment => 18 words */
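+ /* The stack now holds, from sp upwards: one alignment word, the zeroed
+ * demand_save word, r0-r12, then the original r13-r15, so r1 (= sp+4)
+ * points at a 17-word phase2_virtual_register_set and the saved r14 sits
+ * at sp+16*4, which is reloaded after the call below.
+ */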
1.725 +
1.726 + /* Now pass to C (with r0 still valid) to do the real work.
1.727 + * r0 = ucbp, r1 = phase2_virtual_register_set.
1.728 + * If we get control back, pop the stack and return preserving r0.
1.729 + */
1.730 +
1.731 +#if OLD_STYLE_INTERWORKING
1.732 + ldr r2,Unwind_RaiseException_Offset;
1.733 + add r2,r2,pc;
1.734 + mov lr,pc;
1.735 +Offset_Base
1.736 + bx r2;
1.737 +#else
1.738 + /* on arch 5T and later the linker will fix 'bl' => 'blx' as
1.739 + needed */
1.740 + bl __ARM_Unwind_RaiseException;
1.741 +#endif
1.742 + ldr r14,[sp,#16*4];
1.743 + add sp,sp,#18*4;
1.744 + RET_LR;
1.745 +#if OLD_STYLE_INTERWORKING
1.746 +Unwind_RaiseException_Offset dcd __ARM_Unwind_RaiseException - Offset_Base;
1.747 +#endif
1.748 + MAYBE_CODE16;
1.749 +
1.750 + /* Alternate symbol names for difficult symbols.
1.751 + * It is possible no functions included in the image require
1.752 + * a handler table. Therefore make only a weak reference to
1.753 + * the handler table base symbol, which may be absent.
1.754 + */
1.755 + extern |.ARM.exidx$$Base|;
1.756 + extern |.ARM.exidx$$Limit|;
1.757 + extern |.ARM.extab$$Base| WEAKASMDECL;
1.758 + export __ARM_ETInfo;
1.759 + /* these are offsets for /ropi */
1.760 +__ARM_ETInfo /* layout must match struct ExceptionTableInfo */
1.761 +eit_base dcd |.ARM.exidx$$Base| - __ARM_ETInfo; /* index table base */
1.762 +eit_limit dcd |.ARM.exidx$$Limit| - __ARM_ETInfo; /* index table limit */
1.763 +}
1.764 +
1.765 +
1.766 +/* __ARM_Unwind_RaiseException performs phase 1 unwinding */
1.767 +
1.768 +_Unwind_Reason_Code __ARM_Unwind_RaiseException(_Unwind_Control_Block *ucbp,
1.769 + phase2_virtual_register_set *entry_VRSp)
1.770 +{
1.771 + phase1_virtual_register_set phase1_VRS;
1.772 +
1.773 + /* Is this a nested simultaneous propagation?
1.774 + * (see comments with _Unwind_Complete)
1.775 + */
1.776 + if (ucbp->NESTED_CONTEXT == 0) {
1.777 + /* No - this is the only propagation */
1.778 + ucbp->NESTED_CONTEXT = 1;
1.779 + } else {
1.780 + /* Yes - cache the state elsewhere and restore it when the propagation ends */
1.781 + /* This representation wastes space and uses malloc; do better?
1.782 + * On the other hand will it ever be used in practice?
1.783 + */
1.784 + _Unwind_Control_Block *saved_ucbp =
1.785 + (_Unwind_Control_Block *)malloc(sizeof(_Unwind_Control_Block));
1.786 + if (saved_ucbp == NULL) {
1.787 + DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_BUFFERFAILED);
1.788 + return _URC_FAILURE;
1.789 + }
1.790 + saved_ucbp->unwinder_cache = ucbp->unwinder_cache;
1.791 + saved_ucbp->barrier_cache = ucbp->barrier_cache;
1.792 + saved_ucbp->cleanup_cache = ucbp->cleanup_cache;
1.793 + ucbp->NESTED_CONTEXT = (uint32_t)saved_ucbp;
1.794 + }
1.795 +
1.796 + /* entry_VRSp contains the core registers as they were when
1.797 + * _Unwind_RaiseException was called. Copy the call-site address to r15
1.798 + * then copy all the registers to phase1_VRS for the phase 1 stack scan.
1.799 + */
1.800 +
1.801 + entry_VRSp->core.r[15] = entry_VRSp->core.r[14];
1.802 + phase1_VRS.core = entry_VRSp->core;
1.803 +
1.804 + /* For phase 1 only ensure non-core registers are saved before use.
1.805 + * If WMMX registers are supported, initialise their flags here and
1.806 + * take appropriate action elsewhere.
1.807 + */
1.808 +
1.809 + phase1_VRS.demand_save_vfp = 1;
1.810 + phase1_VRS.demand_save_fpa = 1;
1.811 +
1.812 + /* Now perform a virtual unwind until a propagation barrier is met, or
1.813 + * until something goes wrong. If something does go wrong, we ought (I
1.814 + * suppose) to restore registers we may have destroyed.
1.815 + */
1.816 +
1.817 + while (1) {
1.818 +
1.819 + _Unwind_Reason_Code pr_result;
1.820 +
1.821 + /* Search the index table for the required entry. Cache the index table
1.822 + * pointer, and obtain and cache the addresses of the "real" __EHT_Header
1.823 + * word and the personality routine.
1.824 + */
1.825 +
1.826 + if (find_and_expand_eit_entry(ucbp, phase1_VRS.core.r[15]) != _URC_OK) {
1.827 + restore_non_core_regs(&phase1_VRS);
1.828 + /* Debugger bottleneck fn called during lookup */
1.829 + return _URC_FAILURE;
1.830 + }
1.831 +
1.832 + /* Call the pr to decide what to do */
1.833 +
1.834 + pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_VIRTUAL_UNWIND_FRAME,
1.835 + ucbp,
1.836 + (_Unwind_Context *)&phase1_VRS);
1.837 +
1.838 + if (pr_result == _URC_HANDLER_FOUND) break;
1.839 + if (pr_result == _URC_CONTINUE_UNWIND) continue;
1.840 +
1.841 + /* If we get here some sort of failure has occurred in the
1.842 + * pr and probably the pr returned _URC_FAILURE
1.843 + */
1.844 + restore_non_core_regs(&phase1_VRS);
1.845 + return _URC_FAILURE;
1.846 + }
1.847 +
1.848 + /* Propagation barrier located... restore entry register state of non-core regs */
1.849 +
1.850 + restore_non_core_regs(&phase1_VRS);
1.851 +
1.852 + /* Initiate real unwinding */
1.853 + unwind_next_frame(ucbp, entry_VRSp);
1.854 + /* Unreached, but keep compiler quiet: */
1.855 + return _URC_FAILURE;
1.856 +}
1.857 +
1.858 +
1.859 +/* unwind_next_frame performs phase 2 unwinding */
1.860 +
1.861 +static NORETURNDECL void unwind_next_frame(_Unwind_Control_Block *ucbp, phase2_virtual_register_set *vrsp)
1.862 +{
1.863 + while (1) {
1.864 +
1.865 + _Unwind_Reason_Code pr_result;
1.866 +
1.867 + /* Search the index table for the required entry. Cache the index table
1.868 + * pointer, and obtain and cache the addresses of the "real" __EHT_Header
1.869 + * word and the personality routine.
1.870 + */
1.871 +
1.872 + if (find_and_expand_eit_entry(ucbp, vrsp->core.r[15]) != _URC_OK)
1.873 + abort();
1.874 +
1.875 + /* Save the call-site address and call the pr to do whatever it
1.876 + * wants to do on this new frame.
1.877 + */
1.878 +
1.879 + ucbp->SAVED_CALLSITE_ADDR = vrsp->core.r[15];
1.880 + pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_UNWIND_FRAME_STARTING, ucbp,
1.881 + (_Unwind_Context *)vrsp);
1.882 +
1.883 + if (pr_result == _URC_INSTALL_CONTEXT) {
1.884 + /* Upload the registers */
1.885 + __ARM_Unwind_VRS_corerestore(&vrsp->core);
1.886 + } else if (pr_result == _URC_CONTINUE_UNWIND)
1.887 + continue;
1.888 + else
1.889 + abort();
1.890 + }
1.891 +}
1.892 +
1.893 +
1.894 +/* _Unwind_Resume is the external entry point called after a cleanup
1.895 + * to resume unwinding. It tail-calls a helper function,
1.896 + * __ARM_Unwind_Resume, which never returns.
1.897 + */
1.898 +__asm NORETURNDECL void _Unwind_Resume(_Unwind_Control_Block *ucbp)
1.899 +{
1.900 + extern __ARM_Unwind_Resume;
1.901 +
1.902 + MAYBE_SWITCH_TO_ARM_STATE;
1.903 +
1.904 + /* Create a phase2_virtual_register_set on the stack */
1.905 + /* Save the core registers, carefully writing the original sp value */
1.906 +
1.907 + stmfd sp!,{r13-r15}; /* pushed 3 words => 3 words */
1.908 + stmfd sp!,{r0-r12}; /* pushed 13 words => 16 words */
1.909 + /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
1.910 + mov r1,#0;
1.911 + str r1,[sp,#-4]!; /* pushed 1 word => 17 words */
1.912 + mov r1,sp;
1.913 + sub sp,sp,#4; /* preserve 8 byte alignment => 18 words */
1.914 +
1.915 + /* Now pass to C (with r0 still valid) to do the real work.
1.916 + * r0 = ucbp, r1 = phase2_virtual_register_set.
1.917 + * This call never returns.
1.918 + */
1.919 +
1.920 +#ifdef __APCS_INTERWORK
1.921 + ldr r2,Unwind_Resume_Offset;
1.922 + add r2,r2,pc;
1.923 + bx r2;
1.924 +Unwind_Resume_Offset dcd __ARM_Unwind_Resume - .;
1.925 +#else
1.926 + b __ARM_Unwind_Resume;
1.927 +#endif
1.928 + MAYBE_CODE16;
1.929 +}
1.930 +
1.931 +
1.932 +/* Helper function for _Unwind_Resume */
1.933 +
1.934 +NORETURNDECL void __ARM_Unwind_Resume(_Unwind_Control_Block *ucbp,
1.935 + phase2_virtual_register_set *entry_VRSp)
1.936 +{
1.937 + _Unwind_Reason_Code pr_result;
1.938 +
1.939 + /* Recover saved state */
1.940 +
1.941 + entry_VRSp->core.r[15] = ucbp->SAVED_CALLSITE_ADDR;
1.942 +
1.943 + /* Call the cached PR and dispatch */
1.944 +
1.945 + pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_UNWIND_FRAME_RESUME, ucbp,
1.946 + (_Unwind_Context *)entry_VRSp);
1.947 +
1.948 + if (pr_result == _URC_INSTALL_CONTEXT) {
1.949 + /* Upload the registers */
1.950 + __ARM_Unwind_VRS_corerestore(&entry_VRSp->core);
1.951 + } else if (pr_result == _URC_CONTINUE_UNWIND)
1.952 + unwind_next_frame(ucbp, entry_VRSp);
1.953 + else
1.954 + abort();
1.955 +}
1.956 +
1.957 +
1.958 +/* _Unwind_Complete is called at the end of a propagation.
1.959 + * If we support multiple simultaneous propagations, restore the cached state
1.960 + * of the previous propagation here.
1.961 + */
1.962 +
1.963 +void _Unwind_Complete(_Unwind_Control_Block *ucbp)
1.964 +{
1.965 + _Unwind_Control_Block *context = (_Unwind_Control_Block *)ucbp->NESTED_CONTEXT;
1.966 + if ((uint32_t)context == 0) abort(); /* should be impossible */
1.967 + if ((uint32_t)context == 1) {
1.968 + /* This was the only ongoing propagation of this object */
1.969 + ucbp->NESTED_CONTEXT--;
1.970 + return;
1.971 + }
1.972 + /* Otherwise we copy the state back from the cache structure pointed to
1.973 + * by ucbp->NESTED_CONTEXT.
1.974 + */
1.975 + /* This first one updates ucbp->NESTED_CONTEXT */
1.976 + ucbp->unwinder_cache = context->unwinder_cache;
1.977 + ucbp->barrier_cache = context->barrier_cache;
1.978 + ucbp->cleanup_cache = context->cleanup_cache;
1.979 + free(context);
1.980 +}
1.981 +
1.982 +#endif /* unwinder_c */
1.983 +#ifdef unwind_activity_c
1.984 +
1.985 +/* Runtime debug "bottleneck function": */
1.986 +/* (not in the current Exceptions EABI document) */
1.987 +
1.988 +void _Unwind_Activity(_Unwind_Control_Block *ucbp, uint32_t reason, uint32_t arg)
1.989 +{
1.990 +#ifdef UNWIND_ACTIVITY_DIAGNOSTICS
1.991 + uint32_t who = reason >> 24;
1.992 + uint32_t activity = reason & 0xffffff;
1.993 + printf("_Unwind_Activity: UCB=0x%8.8x Reason=(", (uint32_t)ucbp);
1.994 + switch (who) {
1.995 + case _UASUBSYS_UNWINDER:
1.996 + printf("unw,");
1.997 + if (activity >= 0x80)
1.998 + printf("%x) Arg=0x%8.8x\n", activity, arg);
1.999 + break;
1.1000 + case _UASUBSYS_CPP:
1.1001 + printf("C++,");
1.1002 + if (activity >= 0x80) {
1.1003 + if (activity == _UAACT_CPP_TYPEINFO)
1.1004 + printf("typeinfo) Typeinfo=0x%8.8x\n", arg);
1.1005 + else
1.1006 + printf("%x) Arg=0x%8.8x\n", activity, arg);
1.1007 + }
1.1008 + break;
1.1009 + default:
1.1010 + printf("???,");
1.1011 + if (activity >= 0x80)
1.1012 + printf("%x) Arg=0x%8.8x\n", activity, arg);
1.1013 + break;
1.1014 + }
1.1015 + if (activity < 0x80) {
1.1016 + switch (activity) {
1.1017 + case _UAACT_STARTING:
1.1018 + printf("starting) Typeinfo=0x%8.8x\n", arg);
1.1019 + break;
1.1020 + case _UAACT_ENDING:
1.1021 + printf("ending) Cause=%d\n", arg);
1.1022 + break;
1.1023 + case _UAACT_BARRIERFOUND:
1.1024 + printf("barrierfound) Pad=0x%8.8x\n", arg);
1.1025 + break;
1.1026 + case _UAACT_PADENTRY:
1.1027 + printf("padentry) Pad=0x%8.8x\n", arg);
1.1028 + break;
1.1029 + default:
1.1030 + printf("%x) Arg=0x%8.8x\n", activity, arg);
1.1031 + break;
1.1032 + }
1.1033 + }
1.1034 +#endif
1.1035 +}
1.1036 +
1.1037 +#endif /* unwind_activity_c */