Update contrib.
 * Copyright 2002-2005 ARM Limited. All rights reserved.
 *
 * Your rights to use this code are set out in the accompanying licence
 * text file LICENCE.txt (ARM contract number LEC-ELA-00080 v1.0).
 */
/* Portions copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies). */
/*
 * RCS $Revision: 92986 $
 * Checkin $Date: 2005-10-13 15:56:12 +0100 (Thu, 13 Oct 2005) $
 * Revising $Author: achapman $
 */
/* Language-independent unwinder implementation */

/* This source file is compiled automatically by ARM's make system into
 * multiple object files. The source regions constituting object file
 * xxx.o are delimited by ifdef xxx_c / endif directives.
 *
 * The source regions currently marked are:
 * unwinder_c unwind_activity_c
 */
#include "unwind_env.h"
/* Language-independent unwinder declarations: */
#include "unwinder.h"

/* Symbian specific support */
#include "symbian_support.h"

/* Define UNWIND_ACTIVITY_DIAGNOSTICS for printed information from _Unwind_Activity */
/* Define VRS_DIAGNOSTICS for printed diagnostics about VRS operations */

#if defined(VRS_DIAGNOSTICS) || defined(UNWIND_ACTIVITY_DIAGNOSTICS)
extern int printf(const char *, ...);
#endif

#ifdef SUPPORT_NESTED_EXCEPTIONS
extern _Unwind_Control_Block *AllocSavedUCB();
extern void FreeSavedUCB(_Unwind_Control_Block *context);
#endif
/* =========================                      ========================= */
/* ========================= Virtual register set ========================= */
/* =========================                      ========================= */
/* The approach taken by this implementation is to use the real machine
 * registers to hold all but the values of core (integer)
 * registers. Consequently the implementation must use only the core
 * registers except when manipulating the virtual register set. Non-core
 * registers are saved only on first use, so the single implementation can
 * cope with execution on processors which lack certain registers. The
 * registers as they were at the start of the propagation must be preserved
 * over phase 1 so that the machine state is correct at the start of phase
 * 2. This requires a copy to be taken (which can be stack allocated). During
 * a stack unwind (phase 1 or phase 2), the "current" virtual register set is
 * implemented as core register values held in a data structure, and non-core
 * register values held in the registers themselves. To ensure that all
 * original register values are available at the beginning of phase 2, the
 * core registers are saved in a second structure at the start of phase 1 and
 * the non-core registers are demand-saved into another part of the data
 * structure that holds the current core registers during the phase 1 stack
 * unwind.
 */
/* Extent to which the access routines are implemented:
 * _Unwind_VRS_Get and _Unwind_VRS_Set implement only access to the core registers.
 * _Unwind_VRS_Pop implements only popping of core and vfp registers.
 * There is no support here for the Intel WMMX registers, but space is nevertheless
 * reserved in the virtual register set structure to indicate whether demand-saving
 * of those registers is required (as they are unsupported, it never is). The space
 * costs nothing as it is required for alignment.
 * The level of supported functionality is compliant with the requirements of the
 * Exceptions EABI.
 */
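/* Illustrative sketch only (an addition, not from the original source): how a
 * personality routine layered on these access routines might read one core
 * register and write another. The helper name is made up for the example; the
 * _Unwind_VRS_* calls and enumerations are the EHABI ones declared above.
 *
 *   static _Unwind_Reason_Code example_copy_lr_to_pc(_Unwind_Context *context)
 *   {
 *       uint32_t lr;
 *       if (_Unwind_VRS_Get(context, _UVRSC_CORE, 14, _UVRSD_UINT32, &lr) != _UVRSR_OK)
 *           return _URC_FAILURE;
 *       if (_Unwind_VRS_Set(context, _UVRSC_CORE, 15, _UVRSD_UINT32, &lr) != _UVRSR_OK)
 *           return _URC_FAILURE;
 *       return _URC_OK;
 *   }
 */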
typedef unsigned char bool;
struct core_s { uint32_t r[16]; };  /* core integer regs */
struct vfp_s  { uint64_t d[32]; };  /* VFP registers saved in FSTMD format */
/* Phase 1 virtual register set includes demand-save areas */
/* The phase 2 virtual register set must be a prefix of the phase 1 set */
typedef struct phase1_virtual_register_set_s {
  /* demand_save flag == 1 means save the registers in the demand-save area */
  bool demand_save_vfp_low;
  bool demand_save_vfp_high;
  bool demand_save_wmmxd;
  bool demand_save_wmmxc;
  struct core_s core;  /* current core registers */
  struct vfp_s vfp;    /* demand-saved vfp registers */
} phase1_virtual_register_set;
/* Phase 2 virtual register set has no demand-save areas */
/* The phase 2 virtual register set must be a prefix of the phase 1 set */
/* The assembly fragments for _Unwind_RaiseException and _Unwind_Resume create
 * a phase2_virtual_register_set_s by hand, so be careful.
 */
typedef struct phase2_virtual_register_set_s {
  /* demand_save flag == 1 means save the registers in the demand-save area */
  /* Always 0 in phase 2 */
  bool demand_save_vfp_low;
  bool demand_save_vfp_high;
  bool demand_save_wmmxd;
  bool demand_save_wmmxc;
  struct core_s core;  /* current core registers */
} phase2_virtual_register_set;
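/* Illustrative sketch only (an addition, not from the original source): the
 * prefix requirement above means a phase1_virtual_register_set can safely be
 * viewed through a phase2_virtual_register_set pointer. A build-time layout
 * check along these lines could enforce that; the macro name is made up for
 * the example and <stddef.h> is assumed for offsetof.
 *
 *   #define VRS_LAYOUT_ASSERT(cond) typedef char vrs_layout_assert[(cond) ? 1 : -1]
 *   VRS_LAYOUT_ASSERT(offsetof(phase1_virtual_register_set, core) ==
 *                     offsetof(phase2_virtual_register_set, core));
 */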
/* -- Helper macros for the embedded assembly */

#if defined(__TARGET_ARCH_5T) || defined(__TARGET_ARCH_5TXM) || \
    defined(__TARGET_ARCH_5TE) || defined(__TARGET_ARCH_6) ||   \
    defined(__TARGET_ARCH_6T2) || defined(__TARGET_ARCH_7_A) /* || ... */
#define ARCH_5T_OR_LATER 1
#else
#define ARCH_5T_OR_LATER 0
#endif

#if defined(__APCS_INTERWORK) && !ARCH_5T_OR_LATER
#define OLD_STYLE_INTERWORKING 1
#else
#define OLD_STYLE_INTERWORKING 0
#endif
#if defined(__TARGET_ARCH_4T) || defined(__TARGET_ARCH_4TXM) || ARCH_5T_OR_LATER
/* ... */
#if defined(__TARGET_ARCH_THUMBNAIL)
/* ... */
#define RET_LR mov pc,lr
/* ----- Routines: ----- */

/* ----- Helper routines, private ----- */

/* R_ARM_PREL31 is a place-relative 31-bit signed relocation. The
 * routine takes the address of a location that was relocated by
 * R_ARM_PREL31, and returns an absolute address.
 */
static FORCEINLINE uint32_t __ARM_resolve_prel31(void *p)
{
  return (uint32_t)((((*(int32_t *)p) << 1) >> 1) + (int32_t)p);
}
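/* Worked example (values assumed for illustration): if the word at address
 * 0x00008000 contains 0x00000100, the low 31 bits sign-extend to +0x100 and
 * __ARM_resolve_prel31 returns 0x00008100. If the word instead contains
 * 0x7ffffff0, the 31-bit value sign-extends to -0x10 and the result is
 * 0x00007ff0.
 */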
/* ----- Helper routines, private but external ----- */

/* Note '%0' refers to local label '0' */
/* ... */
#define MAYBE_SWITCH_TO_ARM_STATE SWITCH_TO_ARM_STATE
#define MAYBE_CODE16 code16
/* ... */
#define MAYBE_SWITCH_TO_ARM_STATE /* nothing */
#define MAYBE_CODE16 /* nothing */
__asm void __ARM_Unwind_VRS_VFPpreserve_low(void *vfpp)
  /* Preserve the low vfp registers in the passed memory */
  /* ... */
  assert (%2 - %1) = 0;
  /* ... */
  MAYBE_SWITCH_TO_ARM_STATE;
  stc p11,vfp_d0,[r0],{0x20};   /* 0xec800b20  FSTMIAD r0,{d0-d15} */

__asm void __ARM_Unwind_VRS_VFPpreserve_high(void *vfpp)
  vfp_d16 CN 0;                 /* =16 when used with stcl */
  /* Preserve the high vfp registers in the passed memory */
  MAYBE_SWITCH_TO_ARM_STATE;
  stcl p11,vfp_d16,[r0],{0x20}; /* 0xecc00b20  FSTMIAD r0,{d16-d31} */

__asm void __ARM_Unwind_VRS_VFPrestore_low(void *vfpp)
  /* Restore the low vfp registers from the passed memory */
  MAYBE_SWITCH_TO_ARM_STATE;
  ldc p11,vfp_d0,[r0],{0x20};   /* 0xec900b20  FLDMIAD r0,{d0-d15} */

__asm void __ARM_Unwind_VRS_VFPrestore_high(void *vfpp)
  /* Restore the high vfp registers from the passed memory */
  vfp_d16 CN 0;                 /* =16 when used with ldcl */
  MAYBE_SWITCH_TO_ARM_STATE;
  ldcl p11,vfp_d16,[r0],{0x20}; /* 0xecd00b20  FLDMIAD r0,{d16-d31} */
__asm NORETURNDECL void __ARM_Unwind_VRS_corerestore(void *corep)
  /* We rely here on corep pointing to a location in the stack,
   * as we briefly assign it to sp. This allows us to safely do
   * ldmia's which restore sp (if we use a different base register,
   * the updated sp may be used by the handler of any data abort
   * that occurs during the ldmia, and the stack gets overwritten).
   * By hypothesis this is preserve8 but the load of sp means the
   * assembler can't infer that.
   */
  /* ... */
  ldmia.w r13!,{r0-r12};
  ldr.w r14, [r13, #4];    /* lr */
  ldr.w r12, [r13, #4*2];  /* pc */
  ldr.w r13, [r13, #0];    /* sp */
  /* ... */

  MAYBE_SWITCH_TO_ARM_STATE;
#if OLD_STYLE_INTERWORKING
  /* ... */
  ldr r12,[r13, #4*2];  /* pc */
  /* ... */
#if __ARMCC_VERSION < 300000
  /* ... */
  ldmia r14!, {r0-r12};
  /* ... */
  ldmia r14, {r14,r15};
/* ----- Development support ----- */

#ifdef VRS_DIAGNOSTICS
static void debug_print_vrs_vfp(uint32_t base, uint64_t *lp)
{
  int i;
  for (i = 0; i < 16; i++) {
    printf("D%-2d 0x%16.16llx ", i + base, *lp);
    /* ... */

static void debug_print_vrs(_Unwind_Context *context)
{
  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
  int i;
  printf("------------------------------------------------------------------------\n");
  for (i = 0; i < 16; i++) {
    printf("r%-2d 0x%8.8x ", i, vrsp->core.r[i]);
    /* ... */

  if (vrsp->demand_save_vfp_low == 1)
    printf("VFP low registers not saved\n");
  else
    debug_print_vrs_vfp(0, &vrsp->vfp.d[0]);
  if (vrsp->demand_save_vfp_high == 1)
    printf("VFP high registers not saved\n");
  else
    debug_print_vrs_vfp(16, &vrsp->vfp.d[16]);
  printf("------------------------------------------------------------------------\n");
/* ----- Public routines ----- */

EXPORT_C _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *context,
                                            _Unwind_VRS_RegClass regclass,
                                            uint32_t regno,
                                            _Unwind_VRS_DataRepresentation representation,
                                            void *valuep)
{
  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
  /* ... core registers: ... */
  if (representation != _UVRSD_UINT32 || regno > 15)
    return _UVRSR_FAILED;
  vrsp->core.r[regno] = *(uint32_t *)valuep;
  /* ... vfp and wmmx register classes: ... */
  return _UVRSR_NOT_IMPLEMENTED;
  /* ... otherwise: ... */
  return _UVRSR_FAILED;
EXPORT_C _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *context,
                                            _Unwind_VRS_RegClass regclass,
                                            uint32_t regno,
                                            _Unwind_VRS_DataRepresentation representation,
                                            void *valuep)
{
  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;
  /* ... core registers: ... */
  if (representation != _UVRSD_UINT32 || regno > 15)
    return _UVRSR_FAILED;
  *(uint32_t *)valuep = vrsp->core.r[regno];
  /* ... vfp and wmmx register classes: ... */
  return _UVRSR_NOT_IMPLEMENTED;
  /* ... otherwise: ... */
  return _UVRSR_FAILED;
EXPORT_C _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *context,
                                            _Unwind_VRS_RegClass regclass,
                                            uint32_t descriminator,
                                            _Unwind_VRS_DataRepresentation representation)
{
  phase1_virtual_register_set *vrsp = (phase1_virtual_register_set *)context;

  /* ... core registers: ... */
  /* If SP is included in the mask, the loaded value is used in preference to
   * the writeback value, but only on completion of the loading.
   */
  uint32_t mask, *vsp, *rp, sp_loaded;
  if (representation != _UVRSD_UINT32)
    return _UVRSR_FAILED;
  vsp = (uint32_t *)vrsp->core.r[R_SP];
  rp = (uint32_t *)&vrsp->core;
  mask = descriminator & 0xffff;
  sp_loaded = mask & (1 << R_SP);
  /* ... pop each core register selected by the mask ... */
#ifdef VRS_DIAGNOSTICS
  printf("VRS Pop r%d\n", rp - &vrsp->core.r[0]);
#endif
  /* ... */
  if (!sp_loaded)
    vrsp->core.r[R_SP] = (uint32_t)vsp;

  /* ... vfp registers: ... */
  uint32_t start = descriminator >> 16;
  uint32_t count = descriminator & 0xffff;
  bool some_low = start < 16;
  bool some_high = start + count > 16;
  if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE) ||
      (representation == _UVRSD_VFPX && some_high) ||
      (representation == _UVRSD_DOUBLE && start + count > 32))
    return _UVRSR_FAILED;
  if (some_low && vrsp->demand_save_vfp_low == 1) { /* Demand-save over phase 1 */
    vrsp->demand_save_vfp_low = 0;
    __ARM_Unwind_VRS_VFPpreserve_low(&vrsp->vfp.d[0]);
  }
  if (some_high && vrsp->demand_save_vfp_high == 1) { /* Demand-save over phase 1 */
    vrsp->demand_save_vfp_high = 0;
    __ARM_Unwind_VRS_VFPpreserve_high(&vrsp->vfp.d[16]);
  }
  /* Now recover from the stack into the real machine registers.
   * Note for _UVRSD_VFPX we assume FSTMX standard format 1.
   * Do this by saving the current VFP registers to a memory area,
   * moving the in-memory values into that area, and
   * restoring from the whole area.
   * Must be careful as the 64-bit values saved by FSTMX might be
   * only 32-bit aligned.
   */
  struct unaligned_vfp_reg_s { uint32_t w1; uint32_t w2; };
  struct unaligned_vfp_reg_s *vsp;
  struct vfp_s temp_vfp;
  if (some_low)
    __ARM_Unwind_VRS_VFPpreserve_low(&temp_vfp.d[0]);
  if (some_high)
    __ARM_Unwind_VRS_VFPpreserve_high(&temp_vfp.d[16]);
  vsp = (struct unaligned_vfp_reg_s *)vrsp->core.r[R_SP];
  /* ... for each popped register: ... */
  struct unaligned_vfp_reg_s *v =
      (struct unaligned_vfp_reg_s *)&temp_vfp.d[start++];
  /* ... */
#ifdef VRS_DIAGNOSTICS
  printf("VRS Pop D%d = 0x%llx\n", start - 1, temp_vfp.d[start - 1]);
#endif
  /* ... */
  vrsp->core.r[R_SP] = (uint32_t)((uint32_t *)vsp +
                                  (representation == _UVRSD_VFPX ?
                                   1 : /* +1 to skip the format word */
                                   0));
  if (some_low)
    __ARM_Unwind_VRS_VFPrestore_low(&temp_vfp.d[0]);
  if (some_high)
    __ARM_Unwind_VRS_VFPrestore_high(&temp_vfp.d[16]);

  /* ... wmmx register classes: ... */
  return _UVRSR_NOT_IMPLEMENTED;
  /* ... otherwise: ... */
  return _UVRSR_FAILED;
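/* Illustrative sketch only (an addition, not from the original source): how a
 * personality routine executing a frame's unwind instructions might drive
 * _Unwind_VRS_Pop. The masks and start/count values are invented for the
 * example, not taken from any particular frame.
 *
 *   // Pop r4-r11 and r14: bit i of the low 16 bits selects core register i,
 *   // so the mask is 0x0ff0 | 0x4000 == 0x4ff0.
 *   _Unwind_VRS_Pop(context, _UVRSC_CORE, 0x4ff0, _UVRSD_UINT32);
 *
 *   // Pop d8-d15 saved in FSTMD format: descriminator is (start << 16) | count.
 *   _Unwind_VRS_Pop(context, _UVRSC_VFP, (8 << 16) | 8, _UVRSD_DOUBLE);
 */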
/* =========================              ========================= */
/* ========================= The unwinder ========================= */
/* =========================              ========================= */
/* This implementation uses the UCB unwinder_cache as follows:
 * reserved1 is documented in the EABI as requiring initialisation to 0.
 *  It is used to manage nested simultaneous propagation. If the value is 0,
 *  the UCB is participating in no propagations. If the value is 1, the UCB
 *  is participating in one propagation. Otherwise the value is a pointer to
 *  a structure holding saved UCB state from the next propagation out.
 *  The structure used is simply a mallocated UCB.
 * reserved2 is used to preserve the call-site address over calls to a
 *  personality routine and cleanup.
 * reserved3 is used to cache the PR address.
 * reserved4 is used by the Symbian implementation to cache the ROM exception
 *  search table.
 * reserved5 is used by the Symbian implementation to cache the
 *  TExceptionDescriptor for the executable of the 'current' frame.
 */

#define NESTED_CONTEXT      unwinder_cache.reserved1
#define SAVED_CALLSITE_ADDR unwinder_cache.reserved2
#define PR_ADDR             unwinder_cache.reserved3
/* Index table entry: */

#ifndef __EPOC32__ // Symbian OS defines this in symbian_support.h
typedef struct __EIT_entry {
  uint32_t fnoffset; /* Place-relative */
  uint32_t content;
} __EIT_entry;
#endif

/* Private defines etc: */

static const uint32_t EXIDX_CANTUNWIND = 1;
static const uint32_t uint32_highbit = 0x80000000;
/* ARM C++ personality routines: */

typedef _Unwind_Reason_Code (*personality_routine)(_Unwind_State,
                                                   _Unwind_Control_Block *,
                                                   _Unwind_Context *);

WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr0(_Unwind_State state, _Unwind_Control_Block *,
                                                    _Unwind_Context *context);
IMPORT_C WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr1(_Unwind_State state, _Unwind_Control_Block *,
                                                             _Unwind_Context *context);
IMPORT_C WEAKDECL _Unwind_Reason_Code __aeabi_unwind_cpp_pr2(_Unwind_State state, _Unwind_Control_Block *,
                                                             _Unwind_Context *context);
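/* Illustrative sketch only (an addition, not from the original source): the
 * overall shape of a personality routine as this unwinder invokes it. A real
 * __aeabi_unwind_cpp_prN decodes the unwind instructions reachable from
 * ucbp->pr_cache.ehtp and applies them via the _Unwind_VRS_* routines; the
 * body below shows only the state dispatch.
 *
 *   _Unwind_Reason_Code example_pr(_Unwind_State state,
 *                                  _Unwind_Control_Block *ucbp,
 *                                  _Unwind_Context *context)
 *   {
 *       switch (state) {
 *       case _US_VIRTUAL_UNWIND_FRAME:
 *           // phase 1: return _URC_HANDLER_FOUND if this frame will catch,
 *           // otherwise unwind the virtual register set and continue
 *           return _URC_CONTINUE_UNWIND;
 *       case _US_UNWIND_FRAME_STARTING:
 *           // phase 2: run cleanups, or set up the VRS for the handler and
 *           // return _URC_INSTALL_CONTEXT
 *           return _URC_CONTINUE_UNWIND;
 *       case _US_UNWIND_FRAME_RESUME:
 *           // phase 2: resume after a cleanup has run
 *           return _URC_CONTINUE_UNWIND;
 *       default:
 *           return _URC_FAILURE;
 *       }
 *   }
 */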
/* Various image symbols: */

struct ExceptionTableInfo {
  uint32_t EIT_base;
  uint32_t EIT_limit;
};

/* We define __ARM_ETInfo to allow access to some linker-generated
   names that are not legal C identifiers. __ARM_ETInfo is extern only
   because of scope limitations of the embedded assembler */
extern const struct ExceptionTableInfo __ARM_ETInfo;

#define EIT_base \
    ((const __EIT_entry *)(__ARM_ETInfo.EIT_base + (const char *)&__ARM_ETInfo))
#define EIT_limit \
    ((const __EIT_entry *)(__ARM_ETInfo.EIT_limit + (const char *)&__ARM_ETInfo))
/* ----- Index table processing ----- */

/* find_and_expand_eit_entry is a support function used in both phases to set
 * ucb.pr_cache and internal cache.
 * Call with a pointer to the ucb and the return address to look up.
 *
 * The table is contained in the half-open interval
 * [EIT_base, EIT_limit) and is an ordered array of __EIT_entrys.
 * Perform a binary search via C library routine bsearch.
 * The table contains only function start addresses (encoded as offsets), so
 * we need to special-case the end table entry in the comparison function,
 * which we do by assuming the function it describes extends to the end of memory.
 * This causes us problems indirectly in that we would like to fault as
 * many attempts as possible to look up an invalid return address. There are
 * several ways an invalid return address can be obtained from a broken
 * program, such as someone corrupting the stack or broken unwind instructions
 * recovering the wrong value. It is plausible that many bad return addresses
 * will be either small integers or will point into the heap or stack, hence
 * it's desirable to get the length of that final function roughly right.
 * Here we make no attempt to do it. Code exclusively for use in toolchains
 * which define a suitable limit symbol could make use of that symbol.
 * Alternatively (QoI) a smart linker could augment the index table with a
 * dummy EXIDX_CANTUNWIND entry pointing just past the last real function.
 */
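/* Illustrative sketch only (an addition, not from the original source): the
 * linker augmentation suggested above would append one extra entry after the
 * last real function, conceptually
 *
 *   { prel31(address just past the last function), EXIDX_CANTUNWIND }
 *
 * so that bogus return addresses beyond the image resolve to a frame that
 * cannot be unwound instead of being attributed to the last real function.
 */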
static int EIT_comparator(const void *ck, const void *ce)
{
  uint32_t return_address = *(const uint32_t *)ck;
  const __EIT_entry *eitp = (const __EIT_entry *)ce;
  const __EIT_entry *next_eitp = eitp + 1;
  uint32_t next_fn;
  if (next_eitp != EIT_limit)
    next_fn = __ARM_resolve_prel31((void *)&next_eitp->fnoffset);
  else
    next_fn = 0xffffffffU;
  if (return_address < __ARM_resolve_prel31((void *)&eitp->fnoffset)) return -1;
  if (return_address >= next_fn) return 1;
  return 0;
}
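/* Worked example (addresses assumed for illustration): if three consecutive
 * entries resolve to function starts 0x8000, 0x8100 and 0x9000, a key of
 * 0x8120 compares greater-or-equal to 0x8100 and less than 0x9000, so bsearch
 * selects the middle entry. For the final entry next_fn is taken to be
 * 0xffffffff, i.e. the last function is assumed to extend to the end of
 * memory, as described above.
 */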
static _Unwind_Reason_Code find_and_expand_eit_entry_V2(_Unwind_Control_Block *ucbp,
                                                        uint32_t return_address)
{
  /* Search the index table for an entry containing the specified return
   * address. Subtract 2 from the return address, as the index table
   * contains function start addresses (a trailing noreturn BL would
   * appear to return to the first address of the next function (perhaps
   * +1 if Thumb); a leading BL would appear to return to function start
   * + instruction size (perhaps +1 if Thumb)).
   */
  const __EIT_entry *eitp;
  const __EIT_entry *base = EIT_base;
  size_t nelems = EIT_limit - EIT_base;

  return_address -= 2;
  eitp = (__EIT_entry *) bsearch(&return_address, base, nelems,
                                 sizeof(__EIT_entry), EIT_comparator);
  const __EIT_entry *base = EIT_base(ucbp);
  size_t nelems = EIT_limit(ucbp) - base;

  // This must succeed on Symbian OS or else an error will have occurred already.
  eitp = SearchEITV2(return_address, base, nelems);
  /* ... */

  /* The return address we have was not found in the EIT.
   * This breaks the scan and we have to indicate failure.
   */
  ucbp->PR_ADDR = NULL;
  DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_LOOKUPFAILED);
  /* ... */

  /* Cache the function offset */
  ucbp->pr_cache.fnstart = __ARM_resolve_prel31((void *)&eitp->fnoffset);

  /* Can this frame be unwound at all? */
  if (eitp->content == EXIDX_CANTUNWIND) {
    ucbp->PR_ADDR = NULL;
    DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_NOUNWIND);
    /* ... */
  }

  /* Obtain the address of the "real" __EHT_Header word */
  if (eitp->content & uint32_highbit) {
    /* It is immediate data */
    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
    ucbp->pr_cache.additional = 1;
  } else {
    /* The content field is a 31-bit place-relative offset to an _Unwind_EHT_Entry structure */
    ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)__ARM_resolve_prel31((void *)&eitp->content);
    ucbp->pr_cache.additional = 0;
  }

  /* Discover the personality routine address */
  if (*(uint32_t *)(ucbp->pr_cache.ehtp) & uint32_highbit) {
    /* It is immediate data - compute matching pr */
    uint32_t idx = ((*(uint32_t *)(ucbp->pr_cache.ehtp)) >> 24) & 0xf;
    if (idx == 0) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr0;
    else if (idx == 1) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr1;
    else if (idx == 2) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr2;
    else {
      ucbp->PR_ADDR = NULL;
      DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_TABLECORRUPT);
      /* ... */
    }
  } else {
    /* It's a place-relative offset to pr */
    ucbp->PR_ADDR = __ARM_resolve_prel31((void *)(ucbp->pr_cache.ehtp));
  }
  /* ... */
static _Unwind_Reason_Code find_and_expand_eit_entry_V1(_Unwind_Control_Block *ucbp,
                                                        uint32_t return_address)
{
  /* Search the index table for an entry containing the specified return
   * address. The EIT contains function offsets relative to the base of the
   * execute region so adjust the return address accordingly.
   */
  uint32_t return_address_offset = ADDR_TO_ER_RO_OFFSET(return_address, ucbp);
  const __EIT_entry *base = EIT_base;
  size_t nelems = EIT_limit - EIT_base;

  const __EIT_entry *eitp =
      (const __EIT_entry *) bsearch(&return_address_offset, base, nelems,
                                    sizeof(__EIT_entry), EIT_comparator);
  /* The return address we have was not found in the EIT.
   * This breaks the scan and we have to indicate failure.
   */
  ucbp->PR_ADDR = NULL;
  DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_LOOKUPFAILED);
  /* ... */

  /* Shouldn't we subtract 2 from here, just like in the V2 lookup? */
  uint32_t return_address_offset = ADDR_TO_ER_RO_OFFSET(return_address, ucbp);
  const __EIT_entry *base = EIT_base(ucbp);
  size_t nelems = EIT_limit(ucbp) - base;

  // This must succeed or else an error will have occurred already.
  const __EIT_entry *eitp = SearchEITV1(return_address_offset, base, nelems);
720 /* Cache the function offset */
722 ucbp->pr_cache.fnstart = ER_RO_OFFSET_TO_ADDR(eitp->fnoffset, ucbp);
724 /* Can this frame be unwound at all? */
726 if (eitp->content == EXIDX_CANTUNWIND) {
727 ucbp->PR_ADDR = NULL;
728 DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_NOUNWIND);
732 /* Obtain the address of the "real" __EHT_Header word */
733 if (eitp->content & uint32_highbit) {
734 /* It is immediate data */
735 ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
736 ucbp->pr_cache.additional = 1;
738 /* The content field is a segment relative offset to an _Unwind_EHT_Entry structure */
739 ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)ER_RO_OFFSET_TO_ADDR(eitp->content, ucbp);
740 ucbp->pr_cache.additional = 0;
743 /* Discover the personality routine address */
745 if (*(uint32_t *)(ucbp->pr_cache.ehtp) & uint32_highbit) {
746 /* It is immediate data - compute matching pr */
747 uint32_t idx = ((*(uint32_t *)(ucbp->pr_cache.ehtp)) >> 24) & 0xf;
749 if (idx == 0) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr0;
750 else if (idx == 1) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr1;
751 else if (idx == 2) ucbp->PR_ADDR = (uint32_t)&__aeabi_unwind_cpp_pr2;
753 ucbp->PR_ADDR = NULL;
754 DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_TABLECORRUPT);
758 /* Execute region offset to PR */
759 ucbp->PR_ADDR = ER_RO_OFFSET_TO_ADDR(*(uint32_t *)(ucbp->pr_cache.ehtp), ucbp);
static _Unwind_Reason_Code find_and_expand_eit_entry(_Unwind_Control_Block *ucbp,
                                                     uint32_t return_address)
{
  ValidateExceptionDescriptor(return_address, ucbp);
  /* ... */
  return find_and_expand_eit_entry_V2(ucbp, return_address);
  /* ... */
  return find_and_expand_eit_entry_V1(ucbp, return_address);
/* ----- Unwinding: ----- */

static NORETURNDECL void unwind_next_frame(_Unwind_Control_Block *ucbp, phase2_virtual_register_set *vrsp);

/* Helper fn: If the demand_save flag in a phase1_virtual_register_set was
 * zeroed, the registers were demand-saved. This function restores from
 * the save areas.
 */
static FORCEINLINE void restore_non_core_regs(phase1_virtual_register_set *vrsp)
{
  if (vrsp->demand_save_vfp_low == 0)
    __ARM_Unwind_VRS_VFPrestore_low(&vrsp->vfp.d[0]);
  if (vrsp->demand_save_vfp_high == 0)
    __ARM_Unwind_VRS_VFPrestore_high(&vrsp->vfp.d[16]);
}
/* _Unwind_RaiseException is the external entry point to begin unwinding */
__asm _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp)
  extern __ARM_Unwind_RaiseException;
  /* ... */

  /* Create a phase2_virtual_register_set on the stack */
  /* Save the core registers, carefully writing the original sp value */
  /* Note we account for the pc but do not actually write its value here */
  str.w r14,[sp, #-8]!;
  /* ... */
  str.w r14,[sp, #-4]!;   /* pushed 3 words  => 3 words */
  stmfd.w sp!,{r0-r12};   /* pushed 13 words => 16 words */
  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  /* ... */
  str.w r1,[sp,#-4]!;     /* pushed 1 word   => 17 words */
  /* ... */
  sub.w sp,sp,#4;         /* preserve 8 byte alignment => 18 words */

  /* Now pass to C (with r0 still valid) to do the real work.
   * r0 = ucbp, r1 = phase2_virtual_register_set.
   * If we get control back, pop the stack and return preserving r0.
   */
  /* ... */
  /* on arch 5T and later the linker will fix 'bl' => 'blx' as required */
  bl.w __ARM_Unwind_RaiseException;
  ldr.w r14,[sp,#16*4];
  /* ... */
  MAYBE_SWITCH_TO_ARM_STATE;

  /* Create a phase2_virtual_register_set on the stack */
  /* Save the core registers, carefully writing the original sp value */
#if __ARMCC_VERSION < 300000
  stmfd sp!,{r13-r15};    /* pushed 3 words  => 3 words */
#else
  stmdb r13, {r14,r15};
  str r13, [r13,#-3*4];
  /* ... */
#endif
  stmfd sp!,{r0-r12};     /* pushed 13 words => 16 words */
  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  /* ... */
  str r1,[sp,#-4]!;       /* pushed 1 word   => 17 words */
  /* ... */
  sub sp,sp,#4;           /* preserve 8 byte alignment => 18 words */

  /* Now pass to C (with r0 still valid) to do the real work.
   * r0 = ucbp, r1 = phase2_virtual_register_set.
   * If we get control back, pop the stack and return preserving r0.
   */
#if OLD_STYLE_INTERWORKING
  ldr r2,Unwind_RaiseException_Offset;
  /* ... */
#else
  /* on arch 5T and later the linker will fix 'bl' => 'blx' as required */
  bl __ARM_Unwind_RaiseException;
#endif
  /* ... */
#if OLD_STYLE_INTERWORKING
Unwind_RaiseException_Offset dcd __ARM_Unwind_RaiseException - Offset_Base;
#endif
  /* ... */
/* Alternate symbol names for difficult symbols.
 * It is possible no functions included in the image require
 * a handler table. Therefore make only a weak reference to
 * the handler table base symbol, which may be absent.
 */
extern |.ARM.exidx$$Base|;
extern |.ARM.exidx$$Limit|;
extern |.ARM.extab$$Base| WEAKASMDECL;

/* these are offsets for /ropi */
__ARM_ETInfo /* layout must match struct ExceptionTableInfo */
eit_base   dcd |.ARM.exidx$$Base|  - __ARM_ETInfo; /* index table base */
eit_limit  dcd |.ARM.exidx$$Limit| - __ARM_ETInfo; /* index table limit */
/* __ARM_Unwind_RaiseException performs phase 1 unwinding */

_Unwind_Reason_Code __ARM_Unwind_RaiseException(_Unwind_Control_Block *ucbp,
                                                phase2_virtual_register_set *entry_VRSp)
{
  phase1_virtual_register_set phase1_VRS;

  /* Is this a nested simultaneous propagation?
   * (see comments with _Unwind_Complete)
   */
  if (ucbp->NESTED_CONTEXT == 0) {
    /* No - this is the only propagation */
    ucbp->NESTED_CONTEXT = 1;
  } else {
#ifdef SUPPORT_NESTED_EXCEPTIONS
    /* Yes - cache the state elsewhere and restore it when the propagation ends */
    /* This representation wastes space and uses malloc; do better?
     * On the other hand will it ever be used in practice?
     */
    _Unwind_Control_Block *saved_ucbp = AllocSavedUCB();
    if (saved_ucbp == NULL) {
      DEBUGGER_BOTTLENECK(ucbp, _UASUBSYS_UNWINDER, _UAACT_ENDING, _UAARG_ENDING_UNWINDER_BUFFERFAILED);
      return _URC_FAILURE;
    }
    saved_ucbp->unwinder_cache = ucbp->unwinder_cache;
    saved_ucbp->barrier_cache = ucbp->barrier_cache;
    saved_ucbp->cleanup_cache = ucbp->cleanup_cache;
    ucbp->NESTED_CONTEXT = (uint32_t)saved_ucbp;
#endif
  }

  /* entry_VRSp contains the core registers as they were when
   * _Unwind_RaiseException was called. Copy the call-site address to r15
   * then copy all the registers to phase1_VRS for the phase 1 stack scan.
   */
  entry_VRSp->core.r[15] = entry_VRSp->core.r[14];
  phase1_VRS.core = entry_VRSp->core;

  /* For phase 1 only ensure non-core registers are saved before use.
   * If WMMX registers are supported, initialise their flags here and
   * take appropriate action elsewhere.
   */
  phase1_VRS.demand_save_vfp_low = 1;
  phase1_VRS.demand_save_vfp_high = 1;

  /* Set up Symbian specific caches in the _Unwind_Control_Block's
   * unwinder cache.
   */
  InitialiseSymbianSpecificUnwinderCache(phase1_VRS.core.r[15], ucbp);

  /* Now perform a virtual unwind until a propagation barrier is met, or
   * until something goes wrong. If something does go wrong, we ought (I
   * suppose) to restore registers we may have destroyed.
   */
  for (;;) {
    _Unwind_Reason_Code pr_result;

    /* Search the index table for the required entry. Cache the index table
     * pointer, and obtain and cache the addresses of the "real" __EHT_Header
     * word and the personality routine.
     */
    if (find_and_expand_eit_entry(ucbp, phase1_VRS.core.r[15]) != _URC_OK) {
      restore_non_core_regs(&phase1_VRS);
      /* Debugger bottleneck fn called during lookup */
      return _URC_FAILURE;
    }

    /* Call the pr to decide what to do */
    pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_VIRTUAL_UNWIND_FRAME,
                                                     ucbp,
                                                     (_Unwind_Context *)&phase1_VRS);

    if (pr_result == _URC_HANDLER_FOUND) break;
    if (pr_result == _URC_CONTINUE_UNWIND) continue;

    /* If we get here some sort of failure has occurred in the
     * pr and probably the pr returned _URC_FAILURE.
     */
    restore_non_core_regs(&phase1_VRS);
    return _URC_FAILURE;
  }

  /* Propagation barrier located... restore entry register state of non-core regs */
  restore_non_core_regs(&phase1_VRS);

  /* Initiate real unwinding */
  unwind_next_frame(ucbp, entry_VRSp);
  /* Unreached, but keep compiler quiet: */
  return _URC_FAILURE;
}
/* unwind_next_frame performs phase 2 unwinding */

static NORETURNDECL void unwind_next_frame(_Unwind_Control_Block *ucbp, phase2_virtual_register_set *vrsp)
{
  for (;;) {
    _Unwind_Reason_Code pr_result;

    /* Search the index table for the required entry. Cache the index table
     * pointer, and obtain and cache the addresses of the "real" __EHT_Header
     * word and the personality routine.
     */
    if (find_and_expand_eit_entry(ucbp, vrsp->core.r[15]) != _URC_OK)
      abort();

    /* Save the call-site address and call the pr to do whatever it
     * wants to do on this new frame.
     */
    ucbp->SAVED_CALLSITE_ADDR = vrsp->core.r[15];
    pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_UNWIND_FRAME_STARTING, ucbp,
                                                     (_Unwind_Context *)vrsp);

    if (pr_result == _URC_INSTALL_CONTEXT) {
      /* Upload the registers */
      __ARM_Unwind_VRS_corerestore(&vrsp->core);
    } else if (pr_result == _URC_CONTINUE_UNWIND)
      /* ... */
/* _Unwind_Resume is the external entry point called after a cleanup
 * to resume unwinding. It tail-calls a helper function,
 * __ARM_Unwind_Resume, which never returns.
 */
__asm NORETURNDECL void _Unwind_Resume(_Unwind_Control_Block *ucbp)
  extern __ARM_Unwind_Resume;
  /* ... */

  /* Create a phase2_virtual_register_set on the stack */
  /* Save the core registers, carefully writing the original sp value */
  /* Note we account for the pc but do not actually write its value here */
  str.w r14,[sp, #-8]!;
  /* ... */
  str.w r14,[sp, #-4]!;   /* pushed 3 words  => 3 words */
  stmfd.w sp!,{r0-r12};   /* pushed 13 words => 16 words */
  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  /* ... */
  str.w r1,[sp,#-4]!;     /* pushed 1 word   => 17 words */
  /* ... */
  sub.w sp,sp,#4;         /* preserve 8 byte alignment => 18 words */

  /* Now pass to C (with r0 still valid) to do the real work.
   * r0 = ucbp, r1 = phase2_virtual_register_set.
   * This call never returns.
   */
  /* ... */
  MAYBE_SWITCH_TO_ARM_STATE;

  /* Create a phase2_virtual_register_set on the stack */
  /* Save the core registers, carefully writing the original sp value */
#if __ARMCC_VERSION < 300000
  stmfd sp!,{r13-r15};    /* pushed 3 words  => 3 words */
#else
  stmdb r13, {r14,r15};
  str r13, [r13,#-3*4];
  /* ... */
#endif
  stmfd sp!,{r0-r12};     /* pushed 13 words => 16 words */
  /* Write zeroes for the demand_save bytes so no saving occurs in phase 2 */
  /* ... */
  str r1,[sp,#-4]!;       /* pushed 1 word   => 17 words */
  /* ... */
  sub sp,sp,#4;           /* preserve 8 byte alignment => 18 words */

  /* Now pass to C (with r0 still valid) to do the real work.
   * r0 = ucbp, r1 = phase2_virtual_register_set.
   * This call never returns.
   */
#ifdef __APCS_INTERWORK
  ldr r2,Unwind_Resume_Offset;
  /* ... */
Unwind_Resume_Offset dcd __ARM_Unwind_Resume - .;
#else
  b __ARM_Unwind_Resume;
#endif
  /* ... */
/* Helper function for _Unwind_Resume */

NORETURNDECL void __ARM_Unwind_Resume(_Unwind_Control_Block *ucbp,
                                      phase2_virtual_register_set *entry_VRSp)
{
  _Unwind_Reason_Code pr_result;

  /* Recover saved state */
  entry_VRSp->core.r[15] = ucbp->SAVED_CALLSITE_ADDR;

  /* Call the cached PR and dispatch */
  pr_result = ((personality_routine)ucbp->PR_ADDR)(_US_UNWIND_FRAME_RESUME, ucbp,
                                                   (_Unwind_Context *)entry_VRSp);

  if (pr_result == _URC_INSTALL_CONTEXT) {
    /* Upload the registers */
    __ARM_Unwind_VRS_corerestore(&entry_VRSp->core);
  } else if (pr_result == _URC_CONTINUE_UNWIND)
    unwind_next_frame(ucbp, entry_VRSp);
  /* ... */
/* _Unwind_Complete is called at the end of a propagation.
 * If we support multiple simultaneous propagations, restore the cached state
 * of the previous propagation here.
 */
void _Unwind_Complete(_Unwind_Control_Block *ucbp)
{
  _Unwind_Control_Block *context = (_Unwind_Control_Block *)ucbp->NESTED_CONTEXT;
  if ((uint32_t)context == 0) abort();  /* should be impossible */
  if ((uint32_t)context == 1) {
    /* This was the only ongoing propagation of this object */
    ucbp->NESTED_CONTEXT--;
    return;
  }
#ifdef SUPPORT_NESTED_EXCEPTIONS
  /* Otherwise we copy the state back from the cache structure pointed to
   * by ucbp->NESTED_CONTEXT.
   */
  /* This first one updates ucbp->NESTED_CONTEXT */
  ucbp->unwinder_cache = context->unwinder_cache;
  ucbp->barrier_cache = context->barrier_cache;
  ucbp->cleanup_cache = context->cleanup_cache;
  FreeSavedUCB(context);
  /* ... */
/* _Unwind_DeleteException can be used to invoke the exception_cleanup
 * function after catching a foreign exception.
 */
void _Unwind_DeleteException(_Unwind_Control_Block *ucbp)
{
  if (ucbp->exception_cleanup != NULL)
    (ucbp->exception_cleanup)(_URC_FOREIGN_EXCEPTION_CAUGHT, ucbp);
}
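/* Illustrative sketch only (an addition, not from the original source): a
 * language runtime that has caught and finished handling an exception raised
 * by some other runtime might dispose of it like this. The function and
 * parameter names are made up for the example.
 *
 *   void example_end_foreign_catch(_Unwind_Control_Block *foreign_ucbp)
 *   {
 *       // The foreign runtime's exception_cleanup, if any, runs inside:
 *       _Unwind_DeleteException(foreign_ucbp);
 *   }
 */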
#endif /* unwinder_c */

#ifdef unwind_activity_c

/* Runtime debug "bottleneck function": */
/* (not in the current Exceptions EABI document) */

void _Unwind_Activity(_Unwind_Control_Block *ucbp, uint32_t reason, uint32_t arg)
{
#ifdef UNWIND_ACTIVITY_DIAGNOSTICS
  uint32_t who = reason >> 24;
  uint32_t activity = reason & 0xffffff;
  printf("_Unwind_Activity: UCB=0x%8.8x Reason=(", (uint32_t)ucbp);
  switch (who) {
  case _UASUBSYS_UNWINDER:
    /* ... */
    if (activity >= 0x80)
      printf("%x) Arg=0x%8.8x\n", activity, arg);
    /* ... */
    if (activity >= 0x80) {
      if (activity == _UAACT_CPP_TYPEINFO)
        printf("typeinfo) Typeinfo=0x%8.8x\n", arg);
      else
        printf("%x) Arg=0x%8.8x\n", activity, arg);
    }
    /* ... */
    if (activity >= 0x80)
      printf("%x) Arg=0x%8.8x\n", activity, arg);
    /* ... */
  }
  if (activity < 0x80) {
    switch (activity) {
    case _UAACT_STARTING:
      printf("starting) Typeinfo=0x%8.8x\n", arg);
      break;
    /* ... */
      printf("ending) Cause=%d\n", arg);
      break;
    case _UAACT_BARRIERFOUND:
      printf("barrierfound) Pad=0x%8.8x\n", arg);
      break;
    case _UAACT_PADENTRY:
      printf("padentry) Pad=0x%8.8x\n", arg);
      break;
    /* ... */
      printf("%x) Arg=0x%8.8x\n", activity, arg);
      break;
    }
  }
#endif
}

#endif /* unwind_activity_c */