/*--------------------------------------------------------------------
 * © Portions copyright (c) 2006 Nokia Corporation. All rights reserved.
 *--------------------------------------------------------------------
 */
/*-
 * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/msun/arm/fenv.h,v 1.5 2005/03/16 19:03:45 das Exp $
 */

#ifndef _FENV_H_
#define _FENV_H_

#include <sys/_types.h>

#ifdef __WINSCW__
/*
 * Suppress the 'argument is not used in function' warning. It crops up
 * wherever fenv.h is included: the arguments are only used inside macros
 * that are expanded during preprocessing, so the compiler believes they
 * are unused. It is therefore suppressed here.
 */
#pragma warn_unusedarg off
#endif //__WINSCW__

typedef __uint32_t fenv_t;
typedef __uint32_t fexcept_t;

/* Exception flags */
#define FE_INVALID      0x0001
#define FE_DIVBYZERO    0x0002
#define FE_OVERFLOW     0x0004
#define FE_UNDERFLOW    0x0008
#define FE_INEXACT      0x0010
#define FE_ALL_EXCEPT   (FE_DIVBYZERO | FE_INEXACT | \
                         FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)

/* Rounding modes */
#define FE_TONEAREST    0x0000
#define FE_TOWARDZERO   0x0001
#define FE_UPWARD       0x0002
#define FE_DOWNWARD     0x0003
#define _ROUND_MASK     (FE_TONEAREST | FE_DOWNWARD | \
                         FE_UPWARD | FE_TOWARDZERO)
__BEGIN_DECLS

/* Default floating-point environment */
extern const fenv_t __fe_dfl_env;
#define FE_DFL_ENV      (&__fe_dfl_env)

/* We need to be able to map status flag positions to mask flag positions */
#define _FPUSW_SHIFT    16
#define _ENABLE_MASK    (FE_ALL_EXCEPT << _FPUSW_SHIFT)

#ifdef ARM_HARD_FLOAT
#define __rfs(__fpsr)   __asm __volatile("rfs %0" : "=r" (*(__fpsr)))
#define __wfs(__fpsr)   __asm __volatile("wfs %0" : : "r" (__fpsr))
#else
#define __rfs(__fpsr)
#define __wfs(__fpsr)
#endif

static __inline int
feclearexcept(int __excepts)
{
#ifdef __SYMBIAN32__
    fexcept_t __fpsr = 0;
#else
    fexcept_t __fpsr;
#endif //__SYMBIAN32__

    __rfs(&__fpsr);
    __fpsr &= ~__excepts;
    __wfs(__fpsr);
    return (0);
}

static __inline int
fegetexceptflag(fexcept_t *__flagp, int __excepts)
{
#ifdef __SYMBIAN32__
    fexcept_t __fpsr = 0;
#else
    fexcept_t __fpsr;
#endif //__SYMBIAN32__

    __rfs(&__fpsr);
    *__flagp = __fpsr & __excepts;
    return (0);
}

static __inline int
fesetexceptflag(const fexcept_t *__flagp, int __excepts)
{
#ifdef __SYMBIAN32__
    fexcept_t __fpsr = 0;
#else
    fexcept_t __fpsr;
#endif //__SYMBIAN32__

    __rfs(&__fpsr);
    __fpsr &= ~__excepts;
    __fpsr |= *__flagp & __excepts;
    __wfs(__fpsr);
    return (0);
}

static __inline int
feraiseexcept(int __excepts)
{
    fexcept_t __ex = __excepts;

    fesetexceptflag(&__ex, __excepts);  /* XXX */
    return (0);
}

static __inline int
fetestexcept(int __excepts)
{
#ifdef __SYMBIAN32__
    fexcept_t __fpsr = 0;
#else
    fexcept_t __fpsr;
#endif //__SYMBIAN32__

    __rfs(&__fpsr);
    return (__fpsr & __excepts);
}

static __inline int
fegetround(void)
{

    /*
     * Apparently, the rounding mode is specified as part of the
     * instruction format on ARM, so the dynamic rounding mode is
     * indeterminate. Some FPUs may differ.
     */
    return (-1);
}

static __inline int
fesetround(int __round)
{

    return (-1);
}

static __inline int
fegetenv(fenv_t *__envp)
{

    __rfs(__envp);
    return (0);
}

static __inline int
feholdexcept(fenv_t *__envp)
{
#ifdef __SYMBIAN32__
    fenv_t __env = 0;
#else
    fenv_t __env;
#endif //__SYMBIAN32__

    __rfs(&__env);
    *__envp = __env;
    __env &= ~(FE_ALL_EXCEPT | _ENABLE_MASK);
    __wfs(__env);
    return (0);
}

static __inline int
fesetenv(const fenv_t *__envp)
{

    __wfs(*__envp);
    return (0);
}

static __inline int
feupdateenv(const fenv_t *__envp)
{
#ifdef __SYMBIAN32__
    fexcept_t __fpsr = 0;
#else
    fexcept_t __fpsr;
#endif //__SYMBIAN32__

    __rfs(&__fpsr);
    __wfs(*__envp);
    feraiseexcept(__fpsr & FE_ALL_EXCEPT);
    return (0);
}
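
/*
 * Usage sketch (illustrative only, not part of the original header): how
 * the exception-flag interface above is typically driven. The function
 * check_div below is hypothetical and assumes <stdio.h> is available.
 *
 *   double check_div(double num, double den)
 *   {
 *       double res;
 *
 *       feclearexcept(FE_ALL_EXCEPT);       // start from a clean slate
 *       res = num / den;
 *       if (fetestexcept(FE_DIVBYZERO))     // was the flag raised?
 *           printf("division by zero\n");
 *       return res;
 *   }
 *
 * Note that when ARM_HARD_FLOAT is not defined, __rfs()/__wfs() expand to
 * nothing, so on the __SYMBIAN32__ build fetestexcept() only ever sees the
 * zero-initialised __fpsr and reports no flags.
 */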

#if __BSD_VISIBLE

static __inline int
feenableexcept(int __mask)
{
#ifdef __SYMBIAN32__
    fenv_t __old_fpsr = 0, __new_fpsr = 0;
#else
    fenv_t __old_fpsr, __new_fpsr;
#endif //__SYMBIAN32__

    __rfs(&__old_fpsr);
    __new_fpsr = __old_fpsr | (__mask & FE_ALL_EXCEPT) << _FPUSW_SHIFT;
    __wfs(__new_fpsr);
    return ((__old_fpsr >> _FPUSW_SHIFT) & FE_ALL_EXCEPT);
}

static __inline int
fedisableexcept(int __mask)
{
#ifdef __SYMBIAN32__
    fenv_t __old_fpsr = 0, __new_fpsr = 0;
#else
    fenv_t __old_fpsr, __new_fpsr;
#endif //__SYMBIAN32__

    __rfs(&__old_fpsr);
    __new_fpsr = __old_fpsr & ~((__mask & FE_ALL_EXCEPT) << _FPUSW_SHIFT);
    __wfs(__new_fpsr);
    return ((__old_fpsr >> _FPUSW_SHIFT) & FE_ALL_EXCEPT);
}

static __inline int
fegetexcept(void)
{
#ifdef __SYMBIAN32__
    fexcept_t __fpsr = 0;
#else
    fexcept_t __fpsr;
#endif //__SYMBIAN32__

    __rfs(&__fpsr);
    return ((__fpsr & _ENABLE_MASK) >> _FPUSW_SHIFT);
}
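
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * BSD extensions above can bracket a computation that should trap on
 * overflow. With ARM_HARD_FLOAT undefined the __rfs()/__wfs() macros are
 * empty, so these calls become no-ops.
 *
 *   fenv_t env;
 *
 *   fegetenv(&env);                  // save the current environment
 *   feenableexcept(FE_OVERFLOW);     // request a trap on overflow
 *   ...                              // computation of interest
 *   fedisableexcept(FE_OVERFLOW);    // stop trapping
 *   fesetenv(&env);                  // restore the saved environment
 */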

#endif /* __BSD_VISIBLE */

__END_DECLS

#endif /* !_FENV_H_ */