/*-
 * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/msun/powerpc/fenv.h,v 1.3 2005/03/16 19:03:45 das Exp $
 */

#ifndef _FENV_H_
#define _FENV_H_

#include <sys/_types.h>

typedef __uint32_t      fenv_t;
typedef __uint32_t      fexcept_t;

/* Exception flags */
#define FE_INEXACT      0x02000000
#define FE_DIVBYZERO    0x04000000
#define FE_UNDERFLOW    0x08000000
#define FE_OVERFLOW     0x10000000
#define FE_INVALID      0x20000000      /* all types of invalid FP ops */

/*
 * The PowerPC architecture has extra invalid-operation flags that identify
 * the specific kind of invalid operation that occurred.  These flags may be
 * tested, set, and cleared (but not masked) individually.  All of these
 * bits are cleared when FE_INVALID is cleared, but only FE_VXSOFT is set
 * when FE_INVALID is raised explicitly in software.
 */
#define FE_VXCVI        0x00000100      /* invalid integer convert */
#define FE_VXSQRT       0x00000200      /* square root of a negative number */
#define FE_VXSOFT       0x00000400      /* software-requested exception */
#define FE_VXVC         0x00080000      /* ordered comparison involving NaN */
#define FE_VXIMZ        0x00100000      /* inf * 0 */
#define FE_VXZDZ        0x00200000      /* 0 / 0 */
#define FE_VXIDI        0x00400000      /* inf / inf */
#define FE_VXISI        0x00800000      /* inf - inf */
#define FE_VXSNAN       0x01000000      /* operation on a signalling NaN */
#define FE_ALL_INVALID  (FE_VXCVI | FE_VXSQRT | FE_VXSOFT | FE_VXVC | \
                         FE_VXIMZ | FE_VXZDZ | FE_VXIDI | FE_VXISI | \
                         FE_VXSNAN | FE_INVALID)
#define FE_ALL_EXCEPT   (FE_DIVBYZERO | FE_INEXACT | \
                         FE_ALL_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
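
/*
 * Illustrative usage sketch (not part of the original header): because
 * clearing FE_INVALID also clears every FE_VX* sub-flag, a caller can ask
 * fetestexcept() (declared below) for the specific cause, here 0/0, before
 * clearing it.  The variable name __x is hypothetical.
 *
 *      volatile double __x = 0.0;
 *
 *      feclearexcept(FE_INVALID);
 *      __x = __x / __x;
 *      if (fetestexcept(FE_VXZDZ))
 *              feclearexcept(FE_INVALID);
 */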

/* Rounding modes */
#define FE_TONEAREST    0x0000
#define FE_TOWARDZERO   0x0001
#define FE_UPWARD       0x0002
#define FE_DOWNWARD     0x0003
#define _ROUND_MASK     (FE_TONEAREST | FE_DOWNWARD | \
                         FE_UPWARD | FE_TOWARDZERO)
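
/*
 * Illustrative usage sketch (not original text): these macros are the values
 * accepted by fesetround() and returned by fegetround(), both defined below.
 * A caller typically saves the current mode, switches, does its work, and
 * restores the old mode; __oround is a hypothetical variable name.
 *
 *      int __oround = fegetround();
 *
 *      fesetround(FE_TOWARDZERO);
 *      fesetround(__oround);
 */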

__BEGIN_DECLS

/* Default floating-point environment */
extern const fenv_t     __fe_dfl_env;
#define FE_DFL_ENV      (&__fe_dfl_env)
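
/*
 * Illustrative usage sketch (not original text): FE_DFL_ENV can be passed to
 * fesetenv() below to restore the default environment, which on this port is
 * round-to-nearest with all exception flags clear and no exceptions trapped.
 *
 *      fesetenv(FE_DFL_ENV);
 */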

/* We need to be able to map status flag positions to mask flag positions */
#define _FPUSW_SHIFT    22
#define _ENABLE_MASK    ((FE_DIVBYZERO | FE_INEXACT | FE_INVALID | \
                         FE_OVERFLOW | FE_UNDERFLOW) >> _FPUSW_SHIFT)
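
/*
 * Added note (not in the original header): shifting an exception's status
 * bit right by _FPUSW_SHIFT (22) yields the corresponding FPSCR enable bit.
 * For example, FE_OVERFLOW (0x10000000, the OX status bit) >> 22 == 0x40,
 * which is the OE enable bit that feenableexcept() below turns on.
 */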

#define __mffs(__env)   __asm __volatile("mffs %0" : "=f" (*(__env)))
#define __mtfsf(__env)  __asm __volatile("mtfsf 255,%0" : : "f" (__env))

union __fpscr {
        double __d;
        struct {
                __uint32_t __junk;
                fenv_t __reg;
        } __bits;
};
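
/*
 * Added note (an assumption about the mffs/mtfsf encoding, not original
 * text): mffs places the 32-bit FPSCR in the low-order word of a
 * floating-point register, so when that register is stored as a big-endian
 * double the FPSCR occupies the second word.  The union above lets the
 * inline functions below read and modify that word as __bits.__reg.
 */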

static __inline int
feclearexcept(int __excepts)
{
        union __fpscr __r;

        if (__excepts & FE_INVALID)
                __excepts |= FE_ALL_INVALID;
        __mffs(&__r.__d);
        __r.__bits.__reg &= ~__excepts;
        __mtfsf(__r.__d);
        return (0);
}

static __inline int
fegetexceptflag(fexcept_t *__flagp, int __excepts)
{
        union __fpscr __r;

        __mffs(&__r.__d);
        *__flagp = __r.__bits.__reg & __excepts;
        return (0);
}

static __inline int
fesetexceptflag(const fexcept_t *__flagp, int __excepts)
{
        union __fpscr __r;

        if (__excepts & FE_INVALID)
                __excepts |= FE_ALL_EXCEPT;
        __mffs(&__r.__d);
        __r.__bits.__reg &= ~__excepts;
        __r.__bits.__reg |= *__flagp & __excepts;
        __mtfsf(__r.__d);
        return (0);
}

static __inline int
feraiseexcept(int __excepts)
{
        union __fpscr __r;

        if (__excepts & FE_INVALID)
                __excepts |= FE_VXSOFT;
        __mffs(&__r.__d);
        __r.__bits.__reg |= __excepts;
        __mtfsf(__r.__d);
        return (0);
}

static __inline int
fetestexcept(int __excepts)
{
        union __fpscr __r;

        __mffs(&__r.__d);
        return (__r.__bits.__reg & __excepts);
}

static __inline int
fegetround(void)
{
        union __fpscr __r;

        __mffs(&__r.__d);
        return (__r.__bits.__reg & _ROUND_MASK);
}

static __inline int
fesetround(int __round)
{
        union __fpscr __r;

        if (__round & ~_ROUND_MASK)
                return (-1);
        __mffs(&__r.__d);
        __r.__bits.__reg &= ~_ROUND_MASK;
        __r.__bits.__reg |= __round;
        __mtfsf(__r.__d);
        return (0);
}

static __inline int
fegetenv(fenv_t *__envp)
{
        union __fpscr __r;

        __mffs(&__r.__d);
        *__envp = __r.__bits.__reg;
        return (0);
}

static __inline int
feholdexcept(fenv_t *__envp)
{
        union __fpscr __r;

        __mffs(&__r.__d);
        *__envp = __r.__bits.__reg;     /* save the FPSCR word, not the raw double */
        __r.__bits.__reg &= ~(FE_ALL_EXCEPT | _ENABLE_MASK);
        __mtfsf(__r.__d);
        return (0);
}

static __inline int
fesetenv(const fenv_t *__envp)
{
        union __fpscr __r;

        __r.__bits.__reg = *__envp;
        __mtfsf(__r.__d);
        return (0);
}

static __inline int
feupdateenv(const fenv_t *__envp)
{
        union __fpscr __r;

        __mffs(&__r.__d);
        __r.__bits.__reg &= FE_ALL_EXCEPT;
        __r.__bits.__reg |= *__envp;
        __mtfsf(__r.__d);
        return (0);
}
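
/*
 * Illustrative usage sketch (not part of the original header): the usual
 * feholdexcept()/feupdateenv() pairing runs a computation in a non-stop
 * environment with clear flags, discards any exceptions judged spurious,
 * and merges the rest back into the caller's environment.  The names __env
 * and __hypot_naive() are hypothetical.
 *
 *      fenv_t __env;
 *      double __r;
 *
 *      feholdexcept(&__env);
 *      __r = __hypot_naive(1e300, 1e300);
 *      if (fetestexcept(FE_OVERFLOW))
 *              feclearexcept(FE_OVERFLOW);
 *      feupdateenv(&__env);
 */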

#if __BSD_VISIBLE

static __inline int
feenableexcept(int __mask)
{
        union __fpscr __r;
        fenv_t __oldmask;

        __mffs(&__r.__d);
        __oldmask = __r.__bits.__reg;
        __r.__bits.__reg |= (__mask & FE_ALL_EXCEPT) >> _FPUSW_SHIFT;
        __mtfsf(__r.__d);
        return ((__oldmask & _ENABLE_MASK) << _FPUSW_SHIFT);
}

static __inline int
fedisableexcept(int __mask)
{
        union __fpscr __r;
        fenv_t __oldmask;

        __mffs(&__r.__d);
        __oldmask = __r.__bits.__reg;
        __r.__bits.__reg &= ~((__mask & FE_ALL_EXCEPT) >> _FPUSW_SHIFT);
        __mtfsf(__r.__d);
        return ((__oldmask & _ENABLE_MASK) << _FPUSW_SHIFT);
}

static __inline int
fegetexcept(void)
{
        union __fpscr __r;

        __mffs(&__r.__d);
        return ((__r.__bits.__reg & _ENABLE_MASK) << _FPUSW_SHIFT);
}
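
/*
 * Illustrative usage sketch (not original text): the BSD extensions above
 * turn selected exceptions into traps by setting their FPSCR enable bits;
 * the trapped region runs between the two calls.  Whether an enabled
 * exception actually delivers a signal also depends on the floating-point
 * exception mode (MSR FE0/FE1) chosen by the kernel for the process.
 *
 *      feenableexcept(FE_DIVBYZERO | FE_INVALID);
 *      fedisableexcept(FE_DIVBYZERO | FE_INVALID);
 */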

#endif /* __BSD_VISIBLE */

__END_DECLS

#endif /* !_FENV_H_ */