Initial Contribution
diff --git a/libm/include/alpha/fenv.h b/libm/include/alpha/fenv.h
new file mode 100644
index 0000000..dc7bcb7
--- /dev/null
+++ b/libm/include/alpha/fenv.h
@@ -0,0 +1,185 @@
+/*-
+ * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/msun/alpha/fenv.h,v 1.3 2005/03/16 19:03:44 das Exp $
+ */
+
+#ifndef	_FENV_H_
+#define	_FENV_H_
+
+#include <sys/_types.h>
+
+typedef	__uint64_t	fenv_t;
+typedef	__uint16_t	fexcept_t;
+
+/* Exception flags */
+#define	FE_INVALID	0x02
+#define	FE_DIVBYZERO	0x04
+#define	FE_OVERFLOW	0x08
+#define	FE_UNDERFLOW	0x10
+#define	FE_INEXACT	0x20
+#define	FE_INTOVF	0x40	/* not maskable */
+#define	FE_ALL_EXCEPT	(FE_DIVBYZERO | FE_INEXACT | FE_INTOVF | \
+			 FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
+
+/* Rounding modes */
+#define	FE_TOWARDZERO	0x00
+#define	FE_DOWNWARD	0x01
+#define	FE_TONEAREST	0x02
+#define	FE_UPWARD	0x03
+#define	_ROUND_MASK	(FE_TONEAREST | FE_DOWNWARD | \
+			 FE_UPWARD | FE_TOWARDZERO)
+#define	_ROUND_SHIFT	58
+
+#define	_FPUSW_SHIFT	51
+
+#define	__excb()	__asm __volatile("excb")
+#define	__mf_fpcr(__cw)	__asm __volatile("mf_fpcr %0" : "=f" (*(__cw)))
+#define	__mt_fpcr(__cw)	__asm __volatile("mt_fpcr %0" : : "f" (__cw))
+
+union __fpcr {
+	double __d;
+	fenv_t __bits;
+};
+
+__BEGIN_DECLS
+
+/* Default floating-point environment */
+extern const fenv_t	__fe_dfl_env;
+#define	FE_DFL_ENV	(&__fe_dfl_env)
+
+static __inline int
+feclearexcept(int __excepts)
+{
+	union __fpcr __r;
+
+	__excb();
+	__mf_fpcr(&__r.__d);
+	__r.__bits &= ~((fenv_t)__excepts << _FPUSW_SHIFT);
+	__mt_fpcr(__r.__d);
+	__excb();
+	return (0);
+}
+
+static __inline int
+fegetexceptflag(fexcept_t *__flagp, int __excepts)
+{
+	union __fpcr __r;
+
+	__excb();
+	__mf_fpcr(&__r.__d);
+	__excb();
+	*__flagp = (__r.__bits >> _FPUSW_SHIFT) & __excepts;
+	return (0);
+}
+
+static __inline int
+fesetexceptflag(const fexcept_t *__flagp, int __excepts)
+{
+	union __fpcr __r;
+	fenv_t __xflag, __xexcepts;
+
+	__xflag = (fenv_t)*__flagp << _FPUSW_SHIFT;
+	__xexcepts = (fenv_t)__excepts << _FPUSW_SHIFT;
+	__excb();
+	__mf_fpcr(&__r.__d);
+	__r.__bits &= ~__xexcepts;
+	__r.__bits |= __xflag & __xexcepts;
+	__mt_fpcr(__r.__d);
+	__excb();
+	return (0);
+}
+
+static __inline int
+feraiseexcept(int __excepts)
+{
+
+	/*
+	 * XXX Generating exceptions this way does not actually invoke
+	 * a userland trap handler when enabled, but neither do
+	 * arithmetic operations as far as I can tell.  Perhaps there
+	 * are more bugs in the kernel trap handler.
+	 */
+	fexcept_t __ex = __excepts;
+	fesetexceptflag(&__ex, __excepts);
+	return (0);
+}
+
+static __inline int
+fetestexcept(int __excepts)
+{
+	union __fpcr __r;
+
+	__excb();
+	__mf_fpcr(&__r.__d);
+	__excb();
+	return ((__r.__bits >> _FPUSW_SHIFT) & __excepts);
+}
+
+static __inline int
+fegetround(void)
+{
+	union __fpcr __r;
+
+	/*
+	 * No exception barriers should be required here if we assume
+	 * that only fesetround() can change the rounding mode.
+	 */
+	__mf_fpcr(&__r.__d);
+	return ((int)(__r.__bits >> _ROUND_SHIFT) & _ROUND_MASK);
+}
+
+static __inline int
+fesetround(int __round)
+{
+	union __fpcr __r;
+
+	if (__round & ~_ROUND_MASK)
+		return (-1);
+	__excb();
+	__mf_fpcr(&__r.__d);
+	__r.__bits &= ~((fenv_t)_ROUND_MASK << _ROUND_SHIFT);
+	__r.__bits |= (fenv_t)__round << _ROUND_SHIFT;
+	__mt_fpcr(__r.__d);
+	__excb();
+	return (0);
+}
+
+int	fegetenv(fenv_t *__envp);
+int	feholdexcept(fenv_t *__envp);
+int	fesetenv(const fenv_t *__envp);
+int	feupdateenv(const fenv_t *__envp);
+
+#if __BSD_VISIBLE
+
+int	feenableexcept(int __mask);
+int	fedisableexcept(int __mask);
+int	fegetexcept(void);
+
+#endif /* __BSD_VISIBLE */
+
+__END_DECLS
+
+#endif	/* !_FENV_H_ */
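
The alpha inlines above implement the standard C99 <fenv.h> interface by shifting the FE_* flags up to the FPCR status field (_FPUSW_SHIFT) and back. A minimal usage sketch of that interface, portable to any <fenv.h> and assuming only the functions declared above:

/* Illustrative sketch; not part of the header above. */
#include <fenv.h>
#include <stdio.h>

int
main(void)
{
	volatile double x = 1.0, y = 0.0;	/* volatile: force a runtime divide */

	feclearexcept(FE_ALL_EXCEPT);
	x = x / y;				/* sets FE_DIVBYZERO */
	if (fetestexcept(FE_DIVBYZERO))
		printf("FE_DIVBYZERO is raised\n");
	return 0;
}
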
diff --git a/libm/include/amd64/fenv.h b/libm/include/amd64/fenv.h
new file mode 100644
index 0000000..c4f9432
--- /dev/null
+++ b/libm/include/amd64/fenv.h
@@ -0,0 +1,203 @@
+/*-
+ * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/msun/amd64/fenv.h,v 1.5 2005/03/16 22:34:14 das Exp $
+ */
+
+#ifndef	_FENV_H_
+#define	_FENV_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+typedef struct {
+	struct {
+		__uint32_t	__control;
+		__uint32_t	__status;
+		__uint32_t	__tag;
+		char		__other[16];
+	} __x87;
+	__uint32_t		__mxcsr;
+} fenv_t;
+
+typedef	__uint16_t	fexcept_t;
+
+/* Exception flags */
+#define	FE_INVALID	0x01
+#define	FE_DENORMAL	0x02
+#define	FE_DIVBYZERO	0x04
+#define	FE_OVERFLOW	0x08
+#define	FE_UNDERFLOW	0x10
+#define	FE_INEXACT	0x20
+#define	FE_ALL_EXCEPT	(FE_DIVBYZERO | FE_DENORMAL | FE_INEXACT | \
+			 FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
+
+/* Rounding modes */
+#define	FE_TONEAREST	0x0000
+#define	FE_DOWNWARD	0x0400
+#define	FE_UPWARD	0x0800
+#define	FE_TOWARDZERO	0x0c00
+#define	_ROUND_MASK	(FE_TONEAREST | FE_DOWNWARD | \
+			 FE_UPWARD | FE_TOWARDZERO)
+
+/*
+ * As compared to the x87 control word, the SSE unit's control word
+ * has the rounding control bits offset by 3 and the exception mask
+ * bits offset by 7.
+ */
+#define	_SSE_ROUND_SHIFT	3
+#define	_SSE_EMASK_SHIFT	7
+
+__BEGIN_DECLS
+
+/* Default floating-point environment */
+extern const fenv_t	__fe_dfl_env;
+#define	FE_DFL_ENV	(&__fe_dfl_env)
+
+#define	__fldcw(__cw)		__asm __volatile("fldcw %0" : : "m" (__cw))
+#define	__fldenv(__env)		__asm __volatile("fldenv %0" : : "m" (__env))
+#define	__fnclex()		__asm __volatile("fnclex")
+#define	__fnstenv(__env)	__asm __volatile("fnstenv %0" : "=m" (*(__env)))
+#define	__fnstcw(__cw)		__asm __volatile("fnstcw %0" : "=m" (*(__cw)))
+#define	__fnstsw(__sw)		__asm __volatile("fnstsw %0" : "=am" (*(__sw)))
+#define	__fwait()		__asm __volatile("fwait")
+#define	__ldmxcsr(__csr)	__asm __volatile("ldmxcsr %0" : : "m" (__csr))
+#define	__stmxcsr(__csr)	__asm __volatile("stmxcsr %0" : "=m" (*(__csr)))
+
+static __inline int
+feclearexcept(int __excepts)
+{
+	fenv_t __env;
+
+	if (__excepts == FE_ALL_EXCEPT) {
+		__fnclex();
+	} else {
+		__fnstenv(&__env.__x87);
+		__env.__x87.__status &= ~__excepts;
+		__fldenv(__env.__x87);
+	}
+	__stmxcsr(&__env.__mxcsr);
+	__env.__mxcsr &= ~__excepts;
+	__ldmxcsr(__env.__mxcsr);
+	return (0);
+}
+
+static __inline int
+fegetexceptflag(fexcept_t *__flagp, int __excepts)
+{
+	int __mxcsr, __status;
+
+	__stmxcsr(&__mxcsr);
+	__fnstsw(&__status);
+	*__flagp = (__mxcsr | __status) & __excepts;
+	return (0);
+}
+
+int fesetexceptflag(const fexcept_t *__flagp, int __excepts);
+int feraiseexcept(int __excepts);
+
+static __inline int
+fetestexcept(int __excepts)
+{
+	int __mxcsr, __status;
+
+	__stmxcsr(&__mxcsr);
+	__fnstsw(&__status);
+	return ((__status | __mxcsr) & __excepts);
+}
+
+static __inline int
+fegetround(void)
+{
+	int __control;
+
+	/*
+	 * We assume that the x87 and the SSE unit agree on the
+	 * rounding mode.  Reading the control word on the x87 turns
+	 * out to be about 5 times faster than reading it on the SSE
+	 * unit on an Opteron 244.
+	 */
+	__fnstcw(&__control);
+	return (__control & _ROUND_MASK);
+}
+
+static __inline int
+fesetround(int __round)
+{
+	int __mxcsr, __control;
+
+	if (__round & ~_ROUND_MASK)
+		return (-1);
+
+	__fnstcw(&__control);
+	__control &= ~_ROUND_MASK;
+	__control |= __round;
+	__fldcw(__control);
+
+	__stmxcsr(&__mxcsr);
+	__mxcsr &= ~(_ROUND_MASK << _SSE_ROUND_SHIFT);
+	__mxcsr |= __round << _SSE_ROUND_SHIFT;
+	__ldmxcsr(__mxcsr);
+
+	return (0);
+}
+
+int fegetenv(fenv_t *__envp);
+int feholdexcept(fenv_t *__envp);
+
+static __inline int
+fesetenv(const fenv_t *__envp)
+{
+
+	__fldenv(__envp->__x87);
+	__ldmxcsr(__envp->__mxcsr);
+	return (0);
+}
+
+int feupdateenv(const fenv_t *__envp);
+
+#if __BSD_VISIBLE
+
+int feenableexcept(int __mask);
+int fedisableexcept(int __mask);
+
+static __inline int
+fegetexcept(void)
+{
+	int __control;
+
+	/*
+	 * We assume that the masks for the x87 and the SSE unit are
+	 * the same.
+	 */
+	__fnstcw(&__control);
+	return (~__control & FE_ALL_EXCEPT);
+}
+
+#endif /* __BSD_VISIBLE */
+
+__END_DECLS
+
+#endif	/* !_FENV_H_ */
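
The _SSE_ROUND_SHIFT/_SSE_EMASK_SHIFT comment above describes the layout difference between the x87 control word (rounding control in bits 10-11, exception masks in bits 0-5) and MXCSR (bits 13-14 and 7-12). A small sketch of the arithmetic fesetround() relies on, with the relevant constants mirrored locally so it stands alone:

/* Illustrative sketch; the defines mirror FE_UPWARD, FE_INVALID,
 * _SSE_ROUND_SHIFT and _SSE_EMASK_SHIFT from the header above. */
#include <stdio.h>

#define X87_UPWARD	0x0800		/* RC=10b at control-word bits 10-11 */
#define X87_INVALID	0x0001		/* IM at control-word bit 0 */
#define SSE_ROUND_SHIFT	3
#define SSE_EMASK_SHIFT	7

int
main(void)
{
	printf("MXCSR round-up field:  %#x\n",
	    (unsigned)(X87_UPWARD << SSE_ROUND_SHIFT));	/* 0x4000, bits 13-14 */
	printf("MXCSR invalid-op mask: %#x\n",
	    (unsigned)(X87_INVALID << SSE_EMASK_SHIFT));	/* 0x80, bit 7 */
	return 0;
}
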
diff --git a/libm/include/arm/fenv.h b/libm/include/arm/fenv.h
new file mode 100644
index 0000000..e7a8860
--- /dev/null
+++ b/libm/include/arm/fenv.h
@@ -0,0 +1,217 @@
+/*-
+ * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/msun/arm/fenv.h,v 1.5 2005/03/16 19:03:45 das Exp $
+ */
+
+#ifndef	_FENV_H_
+#define	_FENV_H_
+
+#include <sys/_types.h>
+
+typedef	__uint32_t	fenv_t;
+typedef	__uint32_t	fexcept_t;
+
+/* Exception flags */
+#define	FE_INVALID	0x0001
+#define	FE_DIVBYZERO	0x0002
+#define	FE_OVERFLOW	0x0004
+#define	FE_UNDERFLOW	0x0008
+#define	FE_INEXACT	0x0010
+#define	FE_ALL_EXCEPT	(FE_DIVBYZERO | FE_INEXACT | \
+			 FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
+
+/* Rounding modes */
+#define	FE_TONEAREST	0x0000
+#define	FE_TOWARDZERO	0x0001
+#define	FE_UPWARD	0x0002
+#define	FE_DOWNWARD	0x0003
+#define	_ROUND_MASK	(FE_TONEAREST | FE_DOWNWARD | \
+			 FE_UPWARD | FE_TOWARDZERO)
+__BEGIN_DECLS
+
+/* Default floating-point environment */
+extern const fenv_t	__fe_dfl_env;
+#define	FE_DFL_ENV	(&__fe_dfl_env)
+
+/* We need to be able to map status flag positions to mask flag positions */
+#define _FPUSW_SHIFT	16
+#define	_ENABLE_MASK	(FE_ALL_EXCEPT << _FPUSW_SHIFT)
+
+#ifdef	ARM_HARD_FLOAT
+#define	__rfs(__fpsr)	__asm __volatile("rfs %0" : "=r" (*(__fpsr)))
+#define	__wfs(__fpsr)	__asm __volatile("wfs %0" : : "r" (__fpsr))
+#else
+#define __rfs(__fpsr)
+#define __wfs(__fpsr)
+#endif
+
+static __inline int
+feclearexcept(int __excepts)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	__fpsr &= ~__excepts;
+	__wfs(__fpsr);
+	return (0);
+}
+
+static __inline int
+fegetexceptflag(fexcept_t *__flagp, int __excepts)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	*__flagp = __fpsr & __excepts;
+	return (0);
+}
+
+static __inline int
+fesetexceptflag(const fexcept_t *__flagp, int __excepts)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	__fpsr &= ~__excepts;
+	__fpsr |= *__flagp & __excepts;
+	__wfs(__fpsr);
+	return (0);
+}
+
+static __inline int
+feraiseexcept(int __excepts)
+{
+	fexcept_t __ex = __excepts;
+
+	fesetexceptflag(&__ex, __excepts);	/* XXX */
+	return (0);
+}
+
+static __inline int
+fetestexcept(int __excepts)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	return (__fpsr & __excepts);
+}
+
+static __inline int
+fegetround(void)
+{
+
+	/*
+	 * Apparently, the rounding mode is specified as part of the
+	 * instruction format on ARM, so the dynamic rounding mode is
+	 * indeterminate.  Some FPUs may differ.
+	 */
+	return (-1);
+}
+
+static __inline int
+fesetround(int __round)
+{
+
+	return (-1);
+}
+
+static __inline int
+fegetenv(fenv_t *__envp)
+{
+
+	__rfs(__envp);
+	return (0);
+}
+
+static __inline int
+feholdexcept(fenv_t *__envp)
+{
+	fenv_t __env;
+
+	__rfs(&__env);
+	*__envp = __env;
+	__env &= ~(FE_ALL_EXCEPT | _ENABLE_MASK);
+	__wfs(__env);
+	return (0);
+}
+
+static __inline int
+fesetenv(const fenv_t *__envp)
+{
+
+	__wfs(*__envp);
+	return (0);
+}
+
+static __inline int
+feupdateenv(const fenv_t *__envp)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	__wfs(*__envp);
+	feraiseexcept(__fpsr & FE_ALL_EXCEPT);
+	return (0);
+}
+
+#if __BSD_VISIBLE
+
+static __inline int
+feenableexcept(int __mask)
+{
+	fenv_t __old_fpsr, __new_fpsr;
+
+	__rfs(&__old_fpsr);
+	__new_fpsr = __old_fpsr | (__mask & FE_ALL_EXCEPT) << _FPUSW_SHIFT;
+	__wfs(__new_fpsr);
+	return ((__old_fpsr >> _FPUSW_SHIFT) & FE_ALL_EXCEPT);
+}
+
+static __inline int
+fedisableexcept(int __mask)
+{
+	fenv_t __old_fpsr, __new_fpsr;
+
+	__rfs(&__old_fpsr);
+	__new_fpsr = __old_fpsr & ~((__mask & FE_ALL_EXCEPT) << _FPUSW_SHIFT);
+	__wfs(__new_fpsr);
+	return ((__old_fpsr >> _FPUSW_SHIFT) & FE_ALL_EXCEPT);
+}
+
+static __inline int
+fegetexcept(void)
+{
+	fenv_t __fpsr;
+
+	__rfs(&__fpsr);
+	return ((__fpsr & _ENABLE_MASK) >> _FPUSW_SHIFT);
+}
+
+#endif /* __BSD_VISIBLE */
+
+__END_DECLS
+
+#endif	/* !_FENV_H_ */
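
On this FPA-based port, fegetround() and fesetround() above always return -1 because the rounding mode is encoded per instruction, as the comment in fegetround() notes. A minimal sketch of how portable callers can cope with that, checking the return value instead of assuming the mode changed:

/* Illustrative sketch; not part of the header above. */
#include <fenv.h>
#include <stdio.h>

int
main(void)
{
	if (fesetround(FE_UPWARD) != 0)
		printf("dynamic rounding not supported; using default mode\n");
	else
		printf("rounding mode is now %d\n", fegetround());
	return 0;
}
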
diff --git a/libm/include/i387/fenv.h b/libm/include/i387/fenv.h
new file mode 100644
index 0000000..b124366
--- /dev/null
+++ b/libm/include/i387/fenv.h
@@ -0,0 +1,240 @@
+/*-
+ * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/msun/i387/fenv.h,v 1.4 2005/03/17 22:21:46 das Exp $
+ */
+
+#ifndef	_FENV_H_
+#define	_FENV_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+/*
+ * To preserve binary compatibility with FreeBSD 5.3, we pack the
+ * mxcsr into some reserved fields, rather than changing sizeof(fenv_t).
+ */
+typedef struct {
+	__uint16_t	__control;
+	__uint16_t      __mxcsr_hi;
+	__uint16_t	__status;
+	__uint16_t      __mxcsr_lo;
+	__uint32_t	__tag;
+	char		__other[16];
+} fenv_t;
+
+#define	__get_mxcsr(env)	(((env).__mxcsr_hi << 16) |	\
+				 ((env).__mxcsr_lo))
+#define	__set_mxcsr(env, x)	do {				\
+	(env).__mxcsr_hi = (__uint32_t)(x) >> 16;		\
+	(env).__mxcsr_lo = (__uint16_t)(x);			\
+} while (0)
+
+typedef	__uint16_t	fexcept_t;
+
+/* Exception flags */
+#define	FE_INVALID	0x01
+#define	FE_DENORMAL	0x02
+#define	FE_DIVBYZERO	0x04
+#define	FE_OVERFLOW	0x08
+#define	FE_UNDERFLOW	0x10
+#define	FE_INEXACT	0x20
+#define	FE_ALL_EXCEPT	(FE_DIVBYZERO | FE_DENORMAL | FE_INEXACT | \
+			 FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
+
+/* Rounding modes */
+#define	FE_TONEAREST	0x0000
+#define	FE_DOWNWARD	0x0400
+#define	FE_UPWARD	0x0800
+#define	FE_TOWARDZERO	0x0c00
+#define	_ROUND_MASK	(FE_TONEAREST | FE_DOWNWARD | \
+			 FE_UPWARD | FE_TOWARDZERO)
+
+/*
+ * As compared to the x87 control word, the SSE unit's control word
+ * has the rounding control bits offset by 3 and the exception mask
+ * bits offset by 7.
+ */
+#define	_SSE_ROUND_SHIFT	3
+#define	_SSE_EMASK_SHIFT	7
+
+/* After testing for SSE support once, we cache the result in __has_sse. */
+enum __sse_support { __SSE_YES, __SSE_NO, __SSE_UNK };
+extern enum __sse_support __has_sse;
+int __test_sse(void);
+#ifdef __SSE__
+#define	__HAS_SSE()	1
+#else
+#define	__HAS_SSE()	(__has_sse == __SSE_YES ||			\
+			 (__has_sse == __SSE_UNK && __test_sse()))
+#endif
+
+__BEGIN_DECLS
+
+/* Default floating-point environment */
+extern const fenv_t	__fe_dfl_env;
+#define	FE_DFL_ENV	(&__fe_dfl_env)
+
+#define	__fldcw(__cw)		__asm __volatile("fldcw %0" : : "m" (__cw))
+#define	__fldenv(__env)		__asm __volatile("fldenv %0" : : "m" (__env))
+#define	__fnclex()		__asm __volatile("fnclex")
+#define	__fnstenv(__env)	__asm __volatile("fnstenv %0" : "=m" (*(__env)))
+#define	__fnstcw(__cw)		__asm __volatile("fnstcw %0" : "=m" (*(__cw)))
+#define	__fnstsw(__sw)		__asm __volatile("fnstsw %0" : "=am" (*(__sw)))
+#define	__fwait()		__asm __volatile("fwait")
+#define	__ldmxcsr(__csr)	__asm __volatile("ldmxcsr %0" : : "m" (__csr))
+#define	__stmxcsr(__csr)	__asm __volatile("stmxcsr %0" : "=m" (*(__csr)))
+
+static __inline int
+feclearexcept(int __excepts)
+{
+	fenv_t __env;
+	int __mxcsr;
+
+	if (__excepts == FE_ALL_EXCEPT) {
+		__fnclex();
+	} else {
+		__fnstenv(&__env);
+		__env.__status &= ~__excepts;
+		__fldenv(__env);
+	}
+	if (__HAS_SSE()) {
+		__stmxcsr(&__mxcsr);
+		__mxcsr &= ~__excepts;
+		__ldmxcsr(__mxcsr);
+	}
+	return (0);
+}
+
+static __inline int
+fegetexceptflag(fexcept_t *__flagp, int __excepts)
+{
+	int __mxcsr, __status;
+
+	__fnstsw(&__status);
+	if (__HAS_SSE())
+		__stmxcsr(&__mxcsr);
+	else
+		__mxcsr = 0;
+	*__flagp = (__mxcsr | __status) & __excepts;
+	return (0);
+}
+
+int fesetexceptflag(const fexcept_t *__flagp, int __excepts);
+int feraiseexcept(int __excepts);
+
+static __inline int
+fetestexcept(int __excepts)
+{
+	int __mxcsr, __status;
+
+	__fnstsw(&__status);
+	if (__HAS_SSE())
+		__stmxcsr(&__mxcsr);
+	else
+		__mxcsr = 0;
+	return ((__status | __mxcsr) & __excepts);
+}
+
+static __inline int
+fegetround(void)
+{
+	int __control;
+
+	/*
+	 * We assume that the x87 and the SSE unit agree on the
+	 * rounding mode.  Reading the control word on the x87 turns
+	 * out to be about 5 times faster than reading it on the SSE
+	 * unit on an Opteron 244.
+	 */
+	__fnstcw(&__control);
+	return (__control & _ROUND_MASK);
+}
+
+static __inline int
+fesetround(int __round)
+{
+	int __mxcsr, __control;
+
+	if (__round & ~_ROUND_MASK)
+		return (-1);
+
+	__fnstcw(&__control);
+	__control &= ~_ROUND_MASK;
+	__control |= __round;
+	__fldcw(__control);
+
+	if (__HAS_SSE()) {
+		__stmxcsr(&__mxcsr);
+		__mxcsr &= ~(_ROUND_MASK << _SSE_ROUND_SHIFT);
+		__mxcsr |= __round << _SSE_ROUND_SHIFT;
+		__ldmxcsr(__mxcsr);
+	}
+
+	return (0);
+}
+
+int fegetenv(fenv_t *__envp);
+int feholdexcept(fenv_t *__envp);
+
+static __inline int
+fesetenv(const fenv_t *__envp)
+{
+	fenv_t __env = *__envp;
+	int __mxcsr;
+
+	__mxcsr = __get_mxcsr(__env);
+	__set_mxcsr(__env, 0xffffffff);
+	__fldenv(__env);
+	if (__HAS_SSE())
+		__ldmxcsr(__mxcsr);
+	return (0);
+}
+
+int feupdateenv(const fenv_t *__envp);
+
+#if __BSD_VISIBLE
+
+int feenableexcept(int __mask);
+int fedisableexcept(int __mask);
+
+static __inline int
+fegetexcept(void)
+{
+	int __control;
+
+	/*
+	 * We assume that the masks for the x87 and the SSE unit are
+	 * the same.
+	 */
+	__fnstcw(&__control);
+	return (~__control & FE_ALL_EXCEPT);
+}
+
+#endif /* __BSD_VISIBLE */
+
+__END_DECLS
+
+#endif	/* !_FENV_H_ */
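
The comment at the top of this header explains that MXCSR is split across the reserved __mxcsr_hi/__mxcsr_lo fields so that sizeof(fenv_t) stays compatible with FreeBSD 5.3. A sketch of the resulting pack/unpack round trip; it deliberately pokes at the header-internal __get_mxcsr/__set_mxcsr macros purely for illustration and only compiles against this header:

/* Illustrative sketch; not part of the header above. */
#include <fenv.h>
#include <stdio.h>

int
main(void)
{
	fenv_t env;
	unsigned int csr;

	fegetenv(&env);			/* libm fills the packed halves */
	csr = __get_mxcsr(env);		/* __mxcsr_hi:__mxcsr_lo reassembled */
	__set_mxcsr(env, csr);		/* identity round trip */
	printf("MXCSR %#x survives packing: %s\n", csr,
	    __get_mxcsr(env) == csr ? "yes" : "no");
	return 0;
}
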
diff --git a/libm/include/ia64/fenv.h b/libm/include/ia64/fenv.h
new file mode 100644
index 0000000..8c6b65b
--- /dev/null
+++ b/libm/include/ia64/fenv.h
@@ -0,0 +1,242 @@
+/*-
+ * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/msun/ia64/fenv.h,v 1.4 2005/03/16 19:03:45 das Exp $
+ */
+
+#ifndef	_FENV_H_
+#define	_FENV_H_
+
+#include <sys/_types.h>
+
+typedef	__uint64_t	fenv_t;
+typedef	__uint16_t	fexcept_t;
+
+/* Exception flags */
+#define	FE_INVALID	0x01
+#define	FE_DENORMAL	0x02
+#define	FE_DIVBYZERO	0x04
+#define	FE_OVERFLOW	0x08
+#define	FE_UNDERFLOW	0x10
+#define	FE_INEXACT	0x20
+#define	FE_ALL_EXCEPT	(FE_DIVBYZERO | FE_DENORMAL | FE_INEXACT | \
+			 FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
+
+/* Rounding modes */
+#define	FE_TONEAREST	0x0000
+#define	FE_DOWNWARD	0x0400
+#define	FE_UPWARD	0x0800
+#define	FE_TOWARDZERO	0x0c00
+#define	_ROUND_MASK	(FE_TONEAREST | FE_DOWNWARD | \
+			 FE_UPWARD | FE_TOWARDZERO)
+
+__BEGIN_DECLS
+
+/* Default floating-point environment */
+extern const fenv_t	__fe_dfl_env;
+#define	FE_DFL_ENV	(&__fe_dfl_env)
+
+#define	_FPUSW_SHIFT	13
+
+#define	__stfpsr(__r)	__asm __volatile("mov %0=ar.fpsr" : "=r" (*(__r)))
+#define	__ldfpsr(__r)	__asm __volatile("mov ar.fpsr=%0;;" : : "r" (__r))
+
+static __inline int
+feclearexcept(int __excepts)
+{
+	fenv_t __fpsr;
+
+	__stfpsr(&__fpsr);
+	__fpsr &= ~((fenv_t)__excepts << _FPUSW_SHIFT);
+	__ldfpsr(__fpsr);
+	return (0);
+}
+
+static __inline int
+fegetexceptflag(fexcept_t *__flagp, int __excepts)
+{
+	fenv_t __fpsr;
+
+	__stfpsr(&__fpsr);
+	*__flagp = (fexcept_t)(__fpsr >> _FPUSW_SHIFT) & __excepts;
+	return (0);
+}
+
+static __inline int
+fesetexceptflag(const fexcept_t *__flagp, int __excepts)
+{
+	fenv_t __fpsr;
+
+	__stfpsr(&__fpsr);
+	__fpsr &= ~((fenv_t)__excepts << _FPUSW_SHIFT);
+	__fpsr |= (fenv_t)(__excepts & *__flagp) << _FPUSW_SHIFT;
+	__ldfpsr(__fpsr);
+	return (0);
+}
+
+/*
+ * It is worthwhile to use the inline version of this function iff it
+ * is called with arguments that are compile-time constants (due to
+ * dead code elimination).  Unfortunately, gcc isn't smart enough to
+ * figure this out automatically, and there's no way to tell it.
+ * We assume that constant arguments will be the common case.
+ */
+static __inline int
+feraiseexcept(int __excepts)
+{
+	volatile double d;
+
+	/*
+	 * With a compiler that supports the FENV_ACCESS pragma
+	 * properly, simple expressions like '0.0 / 0.0' should
+	 * be sufficient to generate traps.  Unfortunately, we
+	 * need to bring a volatile variable into the equation
+	 * to prevent incorrect optimizations.
+	 */
+	if (__excepts & FE_INVALID) {
+		d = 0.0;
+		d = 0.0 / d;
+	}
+	if (__excepts & FE_DIVBYZERO) {
+		d = 0.0;
+		d = 1.0 / d;
+	}
+	if (__excepts & FE_OVERFLOW) {
+		d = 0x1.ffp1023;
+		d *= 2.0;
+	}
+	if (__excepts & FE_UNDERFLOW) {
+		d = 0x1p-1022;
+		d /= 0x1p1023;
+	}
+	if (__excepts & FE_INEXACT) {
+		d = 0x1p-1022;
+		d += 1.0;
+	}
+	return (0);
+}
+
+static __inline int
+fetestexcept(int __excepts)
+{
+	fenv_t __fpsr;
+
+	__stfpsr(&__fpsr);
+	return ((__fpsr >> _FPUSW_SHIFT) & __excepts);
+}
+
+
+static __inline int
+fegetround(void)
+{
+	fenv_t __fpsr;
+
+	__stfpsr(&__fpsr);
+	return (__fpsr & _ROUND_MASK);
+}
+
+static __inline int
+fesetround(int __round)
+{
+	fenv_t __fpsr;
+
+	if (__round & ~_ROUND_MASK)
+		return (-1);
+	__stfpsr(&__fpsr);
+	__fpsr &= ~_ROUND_MASK;
+	__fpsr |= __round;
+	__ldfpsr(__fpsr);
+	return (0);
+}
+
+static __inline int
+fegetenv(fenv_t *__envp)
+{
+
+	__stfpsr(__envp);
+	return (0);
+}
+
+static __inline int
+feholdexcept(fenv_t *__envp)
+{
+	fenv_t __fpsr;
+
+	__stfpsr(&__fpsr);
+	*__envp = __fpsr;
+	__fpsr &= ~((fenv_t)FE_ALL_EXCEPT << _FPUSW_SHIFT);
+	__fpsr |= FE_ALL_EXCEPT;
+	__ldfpsr(__fpsr);
+	return (0);
+}
+
+static __inline int
+fesetenv(const fenv_t *__envp)
+{
+
+	__ldfpsr(*__envp);
+	return (0);
+}
+
+int feupdateenv(const fenv_t *__envp);
+
+#if __BSD_VISIBLE
+
+static __inline int
+feenableexcept(int __mask)
+{
+	fenv_t __newfpsr, __oldfpsr;
+
+	__stfpsr(&__oldfpsr);
+	__newfpsr = __oldfpsr & ~(__mask & FE_ALL_EXCEPT);
+	__ldfpsr(__newfpsr);
+	return (~__oldfpsr & FE_ALL_EXCEPT);
+}
+
+static __inline int
+fedisableexcept(int __mask)
+{
+	fenv_t __newfpsr, __oldfpsr;
+
+	__stfpsr(&__oldfpsr);
+	__newfpsr = __oldfpsr | (__mask & FE_ALL_EXCEPT);
+	__ldfpsr(__newfpsr);
+	return (~__oldfpsr & FE_ALL_EXCEPT);
+}
+
+static __inline int
+fegetexcept(void)
+{
+	fenv_t __fpsr;
+
+	__stfpsr(&__fpsr);
+	return (~__fpsr & FE_ALL_EXCEPT);
+}
+
+#endif /* __BSD_VISIBLE */
+
+__END_DECLS
+
+#endif	/* !_FENV_H_ */
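
feholdexcept() above saves the FPSR, clears the flags and sets the trap-disable bits, and the out-of-line feupdateenv() restores the saved environment and re-raises any flags still set. Together they give the usual C99 "non-stop" pattern, sketched below; the volatile locals mirror the trick feraiseexcept() itself uses to defeat constant folding:

/* Illustrative sketch; not part of the header above. */
#include <fenv.h>
#include <stdio.h>

static double
careful_divide(double a, double b)
{
	fenv_t env;
	volatile double x = a, y = b;	/* force a runtime divide */
	double r;

	feholdexcept(&env);		/* save env, clear flags, mask traps */
	r = x / y;
	if (fetestexcept(FE_DIVBYZERO | FE_INVALID)) {
		r = 0.0;		/* handled locally... */
		feclearexcept(FE_DIVBYZERO | FE_INVALID);	/* ...so don't re-raise */
	}
	feupdateenv(&env);		/* restore env, merge remaining flags */
	return (r);
}

int
main(void)
{
	printf("%g\n", careful_divide(1.0, 0.0));
	return 0;
}
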
diff --git a/libm/include/math.h b/libm/include/math.h
new file mode 100644
index 0000000..ef6a9e6
--- /dev/null
+++ b/libm/include/math.h
@@ -0,0 +1,486 @@
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * from: @(#)fdlibm.h 5.1 93/09/24
+ * $FreeBSD: src/lib/msun/src/math.h,v 1.61 2005/04/16 21:12:47 das Exp $
+ */
+
+#ifndef _MATH_H_
+#define	_MATH_H_
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <limits.h>
+
+#define __pure2
+
+/*
+ * ANSI/POSIX
+ */
+extern const union __infinity_un {
+	unsigned char	__uc[8];
+	double		__ud;
+} __infinity;
+
+extern const union __nan_un {
+	unsigned char	__uc[sizeof(float)];
+	float		__uf;
+} __nan;
+
+/* #if __GNUC_PREREQ__(3, 3) || (defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 800) */
+#if 1
+#define	__MATH_BUILTIN_CONSTANTS
+#endif
+
+/* #if __GNUC_PREREQ__(3, 0) && !defined(__INTEL_COMPILER) */
+#if 1
+#define	__MATH_BUILTIN_RELOPS
+#endif
+
+/* #ifdef __MATH_BUILTIN_CONSTANTS */
+#if 1
+#define	HUGE_VAL	__builtin_huge_val()
+#else
+#define	HUGE_VAL	(__infinity.__ud)
+#endif
+
+/* #if __ISO_C_VISIBLE >= 1999 */
+#if 0
+#define	FP_ILOGB0	(-__INT_MAX)
+#define	FP_ILOGBNAN	__INT_MAX
+#else
+#define	FP_ILOGB0	(-INT_MAX)
+#define	FP_ILOGBNAN	INT_MAX
+#endif
+
+#ifdef __MATH_BUILTIN_CONSTANTS
+#define	HUGE_VALF	__builtin_huge_valf()
+#define	HUGE_VALL	__builtin_huge_vall()
+#define	INFINITY	__builtin_inf()
+#define	NAN		__builtin_nan("")
+#else
+#define	HUGE_VALF	(float)HUGE_VAL
+#define	HUGE_VALL	(long double)HUGE_VAL
+#define	INFINITY	HUGE_VALF
+#define	NAN		(__nan.__uf)
+#endif /* __MATH_BUILTIN_CONSTANTS */
+
+#define	MATH_ERRNO	1
+#define	MATH_ERREXCEPT	2
+#define	math_errhandling	MATH_ERREXCEPT
+
+/* XXX We need a <machine/math.h>. */
+#if defined(__ia64__) || defined(__sparc64__)
+#define	FP_FAST_FMA
+#endif
+#ifdef __ia64__
+#define	FP_FAST_FMAL
+#endif
+#define	FP_FAST_FMAF
+
+/* Symbolic constants to classify floating point numbers. */
+#define	FP_INFINITE	0x01
+#define	FP_NAN		0x02
+#define	FP_NORMAL	0x04
+#define	FP_SUBNORMAL	0x08
+#define	FP_ZERO		0x10
+#define	fpclassify(x) \
+    ((sizeof (x) == sizeof (float)) ? __fpclassifyf(x) \
+    : (sizeof (x) == sizeof (double)) ? __fpclassifyd(x) \
+    : __fpclassifyl(x))
+
+#define	isfinite(x)					\
+    ((sizeof (x) == sizeof (float)) ? __isfinitef(x)	\
+    : (sizeof (x) == sizeof (double)) ? __isfinite(x)	\
+    : __isfinitel(x))
+#define	isinf(x)					\
+    ((sizeof (x) == sizeof (float)) ? __isinff(x)	\
+    : (sizeof (x) == sizeof (double)) ? __isinf(x)	\
+    : __isinfl(x))
+#define	isnan(x)					\
+    ((sizeof (x) == sizeof (float)) ? isnanf(x)		\
+    : (sizeof (x) == sizeof (double)) ? isnan(x)	\
+    : __isnanl(x))
+#define	isnormal(x)					\
+    ((sizeof (x) == sizeof (float)) ? __isnormalf(x)	\
+    : (sizeof (x) == sizeof (double)) ? __isnormal(x)	\
+    : __isnormall(x))
+
+#ifdef __MATH_BUILTIN_RELOPS
+#define	isgreater(x, y)		__builtin_isgreater((x), (y))
+#define	isgreaterequal(x, y)	__builtin_isgreaterequal((x), (y))
+#define	isless(x, y)		__builtin_isless((x), (y))
+#define	islessequal(x, y)	__builtin_islessequal((x), (y))
+#define	islessgreater(x, y)	__builtin_islessgreater((x), (y))
+#define	isunordered(x, y)	__builtin_isunordered((x), (y))
+#else
+#define	isgreater(x, y)		(!isunordered((x), (y)) && (x) > (y))
+#define	isgreaterequal(x, y)	(!isunordered((x), (y)) && (x) >= (y))
+#define	isless(x, y)		(!isunordered((x), (y)) && (x) < (y))
+#define	islessequal(x, y)	(!isunordered((x), (y)) && (x) <= (y))
+#define	islessgreater(x, y)	(!isunordered((x), (y)) && \
+					((x) > (y) || (y) > (x)))
+#define	isunordered(x, y)	(isnan(x) || isnan(y))
+#endif /* __MATH_BUILTIN_RELOPS */
+
+#define	signbit(x)					\
+    ((sizeof (x) == sizeof (float)) ? __signbitf(x)	\
+    : (sizeof (x) == sizeof (double)) ? __signbit(x)	\
+    : __signbitl(x))
+
+#if 0
+typedef	__double_t	double_t;
+typedef	__float_t	float_t;
+#endif 
+/* #endif */ /* __ISO_C_VISIBLE >= 1999 */
+
+/*
+ * XOPEN/SVID
+ */
+/* #if __BSD_VISIBLE || __XSI_VISIBLE */
+#define	M_E		2.7182818284590452354	/* e */
+#define	M_LOG2E		1.4426950408889634074	/* log 2e */
+#define	M_LOG10E	0.43429448190325182765	/* log 10e */
+#define	M_LN2		0.69314718055994530942	/* log e2 */
+#define	M_LN10		2.30258509299404568402	/* log e10 */
+#define	M_PI		3.14159265358979323846	/* pi */
+#define	M_PI_2		1.57079632679489661923	/* pi/2 */
+#define	M_PI_4		0.78539816339744830962	/* pi/4 */
+#define	M_1_PI		0.31830988618379067154	/* 1/pi */
+#define	M_2_PI		0.63661977236758134308	/* 2/pi */
+#define	M_2_SQRTPI	1.12837916709551257390	/* 2/sqrt(pi) */
+#define	M_SQRT2		1.41421356237309504880	/* sqrt(2) */
+#define	M_SQRT1_2	0.70710678118654752440	/* 1/sqrt(2) */
+
+#define	MAXFLOAT	((float)3.40282346638528860e+38)
+extern int signgam;
+/* #endif */ /* __BSD_VISIBLE || __XSI_VISIBLE */
+
+#if __BSD_VISIBLE
+#if 0
+/* Old value from 4.4BSD-Lite math.h; this is probably better. */
+#define	HUGE		HUGE_VAL
+#else
+#define	HUGE		MAXFLOAT
+#endif
+#endif /* __BSD_VISIBLE */
+
+/*
+ * Most of these functions depend on the rounding mode and have the side
+ * effect of raising floating-point exceptions, so they are not declared
+ * as __pure2.  In C99, FENV_ACCESS affects the purity of these functions.
+ */
+__BEGIN_DECLS
+/*
+ * ANSI/POSIX
+ */
+int	__fpclassifyd(double) __pure2;
+int	__fpclassifyf(float) __pure2;
+int	__fpclassifyl(long double) __pure2;
+int	__isfinitef(float) __pure2;
+int	__isfinite(double) __pure2;
+int	__isfinitel(long double) __pure2;
+int	__isinff(float) __pure2;
+int     __isinf(double) __pure2;
+int	__isinfl(long double) __pure2;
+int	__isnanl(long double) __pure2;
+int	__isnormalf(float) __pure2;
+int	__isnormal(double) __pure2;
+int	__isnormall(long double) __pure2;
+int	__signbit(double) __pure2;
+int	__signbitf(float) __pure2;
+int	__signbitl(long double) __pure2;
+
+double	acos(double);
+double	asin(double);
+double	atan(double);
+double	atan2(double, double);
+double	cos(double);
+double	sin(double);
+double	tan(double);
+
+double	cosh(double);
+double	sinh(double);
+double	tanh(double);
+
+double	exp(double);
+double	frexp(double, int *);	/* fundamentally !__pure2 */
+double	ldexp(double, int);
+double	log(double);
+double	log10(double);
+double	modf(double, double *);	/* fundamentally !__pure2 */
+
+double	pow(double, double);
+double	sqrt(double);
+
+double	ceil(double);
+double	fabs(double) __pure2;
+double	floor(double);
+double	fmod(double, double);
+
+/*
+ * These functions are not in C90.
+ */
+/* #if __BSD_VISIBLE || __ISO_C_VISIBLE >= 1999 || __XSI_VISIBLE */
+double	acosh(double);
+double	asinh(double);
+double	atanh(double);
+double	cbrt(double);
+double	erf(double);
+double	erfc(double);
+double	exp2(double);
+double	expm1(double);
+double	fma(double, double, double);
+double	hypot(double, double);
+int	ilogb(double) __pure2;
+/* int	(isinf)(double) __pure2; */
+int	(isnan)(double) __pure2;
+double	lgamma(double);
+long long llrint(double);
+long long llround(double);
+double	log1p(double);
+double	logb(double);
+long	lrint(double);
+long	lround(double);
+double	nextafter(double, double);
+double	remainder(double, double);
+double	remquo(double, double, int *);
+double	rint(double);
+/* #endif */ /* __BSD_VISIBLE || __ISO_C_VISIBLE >= 1999 || __XSI_VISIBLE */
+
+/* #if __BSD_VISIBLE || __XSI_VISIBLE */
+double	j0(double);
+double	j1(double);
+double	jn(int, double);
+double	scalb(double, double);
+double	y0(double);
+double	y1(double);
+double	yn(int, double);
+
+/* #if __XSI_VISIBLE <= 500 || __BSD_VISIBLE */
+double	gamma(double);
+/* #endif */
+/* #endif */ /* __BSD_VISIBLE || __XSI_VISIBLE */
+
+/* #if __BSD_VISIBLE || __ISO_C_VISIBLE >= 1999 */
+double	copysign(double, double) __pure2;
+double	fdim(double, double);
+double	fmax(double, double) __pure2;
+double	fmin(double, double) __pure2;
+double	nearbyint(double);
+double	round(double);
+double	scalbln(double, long);
+double	scalbn(double, int);
+double	tgamma(double);
+double	trunc(double);
+/* #endif */
+
+/*
+ * BSD math library entry points
+ */
+/* #if __BSD_VISIBLE */
+double	drem(double, double);
+int	finite(double) __pure2;
+int	isnanf(float) __pure2;
+
+/*
+ * Reentrant version of gamma & lgamma; passes signgam back by reference
+ * as the second argument; user must allocate space for signgam.
+ */
+double	gamma_r(double, int *);
+double	lgamma_r(double, int *);
+
+/*
+ * IEEE Test Vector
+ */
+double	significand(double);
+/* #endif */ /* __BSD_VISIBLE */
+
+/* float versions of ANSI/POSIX functions */
+/*#if __ISO_C_VISIBLE >= 1999 */
+float	acosf(float);
+float	asinf(float);
+float	atanf(float);
+float	atan2f(float, float);
+float	cosf(float);
+float	sinf(float);
+float	tanf(float);
+
+float	coshf(float);
+float	sinhf(float);
+float	tanhf(float);
+
+float	exp2f(float);
+float	expf(float);
+float	expm1f(float);
+float	frexpf(float, int *);	/* fundamentally !__pure2 */
+int	ilogbf(float) __pure2;
+float	ldexpf(float, int);
+float	log10f(float);
+float	log1pf(float);
+float	logf(float);
+float	modff(float, float *);	/* fundamentally !__pure2 */
+
+float	powf(float, float);
+float	sqrtf(float);
+
+float	ceilf(float);
+float	fabsf(float) __pure2;
+float	floorf(float);
+float	fmodf(float, float);
+float	roundf(float);
+
+float	erff(float);
+float	erfcf(float);
+float	hypotf(float, float);
+float	lgammaf(float);
+
+float	acoshf(float);
+float	asinhf(float);
+float	atanhf(float);
+float	cbrtf(float);
+float	logbf(float);
+float	copysignf(float, float) __pure2;
+long long llrintf(float);
+long long llroundf(float);
+long	lrintf(float);
+long	lroundf(float);
+float	nearbyintf(float);
+float	nextafterf(float, float);
+float	remainderf(float, float);
+float	remquof(float, float, int *);
+float	rintf(float);
+float	scalblnf(float, long);
+float	scalbnf(float, int);
+float	truncf(float);
+
+float	fdimf(float, float);
+float	fmaf(float, float, float);
+float	fmaxf(float, float) __pure2;
+float	fminf(float, float) __pure2;
+/* #endif */
+
+/*
+ * float versions of BSD math library entry points
+ */
+/* #if __BSD_VISIBLE */
+float	dremf(float, float);
+int	finitef(float) __pure2;
+float	gammaf(float);
+float	j0f(float);
+float	j1f(float);
+float	jnf(int, float);
+float	scalbf(float, float);
+float	y0f(float);
+float	y1f(float);
+float	ynf(int, float);
+
+/*
+ * Float versions of reentrant version of gamma & lgamma; passes
+ * signgam back by reference as the second argument; user must
+ * allocate space for signgam.
+ */
+float	gammaf_r(float, int *);
+float	lgammaf_r(float, int *);
+
+/*
+ * float version of IEEE Test Vector
+ */
+float	significandf(float);
+/* #endif */	/* __BSD_VISIBLE */ 
+
+/*
+ * long double versions of ISO/POSIX math functions
+ */
+/* #if __ISO_C_VISIBLE >= 1999 */
+#if 0
+long double	acoshl(long double);
+long double	acosl(long double);
+long double	asinhl(long double);
+long double	asinl(long double);
+long double	atan2l(long double, long double);
+long double	atanhl(long double);
+long double	atanl(long double);
+long double	cbrtl(long double);
+#endif
+long double	ceill(long double);
+long double	copysignl(long double, long double) __pure2;
+#if 0
+long double	coshl(long double);
+long double	cosl(long double);
+long double	erfcl(long double);
+long double	erfl(long double);
+long double	exp2l(long double);
+long double	expl(long double);
+long double	expm1l(long double);
+#endif
+long double	fabsl(long double) __pure2;
+long double	fdiml(long double, long double);
+long double	floorl(long double);
+long double	fmal(long double, long double, long double);
+long double	fmaxl(long double, long double) __pure2;
+long double	fminl(long double, long double) __pure2;
+#if 0
+long double	fmodl(long double, long double);
+#endif
+long double	frexpl(long double value, int *); /* fundamentally !__pure2 */
+#if 0
+long double	hypotl(long double, long double);
+#endif
+int		ilogbl(long double) __pure2;
+long double	ldexpl(long double, int);
+#if 0
+long double	lgammal(long double);
+long long	llrintl(long double);
+#endif
+long long	llroundl(long double);
+#if 0
+long double	log10l(long double);
+long double	log1pl(long double);
+long double	log2l(long double);
+long double	logbl(long double);
+long double	logl(long double);
+long		lrintl(long double);
+#endif
+long		lroundl(long double);
+#if 0
+long double	modfl(long double, long double *); /* fundamentally !__pure2 */
+long double	nanl(const char *) __pure2;
+long double	nearbyintl(long double);
+#endif
+long double	nextafterl(long double, long double);
+double		nexttoward(double, long double);
+float		nexttowardf(float, long double);
+long double	nexttowardl(long double, long double);
+#if 0
+long double	powl(long double, long double);
+long double	remainderl(long double, long double);
+long double	remquol(long double, long double, int *);
+long double	rintl(long double);
+#endif
+long double	roundl(long double);
+long double	scalblnl(long double, long);
+long double	scalbnl(long double, int);
+#if 0
+long double	sinhl(long double);
+long double	sinl(long double);
+long double	sqrtl(long double);
+long double	tanhl(long double);
+long double	tanl(long double);
+long double	tgammal(long double);
+#endif
+long double	truncl(long double);
+
+/* #endif */ /* __ISO_C_VISIBLE >= 1999 */
+__END_DECLS
+
+#endif /* !_MATH_H_ */
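
fpclassify(), isnan(), signbit() and the comparison macros above are type-generic: they dispatch on sizeof(x) to the __fpclassify*/__isnan*/__signbit* helpers, so one spelling covers float, double and long double. A small usage sketch:

/* Illustrative sketch; not part of the header above. */
#include <math.h>
#include <stdio.h>

static const char *
kind(double v)
{
	switch (fpclassify(v)) {
	case FP_NAN:		return ("nan");
	case FP_INFINITE:	return ("infinite");
	case FP_ZERO:		return ("zero");
	case FP_SUBNORMAL:	return ("subnormal");
	default:		return ("normal");
	}
}

int
main(void)
{
	printf("%s %s %s %s\n",
	    kind(0.0), kind(NAN), kind(HUGE_VAL), kind(1.0));
	printf("signbit(-0.0) != 0: %d\n", signbit(-0.0) != 0);
	printf("isgreater(NAN, 1.0): %d\n", isgreater(NAN, 1.0));	/* 0, quiet comparison */
	return 0;
}
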
diff --git a/libm/include/powerpc/fenv.h b/libm/include/powerpc/fenv.h
new file mode 100644
index 0000000..3fd2389
--- /dev/null
+++ b/libm/include/powerpc/fenv.h
@@ -0,0 +1,263 @@
+/*-
+ * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/msun/powerpc/fenv.h,v 1.3 2005/03/16 19:03:45 das Exp $
+ */
+
+#ifndef	_FENV_H_
+#define	_FENV_H_
+
+#include <sys/_types.h>
+
+typedef	__uint32_t	fenv_t;
+typedef	__uint32_t	fexcept_t;
+
+/* Exception flags */
+#define	FE_INEXACT	0x02000000
+#define	FE_DIVBYZERO	0x04000000
+#define	FE_UNDERFLOW	0x08000000
+#define	FE_OVERFLOW	0x10000000
+#define	FE_INVALID	0x20000000	/* all types of invalid FP ops */
+
+/*
+ * The PowerPC architecture has extra invalid flags that indicate the
+ * specific type of invalid operation occurred.  These flags may be
+ * tested, set, and cleared---but not masked---separately.  All of
+ * these bits are cleared when FE_INVALID is cleared, but only
+ * FE_VXSOFT is set when FE_INVALID is explicitly set in software.
+ */
+#define	FE_VXCVI	0x00000100	/* invalid integer convert */
+#define	FE_VXSQRT	0x00000200	/* square root of a negative */
+#define	FE_VXSOFT	0x00000400	/* software-requested exception */
+#define	FE_VXVC		0x00080000	/* ordered comparison involving NaN */
+#define	FE_VXIMZ	0x00100000	/* inf * 0 */
+#define	FE_VXZDZ	0x00200000	/* 0 / 0 */
+#define	FE_VXIDI	0x00400000	/* inf / inf */
+#define	FE_VXISI	0x00800000	/* inf - inf */
+#define	FE_VXSNAN	0x01000000	/* operation on a signalling NaN */
+#define	FE_ALL_INVALID	(FE_VXCVI | FE_VXSQRT | FE_VXSOFT | FE_VXVC | \
+			 FE_VXIMZ | FE_VXZDZ | FE_VXIDI | FE_VXISI | \
+			 FE_VXSNAN | FE_INVALID)
+#define	FE_ALL_EXCEPT	(FE_DIVBYZERO | FE_INEXACT | \
+			 FE_ALL_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
+
+/* Rounding modes */
+#define	FE_TONEAREST	0x0000
+#define	FE_TOWARDZERO	0x0001
+#define	FE_UPWARD	0x0002
+#define	FE_DOWNWARD	0x0003
+#define	_ROUND_MASK	(FE_TONEAREST | FE_DOWNWARD | \
+			 FE_UPWARD | FE_TOWARDZERO)
+
+__BEGIN_DECLS
+
+/* Default floating-point environment */
+extern const fenv_t	__fe_dfl_env;
+#define	FE_DFL_ENV	(&__fe_dfl_env)
+
+/* We need to be able to map status flag positions to mask flag positions */
+#define	_FPUSW_SHIFT	22
+#define	_ENABLE_MASK	((FE_DIVBYZERO | FE_INEXACT | FE_INVALID | \
+			 FE_OVERFLOW | FE_UNDERFLOW) >> _FPUSW_SHIFT)
+
+#define	__mffs(__env)	__asm __volatile("mffs %0" : "=f" (*(__env)))
+#define	__mtfsf(__env)	__asm __volatile("mtfsf 255,%0" : : "f" (__env))
+
+union __fpscr {
+	double __d;
+	struct {
+		__uint32_t __junk;
+		fenv_t __reg;
+	} __bits;
+};
+
+static __inline int
+feclearexcept(int __excepts)
+{
+	union __fpscr __r;
+
+	if (__excepts & FE_INVALID)
+		__excepts |= FE_ALL_INVALID;
+	__mffs(&__r.__d);
+	__r.__bits.__reg &= ~__excepts;
+	__mtfsf(__r.__d);
+	return (0);
+}
+
+static __inline int
+fegetexceptflag(fexcept_t *__flagp, int __excepts)
+{
+	union __fpscr __r;
+
+	__mffs(&__r.__d);
+	*__flagp = __r.__bits.__reg & __excepts;
+	return (0);
+}
+
+static __inline int
+fesetexceptflag(const fexcept_t *__flagp, int __excepts)
+{
+	union __fpscr __r;
+
+	if (__excepts & FE_INVALID)
+		__excepts |= FE_ALL_EXCEPT;
+	__mffs(&__r.__d);
+	__r.__bits.__reg &= ~__excepts;
+	__r.__bits.__reg |= *__flagp & __excepts;
+	__mtfsf(__r.__d);
+	return (0);
+}
+
+static __inline int
+feraiseexcept(int __excepts)
+{
+	union __fpscr __r;
+
+	if (__excepts & FE_INVALID)
+		__excepts |= FE_VXSOFT;
+	__mffs(&__r.__d);
+	__r.__bits.__reg |= __excepts;
+	__mtfsf(__r.__d);
+	return (0);
+}
+
+static __inline int
+fetestexcept(int __excepts)
+{
+	union __fpscr __r;
+
+	__mffs(&__r.__d);
+	return (__r.__bits.__reg & __excepts);
+}
+
+static __inline int
+fegetround(void)
+{
+	union __fpscr __r;
+
+	__mffs(&__r.__d);
+	return (__r.__bits.__reg & _ROUND_MASK);
+}
+
+static __inline int
+fesetround(int __round)
+{
+	union __fpscr __r;
+
+	if (__round & ~_ROUND_MASK)
+		return (-1);
+	__mffs(&__r.__d);
+	__r.__bits.__reg &= ~_ROUND_MASK;
+	__r.__bits.__reg |= __round;
+	__mtfsf(__r.__d);
+	return (0);
+}
+
+static __inline int
+fegetenv(fenv_t *__envp)
+{
+	union __fpscr __r;
+
+	__mffs(&__r.__d);
+	*__envp = __r.__bits.__reg;
+	return (0);
+}
+
+static __inline int
+feholdexcept(fenv_t *__envp)
+{
+	union __fpscr __r;
+
+	__mffs(&__r.__d);
+	*__envp = __r.__d;
+	__r.__bits.__reg &= ~(FE_ALL_EXCEPT | _ENABLE_MASK);
+	__mtfsf(__r.__d);
+	return (0);
+}
+
+static __inline int
+fesetenv(const fenv_t *__envp)
+{
+	union __fpscr __r;
+
+	__r.__bits.__reg = *__envp;
+	__mtfsf(__r.__d);
+	return (0);
+}
+
+static __inline int
+feupdateenv(const fenv_t *__envp)
+{
+	union __fpscr __r;
+
+	__mffs(&__r.__d);
+	__r.__bits.__reg &= FE_ALL_EXCEPT;
+	__r.__bits.__reg |= *__envp;
+	__mtfsf(__r.__d);
+	return (0);
+}
+
+#if __BSD_VISIBLE
+
+static __inline int
+feenableexcept(int __mask)
+{
+	union __fpscr __r;
+	fenv_t __oldmask;
+
+	__mffs(&__r.__d);
+	__oldmask = __r.__bits.__reg;
+	__r.__bits.__reg |= (__mask & FE_ALL_EXCEPT) >> _FPUSW_SHIFT;
+	__mtfsf(__r.__d);
+	return ((__oldmask & _ENABLE_MASK) << _FPUSW_SHIFT);
+}
+
+static __inline int
+fedisableexcept(int __mask)
+{
+	union __fpscr __r;
+	fenv_t __oldmask;
+
+	__mffs(&__r.__d);
+	__oldmask = __r.__bits.__reg;
+	__r.__bits.__reg &= ~((__mask & FE_ALL_EXCEPT) >> _FPUSW_SHIFT);
+	__mtfsf(__r.__d);
+	return ((__oldmask & _ENABLE_MASK) << _FPUSW_SHIFT);
+}
+
+static __inline int
+fegetexcept(void)
+{
+	union __fpscr __r;
+
+	__mffs(&__r.__d);
+	return ((__r.__bits.__reg & _ENABLE_MASK) << _FPUSW_SHIFT);
+}
+
+#endif /* __BSD_VISIBLE */
+
+__END_DECLS
+
+#endif	/* !_FENV_H_ */
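
The block comment above notes that the PowerPC FPSCR records which kind of invalid operation occurred via the FE_VX* sub-flags, readable alongside the summary FE_INVALID bit. A sketch that distinguishes a 0/0 from other invalid operations; the FE_VX* names are specific to this header:

/* Illustrative sketch; not part of the header above. */
#include <fenv.h>
#include <stdio.h>

int
main(void)
{
	volatile double z = 0.0;
	volatile double r;

	feclearexcept(FE_ALL_EXCEPT);
	r = z / z;			/* invalid: 0/0 */
	(void)r;
	if (fetestexcept(FE_INVALID))
		printf("invalid operation, 0/0 specifically: %s\n",
		    fetestexcept(FE_VXZDZ) ? "yes" : "no");
	return 0;
}
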
diff --git a/libm/include/sparc64/fenv.h b/libm/include/sparc64/fenv.h
new file mode 100644
index 0000000..684c4a2
--- /dev/null
+++ b/libm/include/sparc64/fenv.h
@@ -0,0 +1,254 @@
+/*-
+ * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/msun/sparc64/fenv.h,v 1.3 2005/03/16 19:03:46 das Exp $
+ */
+
+#ifndef	_FENV_H_
+#define	_FENV_H_
+
+#include <sys/_types.h>
+
+typedef	__uint64_t	fenv_t;
+typedef	__uint64_t	fexcept_t;
+
+/* Exception flags */
+#define	FE_INVALID	0x00000200
+#define	FE_DIVBYZERO	0x00000040
+#define	FE_OVERFLOW	0x00000100
+#define	FE_UNDERFLOW	0x00000080
+#define	FE_INEXACT	0x00000020
+#define	FE_ALL_EXCEPT	(FE_DIVBYZERO | FE_INEXACT | \
+			 FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
+
+/*
+ * Rounding modes
+ *
+ * We can't just use the hardware bit values here, because that would
+ * make FE_UPWARD and FE_DOWNWARD negative, which is not allowed.
+ */
+#define	FE_TONEAREST	0x0
+#define	FE_TOWARDZERO	0x1
+#define	FE_UPWARD	0x2
+#define	FE_DOWNWARD	0x3
+#define	_ROUND_MASK	(FE_TONEAREST | FE_DOWNWARD | \
+			 FE_UPWARD | FE_TOWARDZERO)
+#define	_ROUND_SHIFT	30
+
+__BEGIN_DECLS
+
+/* Default floating-point environment */
+extern const fenv_t	__fe_dfl_env;
+#define	FE_DFL_ENV	(&__fe_dfl_env)
+
+/* We need to be able to map status flag positions to mask flag positions */
+#define _FPUSW_SHIFT	18
+#define	_ENABLE_MASK	(FE_ALL_EXCEPT << _FPUSW_SHIFT)
+
+#define	__ldxfsr(__r)	__asm __volatile("ldx %0, %%fsr" : : "m" (__r))
+#define	__stxfsr(__r)	__asm __volatile("stx %%fsr, %0" : "=m" (*(__r)))
+
+static __inline int
+feclearexcept(int __excepts)
+{
+	fexcept_t __r;
+
+	__stxfsr(&__r);
+	__r &= ~__excepts;
+	__ldxfsr(__r);
+	return (0);
+}
+
+static __inline int
+fegetexceptflag(fexcept_t *__flagp, int __excepts)
+{
+	fexcept_t __r;
+
+	__stxfsr(&__r);
+	*__flagp = __r & __excepts;
+	return (0);
+}
+
+static __inline int
+fesetexceptflag(const fexcept_t *__flagp, int __excepts)
+{
+	fexcept_t __r;
+
+	__stxfsr(&__r);
+	__r &= ~__excepts;
+	__r |= *__flagp & __excepts;
+	__ldxfsr(__r);
+	return (0);
+}
+
+/*
+ * In contrast with the ia64 platform, it seems to be worthwhile to
+ * inline this function on sparc64 even when the arguments are not
+ * compile-time constants.  Perhaps this depends on the register window.
+ */
+static __inline int
+feraiseexcept(int __excepts)
+{
+	volatile double d;
+
+	/*
+	 * With a compiler that supports the FENV_ACCESS pragma
+	 * properly, simple expressions like '0.0 / 0.0' should
+	 * be sufficient to generate traps.  Unfortunately, we
+	 * need to bring a volatile variable into the equation
+	 * to prevent incorrect optimizations.
+	 */
+	if (__excepts & FE_INVALID) {
+		d = 0.0;
+		d = 0.0 / d;
+	}
+	if (__excepts & FE_DIVBYZERO) {
+		d = 0.0;
+		d = 1.0 / d;
+	}
+	if (__excepts & FE_OVERFLOW) {
+		d = 0x1.ffp1023;
+		d *= 2.0;
+	}
+	if (__excepts & FE_UNDERFLOW) {
+		d = 0x1p-1022;
+		d /= 0x1p1023;
+	}
+	if (__excepts & FE_INEXACT) {
+		d = 0x1p-1022;
+		d += 1.0;
+	}
+	return (0);
+}
+
+static __inline int
+fetestexcept(int __excepts)
+{
+	fexcept_t __r;
+
+	__stxfsr(&__r);
+	return (__r & __excepts);
+}
+
+static __inline int
+fegetround(void)
+{
+	fenv_t __r;
+
+	__stxfsr(&__r);
+	return ((__r >> _ROUND_SHIFT) & _ROUND_MASK);
+}
+
+static __inline int
+fesetround(int __round)
+{
+	fenv_t __r;
+
+	if (__round & ~_ROUND_MASK)
+		return (-1);
+	__stxfsr(&__r);
+	__r &= ~(_ROUND_MASK << _ROUND_SHIFT);
+	__r |= __round << _ROUND_SHIFT;
+	__ldxfsr(__r);
+	return (0);
+}
+
+static __inline int
+fegetenv(fenv_t *__envp)
+{
+
+	__stxfsr(__envp);
+	return (0);
+}
+
+static __inline int
+feholdexcept(fenv_t *__envp)
+{
+	fenv_t __r;
+
+	__stxfsr(&__r);
+	*__envp = __r;
+	__r &= ~(FE_ALL_EXCEPT | _ENABLE_MASK);
+	__ldxfsr(__r);
+	return (0);
+}
+
+static __inline int
+fesetenv(const fenv_t *__envp)
+{
+
+	__ldxfsr(*__envp);
+	return (0);
+}
+
+static __inline int
+feupdateenv(const fenv_t *__envp)
+{
+	fexcept_t __r;
+
+	__stxfsr(&__r);
+	__ldxfsr(*__envp);
+	feraiseexcept(__r & FE_ALL_EXCEPT);
+	return (0);
+}
+
+#if __BSD_VISIBLE
+
+static __inline int
+feenableexcept(int __mask)
+{
+	fenv_t __old_r, __new_r;
+
+	__stxfsr(&__old_r);
+	__new_r = __old_r | ((__mask & FE_ALL_EXCEPT) << _FPUSW_SHIFT);
+	__ldxfsr(__new_r);
+	return ((__old_r >> _FPUSW_SHIFT) & FE_ALL_EXCEPT);
+}
+
+static __inline int
+fedisableexcept(int __mask)
+{
+	fenv_t __old_r, __new_r;
+
+	__stxfsr(&__old_r);
+	__new_r = __old_r & ~((__mask & FE_ALL_EXCEPT) << _FPUSW_SHIFT);
+	__ldxfsr(__new_r);
+	return ((__old_r >> _FPUSW_SHIFT) & FE_ALL_EXCEPT);
+}
+
+static __inline int
+fegetexcept(void)
+{
+	fenv_t __r;
+
+	__stxfsr(&__r);
+	return ((__r & _ENABLE_MASK) >> _FPUSW_SHIFT);
+}
+
+#endif /* __BSD_VISIBLE */
+
+__END_DECLS
+
+#endif	/* !_FENV_H_ */
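
The rounding-mode comment above explains why the raw FSR encodings are not used directly: the RD field sits in bits 31:30, so FE_UPWARD and FE_DOWNWARD would come out negative as int constants. The header therefore keeps small values and lets fegetround()/fesetround() shift by _ROUND_SHIFT. The arithmetic, with the shift mirrored locally:

/* Illustrative sketch; ROUND_SHIFT mirrors _ROUND_SHIFT above. */
#include <stdio.h>

#define ROUND_SHIFT	30

int
main(void)
{
	unsigned int raw_downward = 0x3u << ROUND_SHIFT;	/* FSR RD=11b */

	printf("raw FSR field:          %#x\n", raw_downward);	/* 0xc0000000 */
	printf("fits in a positive int: %s\n",
	    raw_downward <= 0x7fffffffu ? "yes" : "no");	/* no */
	return 0;
}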