Fix an off-by-one error in the sigset_t functions' error handling.

Spotted while running the tests on MIPS, where sigset_t is
actually large enough. The bits in sigset_t are used such that
signal 1 is represented by bit 0, so the range of valid signals
is actually [1, 8*sizeof(sigset_t)], but the old checks rejected
the highest representable signal. It seems clearer to rework the
code in terms of valid bit offsets [0, 8*sizeof(sigset_t)), which
leads to the usual bounds-checking idiom.
Change-Id: Id899c288e15ff71c85dd2fd33c47f8e97aa1956f
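For reference, a minimal standalone sketch of the bit mapping and bounds check described above. This is not part of the change itself; the fake_sigset_t type and fake_sigaddset function are purely illustrative stand-ins (a single unsigned long, whereas the real functions index an array of longs), but the bounds-checking idiom is the same:

```c
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for sigset_t: one unsigned long. */
typedef unsigned long fake_sigset_t;

static int fake_sigaddset(fake_sigset_t* set, int signum) {
  int bit = signum - 1;  /* Signal numbers start at 1, but bit positions start at 0. */
  if (set == NULL || bit < 0 || bit >= (int) (8 * sizeof(fake_sigset_t))) {
    errno = EINVAL;  /* Rejects signum <= 0 and anything past the last bit. */
    return -1;
  }
  *set |= 1UL << bit;
  return 0;
}

int main(void) {
  fake_sigset_t set = 0;
  int last = (int) (8 * sizeof(fake_sigset_t));  /* Highest valid signal. */
  printf("add %d -> %d\n", last, fake_sigaddset(&set, last));          /* 0 */
  printf("add %d -> %d\n", last + 1, fake_sigaddset(&set, last + 1));  /* -1, EINVAL */
  return 0;
}
```

With the old check (`signum >= 8*sizeof(...)`), the first call above would have been rejected even though its bit fits in the set.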
diff --git a/libc/include/signal.h b/libc/include/signal.h
index 9d3badc..e2713db 100644
--- a/libc/include/signal.h
+++ b/libc/include/signal.h
@@ -58,34 +58,34 @@
extern const char* const sys_signame[];
static __inline__ int sigismember(sigset_t* set, int signum) {
- if (set == NULL || signum < 1 || signum >= 8*sizeof(sigset_t)) {
+ int bit = signum - 1; // Signal numbers start at 1, but bit positions start at 0.
+ if (set == NULL || bit < 0 || bit >= 8*sizeof(sigset_t)) {
errno = EINVAL;
return -1;
}
unsigned long* local_set = (unsigned long*) set;
- signum--;
- return (int) ((local_set[signum/LONG_BIT] >> (signum%LONG_BIT)) & 1);
+ return (int) ((local_set[bit / LONG_BIT] >> (bit % LONG_BIT)) & 1);
}
static __inline__ int sigaddset(sigset_t* set, int signum) {
- if (set == NULL || signum < 1 || signum >= 8*sizeof(sigset_t)) {
+ int bit = signum - 1; // Signal numbers start at 1, but bit positions start at 0.
+ if (set == NULL || bit < 0 || bit >= 8*sizeof(sigset_t)) {
errno = EINVAL;
return -1;
}
unsigned long* local_set = (unsigned long*) set;
- signum--;
- local_set[signum/LONG_BIT] |= 1UL << (signum%LONG_BIT);
+ local_set[bit / LONG_BIT] |= 1UL << (bit % LONG_BIT);
return 0;
}
static __inline__ int sigdelset(sigset_t* set, int signum) {
- if (set == NULL || signum < 1 || signum >= 8*sizeof(sigset_t)) {
+ int bit = signum - 1; // Signal numbers start at 1, but bit positions start at 0.
+ if (set == NULL || bit < 0 || bit >= 8*sizeof(sigset_t)) {
errno = EINVAL;
return -1;
}
unsigned long* local_set = (unsigned long*) set;
- signum--;
- local_set[signum/LONG_BIT] &= ~(1UL << (signum%LONG_BIT));
+ local_set[bit / LONG_BIT] &= ~(1UL << (bit % LONG_BIT));
return 0;
}
diff --git a/tests/signal_test.cpp b/tests/signal_test.cpp
index fcfcb18..1292568 100644
--- a/tests/signal_test.cpp
+++ b/tests/signal_test.cpp
@@ -48,13 +48,13 @@
int min_signal = SIGHUP;
int max_signal = SIGRTMAX;
-#if __BIONIC__
- // bionic's sigset_t is too small: 32 bits instead of 64.
+#if defined(__BIONIC__) && !defined(__mips__)
+ // bionic's sigset_t is too small for ARM and x86: 32 bits instead of 64.
// This means you can't refer to any of the real-time signals.
// See http://b/3038348 and http://b/5828899.
- max_signal = 31;
+ max_signal = 32;
#else
- // Other C libraries are perfectly capable of using their largest signal.
+ // Other C libraries (or bionic for MIPS) are perfectly capable of using their largest signal.
ASSERT_GE(sizeof(sigset_t) * 8, static_cast<size_t>(SIGRTMAX));
#endif
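A rough boundary check of the new behaviour, assuming a 32-bit sigset_t as on the non-MIPS bionic targets discussed above (on C libraries with a larger sigset_t the second call succeeds as well):

```c
#include <errno.h>
#include <signal.h>
#include <stdio.h>

int main(void) {
  sigset_t set;
  sigemptyset(&set);
  /* With a 32-bit sigset_t, bit offsets [0, 32) map to signals [1, 32]. */
  printf("sigaddset(32) -> %d\n", sigaddset(&set, 32));  /* 0 after the fix */
  int rc = sigaddset(&set, 33);                          /* expect -1 here */
  printf("sigaddset(33) -> %d (errno=%d)\n", rc, errno); /* EINVAL */
  return 0;
}
```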