/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

// Memory order requirements for POSIX semaphores appear unclear and are
// currently interpreted inconsistently.
// We conservatively prefer sequentially consistent operations for now.
// CAUTION: This is more conservative than some other major implementations,
// and may change if and when the issue is resolved.

#include <semaphore.h>
#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <sys/time.h>
#include <time.h>

#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
#include "private/bionic_time_conversions.h"

// In this implementation, a semaphore contains a
// 31-bit signed value and a 1-bit 'shared' flag
// (for process-sharing purposes).
//
// We use the value -1 to indicate contention on the
// semaphore, and 0 or more to indicate the uncontended state;
// any value lower than -1 is invalid at runtime.
//
// State diagram:
//
//  post(1)  ==> 2
//  post(0)  ==> 1
//  post(-1) ==> 1, then wake all waiters
//
//  wait(2)  ==> 1
//  wait(1)  ==> 0
//  wait(0)  ==> -1 then wait for a wake up + loop
//  wait(-1) ==> -1 then wait for a wake up + loop

// Use the upper 31 bits for the counter, and the lower one
// for the shared flag.
#define SEMCOUNT_SHARED_MASK      0x00000001
#define SEMCOUNT_VALUE_MASK       0xfffffffe
#define SEMCOUNT_VALUE_SHIFT      1

// Convert a value into the corresponding sem->count bit pattern.
#define SEMCOUNT_FROM_VALUE(val)    (((val) << SEMCOUNT_VALUE_SHIFT) & SEMCOUNT_VALUE_MASK)

// Convert a sem->count bit pattern into the corresponding signed value.
static inline int SEMCOUNT_TO_VALUE(unsigned int sval) {
  return (static_cast<int>(sval) >> SEMCOUNT_VALUE_SHIFT);
}

// The value +1 as a sem->count bit-pattern.
#define SEMCOUNT_ONE              SEMCOUNT_FROM_VALUE(1)

// The value -1 as a sem->count bit-pattern.
#define SEMCOUNT_MINUS_ONE        SEMCOUNT_FROM_VALUE(-1)

#define SEMCOUNT_DECREMENT(sval)    (((sval) - (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
#define SEMCOUNT_INCREMENT(sval)    (((sval) + (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
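
// For illustration, a few of the resulting bit patterns (assuming a 32-bit count):
//   SEMCOUNT_FROM_VALUE(2)              == 0x00000004   (value 2, process-private)
//   SEMCOUNT_FROM_VALUE(2) | shared bit == 0x00000005   (value 2, process-shared)
//   SEMCOUNT_FROM_VALUE(-1)             == 0xfffffffe   (the 'contended' marker)
//   SEMCOUNT_TO_VALUE(0xfffffffe)       == -1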

static inline atomic_uint* SEM_TO_ATOMIC_POINTER(sem_t* sem) {
  static_assert(sizeof(atomic_uint) == sizeof(sem->count),
                "sem->count should actually be atomic_uint in implementation.");

  // We prefer casting to atomic_uint instead of declaring sem->count to be atomic_uint directly,
  // because the latter would pollute semaphore.h.
  return reinterpret_cast<atomic_uint*>(&sem->count);
}

// Return the shared bitflag from a semaphore counter.
static inline unsigned int SEM_GET_SHARED(atomic_uint* sem_count_ptr) {
  // memory_order_relaxed is used because the shared flag will not change after initialization.
  return (atomic_load_explicit(sem_count_ptr, memory_order_relaxed) & SEMCOUNT_SHARED_MASK);
}

int sem_init(sem_t* sem, int pshared, unsigned int value) {
  // Ensure that 'value' can be stored in the semaphore.
  if (value > SEM_VALUE_MAX) {
    errno = EINVAL;
    return -1;
  }

  unsigned int count = SEMCOUNT_FROM_VALUE(value);
  if (pshared != 0) {
    count |= SEMCOUNT_SHARED_MASK;
  }

  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  atomic_init(sem_count_ptr, count);
  return 0;
}
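
// Illustrative use of the unnamed-semaphore API implemented in this file
// (a minimal caller-side sketch, not part of the implementation):
//   sem_t sem;
//   sem_init(&sem, 0, 1);   // process-private, initial value 1
//   sem_wait(&sem);         // 1 -> 0
//   sem_post(&sem);         // 0 -> 1
//   sem_destroy(&sem);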

int sem_destroy(sem_t*) {
  return 0;
}

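// Named semaphores are not supported by this implementation; the functions
// below are stubs that fail with ENOSYS.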
sem_t* sem_open(const char*, int, ...) {
  errno = ENOSYS;
  return SEM_FAILED;
}

int sem_close(sem_t*) {
  errno = ENOSYS;
  return -1;
}

int sem_unlink(const char*) {
  errno = ENOSYS;
  return -1;
}

// Decrement a semaphore's value atomically, and return the old value.
// As a special case, this returns immediately, without decrementing,
// if the value is already negative (i.e. -1).
static int __sem_dec(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;

  // Use memory_order_seq_cst in the atomic_compare_exchange operation to ensure that
  // all memory accesses made by other threads can be seen by the current thread.
  // An acquire fence may be sufficient, but it is still under discussion whether
  // POSIX semaphores should provide sequential consistency.
  do {
    if (SEMCOUNT_TO_VALUE(old_value) < 0) {
      break;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
           SEMCOUNT_DECREMENT(old_value) | shared));

  return SEMCOUNT_TO_VALUE(old_value);
}

// Same as __sem_dec, but will not touch anything if the
// value is already negative *or* 0. Returns the old value.
static int __sem_trydec(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;

  // Use memory_order_seq_cst in the atomic_compare_exchange operation to ensure that
  // all memory accesses made by other threads can be seen by the current thread.
  // An acquire fence may be sufficient, but it is still under discussion whether
  // POSIX semaphores should provide sequential consistency.
  do {
    if (SEMCOUNT_TO_VALUE(old_value) <= 0) {
      break;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
           SEMCOUNT_DECREMENT(old_value) | shared));

  return SEMCOUNT_TO_VALUE(old_value);
}

// "Increment" the value of a semaphore atomically and
// return its old value. Note that this implements
// the special case of "incrementing" any negative
// value to +1 directly.
//
// NOTE: The value will _not_ wrap above SEM_VALUE_MAX.
static int __sem_inc(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;
  unsigned int new_value;

  // Use memory_order_seq_cst in the atomic_compare_exchange operation to ensure that
  // all memory accesses made before it can be seen by other threads.
  // A release fence may be sufficient, but it is still under discussion whether
  // POSIX semaphores should provide sequential consistency.
  do {
    // Can't go higher than SEM_VALUE_MAX.
    if (SEMCOUNT_TO_VALUE(old_value) == SEM_VALUE_MAX) {
      break;
    }

    // If the counter is negative, go directly to one, otherwise just increment.
    if (SEMCOUNT_TO_VALUE(old_value) < 0) {
      new_value = SEMCOUNT_ONE | shared;
    } else {
      new_value = SEMCOUNT_INCREMENT(old_value) | shared;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
           new_value));

  return SEMCOUNT_TO_VALUE(old_value);
}

int sem_wait(sem_t* sem) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  while (true) {
    if (__sem_dec(sem_count_ptr) > 0) {
      return 0;
    }

    __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, NULL);
  }
}

int sem_timedwait(sem_t* sem, const timespec* abs_timeout) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);

  // POSIX says we need to try to decrement the semaphore
  // before checking the timeout value. Note that if the
  // value is currently 0, __sem_trydec() does nothing.
  if (__sem_trydec(sem_count_ptr) > 0) {
    return 0;
  }

  // Check the timeout, as required by POSIX.
  if (abs_timeout == NULL || abs_timeout->tv_sec < 0 || abs_timeout->tv_nsec < 0 || abs_timeout->tv_nsec >= NS_PER_S) {
    errno = EINVAL;
    return -1;
  }

  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  while (true) {
    // POSIX mandates CLOCK_REALTIME here.
    timespec ts;
    if (!timespec_from_absolute_timespec(ts, *abs_timeout, CLOCK_REALTIME)) {
      errno = ETIMEDOUT;
      return -1;
    }

    // Try to grab the semaphore. If the value was 0, this will also change it to -1.
    if (__sem_dec(sem_count_ptr) > 0) {
      break;
    }

    // Contention detected. Wait for a wakeup event.
    int ret = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, &ts);

    // Return in case of timeout or interrupt.
    if (ret == -ETIMEDOUT || ret == -EINTR) {
      errno = -ret;
      return -1;
    }
  }
  return 0;
}
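
// Illustrative caller-side sketch: sem_timedwait() takes an absolute
// CLOCK_REALTIME deadline, e.g.
//   timespec ts;
//   clock_gettime(CLOCK_REALTIME, &ts);
//   ts.tv_sec += 5;  // give up after roughly five seconds
//   if (sem_timedwait(&sem, &ts) == -1 && errno == ETIMEDOUT) { /* timed out */ }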

int sem_post(sem_t* sem) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  unsigned int shared = SEM_GET_SHARED(sem_count_ptr);

  int old_value = __sem_inc(sem_count_ptr);
  if (old_value < 0) {
    // Contention on the semaphore. Wake up all waiters.
    __futex_wake_ex(sem_count_ptr, shared, INT_MAX);
  } else if (old_value == SEM_VALUE_MAX) {
    // Overflow detected.
    errno = EOVERFLOW;
    return -1;
  }

  return 0;
}

int sem_trywait(sem_t* sem) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);
  if (__sem_trydec(sem_count_ptr) > 0) {
    return 0;
  } else {
    errno = EAGAIN;
    return -1;
  }
}

int sem_getvalue(sem_t* sem, int* sval) {
  atomic_uint* sem_count_ptr = SEM_TO_ATOMIC_POINTER(sem);

  // Use memory_order_seq_cst for the atomic_load operation.
  // memory_order_relaxed may be fine here, but it is still under discussion
  // whether POSIX semaphores should provide sequential consistency.
  int val = SEMCOUNT_TO_VALUE(atomic_load(sem_count_ptr));
  if (val < 0) {
    val = 0;
  }

  *sval = val;
  return 0;
}