/* Copyright (C) 2009 The Android Open Source Project */
#include <sys/linux-syscalls.h>

#define FUTEX_WAIT 0
#define FUTEX_WAKE 1

/*
 * int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout)
 *
 * Raw futex(FUTEX_WAIT) syscall: sleeps until woken, until *ftx no longer
 * equals val, or until the timeout expires (timeout == NULL waits forever).
 * Returns 0 on success or a negative errno straight from the kernel —
 * no errno translation is done here.
 *
 * i386 int $0x80 convention: %eax = syscall number, arguments in
 * %ebx, %ecx, %edx, %esi in order.  %ebx and %esi are callee-saved,
 * so they are preserved across the call.
 */
.text
.globl __futex_wait
.type __futex_wait, @function
.align 4
__futex_wait:
    pushl   %ebx
    pushl   %esi
    movl    12(%esp), %ebx          /* ftx: futex word address (arg offsets shifted by the two pushes) */
    movl    $FUTEX_WAIT, %ecx       /* op */
    movl    16(%esp), %edx          /* val: expected value of *ftx */
    movl    20(%esp), %esi          /* timeout */
    movl    $__NR_futex, %eax
    int     $0x80
    popl    %esi
    popl    %ebx
    ret
26
27
/*
 * int __futex_wake(volatile void *ftx, int count)
 *
 * Raw futex(FUTEX_WAKE) syscall: wakes up to `count` threads blocked on
 * ftx.  Returns the number of threads woken, or a negative errno from
 * the kernel (no errno translation).
 */
.text
.globl __futex_wake
.type __futex_wake, @function
.align 4
__futex_wake:
    pushl   %ebx
    movl    8(%esp), %ebx           /* ftx (arg offsets shifted by the push) */
    movl    $FUTEX_WAKE, %ecx       /* op */
    movl    12(%esp), %edx          /* count: max waiters to wake */
    movl    $__NR_futex, %eax
    int     $0x80
    popl    %ebx
    ret
43
44
/*
 * int __atomic_cmpxchg(int old, int new, volatile int *addr)
 *
 * Atomically: if (*addr == old) { *addr = new; return 0; } else return 1;
 * Note the inverted sense: 0 == success, 1 == failure.
 *
 * cmpxchg sets ZF when the exchange happened, so the result is produced
 * branchlessly with setnz instead of the traditional jnz/jmp label pair.
 */
.text
.globl __atomic_cmpxchg
.type __atomic_cmpxchg, @function
.align 4
__atomic_cmpxchg:
    movl    4(%esp), %eax           /* old: cmpxchg compares against %eax */
    movl    8(%esp), %ecx           /* new */
    movl    12(%esp), %edx          /* addr */
    lock cmpxchg %ecx, (%edx)       /* ZF=1 iff *addr was `old` and was replaced */
    setnz   %al                     /* %al = 0 on success, 1 on failure */
    movzbl  %al, %eax               /* widen to the full int return value */
    ret                             /* 0 == success, 1 == failure */
63
64
/*
 * int __atomic_swap(int new, volatile int *addr)
 *
 * Atomically stores `new` into *addr and returns the value *addr held
 * before the store.  xchg with a memory operand asserts LOCK implicitly,
 * so no explicit lock prefix is needed.  Loading `new` directly into
 * %eax (the return register) saves the extra move the original made.
 */
.text
.globl __atomic_swap
.type __atomic_swap, @function
.align 4
__atomic_swap:
    movl    4(%esp), %eax           /* new value, swapped into memory below */
    movl    8(%esp), %edx           /* addr */
    xchg    %eax, (%edx)            /* atomic; %eax = previous *addr */
    ret
77
78
/*
 * int __atomic_dec(volatile int *addr)
 *
 * Atomically decrements *addr and returns the value it held BEFORE the
 * decrement.  The original implementation looped over a call to
 * __atomic_cmpxchg (and its own comment called it "probably suboptimal");
 * a single lock xadd gives the identical result — old value returned,
 * *addr reduced by one — with no loop, no call, and no saved registers.
 */
.text
.globl __atomic_dec
.type __atomic_dec, @function
.align 4
__atomic_dec:
    movl    4(%esp), %edx           /* addr */
    movl    $-1, %eax               /* addend */
    lock xaddl %eax, (%edx)         /* *addr += -1; %eax = old *addr */
    ret                             /* return the pre-decrement value */
111
112
/*
 * int __atomic_inc(volatile int *addr)
 *
 * Atomically increments *addr and returns the value it held BEFORE the
 * increment.  Replaces the original cmpxchg retry loop (which made a
 * full call to __atomic_cmpxchg per attempt) with a single lock xadd:
 * same contract — old value returned, *addr incremented by one.
 */
.text
.globl __atomic_inc
.type __atomic_inc, @function
.align 4
__atomic_inc:
    movl    4(%esp), %edx           /* addr */
    movl    $1, %eax                /* addend */
    lock xaddl %eax, (%edx)         /* *addr += 1; %eax = old *addr */
    ret                             /* return the pre-increment value */
140