libc: Add ftruncate64 and improve 64-bit parameter syscall handling.
This patch improves the handling of 64-bit parameters in syscalls on ARM.
The ARM EABI mandates that 64-bit quantities be passed in even/odd register
pairs, which requires special treatment in the generated syscall stubs.
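For reference, the pairing rule is the same one visible through the raw
syscall() interface: on little-endian ARM EABI the 64-bit offset of
pread64() cannot start in r3, so a padding word is inserted and the offset
occupies the r4/r5 pair. A minimal sketch (illustration only, not part of
this patch; the generated assembly stubs do this splitting themselves):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Illustrative wrapper: split a 64-bit offset for the kernel's
     * pread64 on little-endian ARM EABI. fd/buf/nbytes land in r0-r2,
     * the zero pad in r3, and the offset halves in the even/odd pair
     * r4/r5. */
    static ssize_t raw_pread64(int fd, void* buf, size_t nbytes, uint64_t offset) {
        return syscall(__NR_pread64, fd, buf, nbytes,
                       0,                                    /* padding -> r3 */
                       (unsigned int)(offset & 0xffffffff),  /* low word -> r4 */
                       (unsigned int)(offset >> 32));        /* high word -> r5 */
    }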
Handling that pairing in the generated stubs lets us simplify our
implementations of pread() and pwrite() and remove the C stubs for
pread64() and pwrite64().
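With the 64-bit variants available as plain stubs, the wrappers become
trivial. A sketch of what the simplified pread() amounts to (assuming the
usual <unistd.h>/<sys/types.h> declarations for off64_t and pread64()):

    #include <sys/types.h>
    #include <unistd.h>

    /* pread() just widens the 32-bit off_t and defers to the
     * generated pread64() stub. */
    ssize_t pread(int fd, void* buf, size_t nbytes, off_t offset) {
        return pread64(fd, buf, nbytes, (off64_t)offset);
    }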
Also add ftruncate64() to <unistd.h>.
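A short usage sketch for the new declaration ("big.dat" and the 5 GiB size
are just example values):

    #include <fcntl.h>
    #include <unistd.h>

    /* Extend a file well past the 32-bit off_t limit. */
    int make_big_file(void) {
        int fd = open("big.dat", O_RDWR | O_CREAT, 0644);
        if (fd < 0) return -1;
        int result = ftruncate64(fd, 5LL * 1024 * 1024 * 1024);  /* 5 GiB */
        close(fd);
        return result;
    }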
Change-Id: I407e2fd223ba0093dd2d0b04c6152fadfc9ce3ef
Bug 3107933
diff --git a/libc/arch-x86/syscalls/ftruncate64.S b/libc/arch-x86/syscalls/ftruncate64.S
new file mode 100644
index 0000000..66835ab
--- /dev/null
+++ b/libc/arch-x86/syscalls/ftruncate64.S
@@ -0,0 +1,29 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type ftruncate64, @function
+ .globl ftruncate64
+ .align 4
+
+ftruncate64:
+ pushl %ebx
+ pushl %ecx
+ pushl %edx
+ mov 16(%esp), %ebx
+ mov 20(%esp), %ecx
+ mov 24(%esp), %edx
+ movl $__NR_ftruncate64, %eax
+ int $0x80
+ cmpl $-129, %eax
+ jb 1f
+ negl %eax
+ pushl %eax
+ call __set_errno
+ addl $4, %esp
+ orl $-1, %eax
+1:
+ popl %edx
+ popl %ecx
+ popl %ebx
+ ret
diff --git a/libc/arch-x86/syscalls/__pread64.S b/libc/arch-x86/syscalls/pread64.S
similarity index 90%
rename from libc/arch-x86/syscalls/__pread64.S
rename to libc/arch-x86/syscalls/pread64.S
index 3114673..eb004a9 100644
--- a/libc/arch-x86/syscalls/__pread64.S
+++ b/libc/arch-x86/syscalls/pread64.S
@@ -2,11 +2,11 @@
#include <sys/linux-syscalls.h>
.text
- .type __pread64, @function
- .globl __pread64
+ .type pread64, @function
+ .globl pread64
.align 4
-__pread64:
+pread64:
pushl %ebx
pushl %ecx
pushl %edx
diff --git a/libc/arch-x86/syscalls/__pwrite64.S b/libc/arch-x86/syscalls/pwrite64.S
similarity index 90%
rename from libc/arch-x86/syscalls/__pwrite64.S
rename to libc/arch-x86/syscalls/pwrite64.S
index 28f6536..01389f8 100644
--- a/libc/arch-x86/syscalls/__pwrite64.S
+++ b/libc/arch-x86/syscalls/pwrite64.S
@@ -2,11 +2,11 @@
#include <sys/linux-syscalls.h>
.text
- .type __pwrite64, @function
- .globl __pwrite64
+ .type pwrite64, @function
+ .globl pwrite64
.align 4
-__pwrite64:
+pwrite64:
pushl %ebx
pushl %ecx
pushl %edx