OSDN Git Service

Add posix_fadvise and posix_fadvise64 for ARM.
authorKhem Raj <kraj@mvista.com>
Tue, 6 Mar 2007 08:06:26 +0000 (08:06 -0000)
committerKhem Raj <kraj@mvista.com>
Tue, 6 Mar 2007 08:06:26 +0000 (08:06 -0000)
Import INTERNAL_SYSCALL macro for i386 from glibc.
Use the above macro in the posix_fadvise implementation when an arch defines it.

libc/sysdeps/linux/arm/Makefile.arch
libc/sysdeps/linux/common/posix_fadvise.c
libc/sysdeps/linux/common/posix_fadvise64.c
libc/sysdeps/linux/i386/bits/syscalls.h

index c599a22..a3b6fc4 100644 (file)
@@ -5,7 +5,8 @@
 # Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
 #
 
-CSRC := brk.c ioperm.c iopl.c mmap.c sigaction.c __syscall_error.c
+CSRC := brk.c ioperm.c iopl.c mmap.c posix_fadvise.c posix_fadvise64.c \
+       sigaction.c __syscall_error.c
 
 SSRC := \
        __longjmp.S vfork.S clone.S setjmp.S bsd-setjmp.S \
index 47cf66a..3bd689f 100644 (file)
 
 #ifdef __NR_fadvise64
 #define __NR_posix_fadvise __NR_fadvise64
+/* Get rid of the following conditional once all supported
+ * arches have INTERNAL_SYSCALL defined.  */
+#ifdef INTERNAL_SYSCALL
+/* POSIX contract: return 0 on success or a positive error number on
+ * failure; errno is NOT set (hence INTERNAL_SYSCALL, not INLINE_SYSCALL).
+ * The 64-bit syscall offset is built from the 32-bit off_t as a register
+ * pair; "offset >> 31" sign-extends the value into the high word.  */
+int posix_fadvise(int fd, off_t offset, off_t len, int advice)
+{
+	INTERNAL_SYSCALL_DECL(err);
+	int ret = (int) (INTERNAL_SYSCALL(posix_fadvise, err, 5, fd,
+			__LONG_LONG_PAIR(offset >> 31, offset), len, advice));
+	if (INTERNAL_SYSCALL_ERROR_P(ret, err))
+		return INTERNAL_SYSCALL_ERRNO(ret, err);
+	return 0;
+}
+#else
 _syscall4(int, posix_fadvise, int, fd, off_t, offset,
           off_t, len, int, advice);
 
+#endif
+
 #if defined __UCLIBC_HAS_LFS__ && (!defined __NR_fadvise64_64 || !defined _syscall6)
 extern __typeof(posix_fadvise) posix_fadvise64;
 strong_alias(posix_fadvise,posix_fadvise64)
index d931aff..31ed5ef 100644 (file)
 #if __WORDSIZE == 64
 
 #define __NR_posix_fadvise64 __NR_fadvise64_64
+
+#ifdef INTERNAL_SYSCALL
+/* POSIX contract: return 0 on success or a positive error number on
+ * failure; errno is NOT set.  len must be representable as off_t since
+ * only the offset is passed to the kernel as a 64-bit pair here.
+ * BUGFIX: the syscall previously passed the undeclared identifier
+ * "advise"; the parameter is named "advice".  */
+int posix_fadvise64(int fd, __off64_t offset, __off64_t len, int advice)
+{
+	if (len != (off_t) len)
+		return EOVERFLOW;
+	INTERNAL_SYSCALL_DECL(err);
+	int ret = INTERNAL_SYSCALL(posix_fadvise64, err, 6, fd,
+			__LONG_LONG_PAIR((long) (offset >> 32), (long) offset),
+			(off_t) len, advice);
+	if (!INTERNAL_SYSCALL_ERROR_P(ret, err))
+		return 0;
+	return INTERNAL_SYSCALL_ERRNO(ret, err);
+}
+#else
 _syscall4(int, posix_fadvise64, int, fd, __off64_t, offset,
           __off64_t, len, int, advice);
+#endif
 
 /* 32 bit implementation is kind of a pita */
 #elif __WORDSIZE == 32
index d612071..eb77ea8 100644 (file)
 
 #define SYS_ify(syscall_name)  (__NR_##syscall_name)
 
+/* Minimal INTERNAL_SYSCALL support macros imported from glibc.
+ * On i386 the kernel returns -errno in %eax; unsigned values in
+ * [0xfffff001, 0xffffffff] (i.e. -4095..-1) signal an error, so
+ * ERROR_P tests that range and ERRNO negates the value back.
+ * DECL is a no-op here because i386 needs no per-call error slot.  */
+#define INTERNAL_SYSCALL_DECL(err) do { } while (0)
+
+#define INTERNAL_SYSCALL_ERROR_P(val, err) \
+  ((unsigned int) (val) >= 0xfffff001u)
+
+#define INTERNAL_SYSCALL_ERRNO(val, err)        (-(val))
+
 /* We need some help from the assembler to generate optimal code.  We
    define some macros here which later will be used.  */
 
@@ -145,24 +152,29 @@ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6) \
 { \
 return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
 }
-
 #define INLINE_SYSCALL(name, nr, args...) \
-  ({                                                                         \
-    unsigned int resultvar;                                                  \
-    __asm__ __volatile__ (                                                           \
-    LOADARGS_##nr                                                            \
-    "movl %1, %%eax\n\t"                                                     \
-    "int $0x80\n\t"                                                          \
-    RESTOREARGS_##nr                                                         \
-    : "=a" (resultvar)                                                       \
-    : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc");                 \
-    if (resultvar >= 0xfffff001)                                             \
-      {                                                                              \
-       __set_errno (-resultvar);                                             \
-       resultvar = 0xffffffff;                                               \
-      }                                                                              \
+  ({                                                                          \
+    unsigned int result = INTERNAL_SYSCALL (name, , nr, args);                \
+    if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, ), 0))            \
+      {                                                                       \
+        __set_errno (INTERNAL_SYSCALL_ERRNO (result, ));                      \
+        result = 0xffffffff;                                                  \
+      }                                                                       \
+    (int) result; })
+
+#define INTERNAL_SYSCALL(name, err, nr, args...) \
+  ({                                                                          \
+    unsigned int resultvar;                                                   \
+    __asm__ __volatile__ (                                                    \
+    LOADARGS_##nr                                                             \
+    "movl %1, %%eax\n\t"                                                      \
+    "int $0x80\n\t"                                                           \
+    RESTOREARGS_##nr                                                          \
+    : "=a" (resultvar)                                                        \
+    : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc");                  \
     (int) resultvar; })
 
+
 #define LOADARGS_0
 #define LOADARGS_1 \
     "bpushl .L__X'%k2, %k2\n\t"                                                      \