.long sys_add_key
.long sys_request_key
.long sys_keyctl
- .long sys_ni_syscall // sys_vperfctr_open
- .long sys_ni_syscall // sys_vperfctr_control /* 290 */
- .long sys_ni_syscall // sys_vperfctr_unlink
- .long sys_ni_syscall // sys_vperfctr_iresume
- .long sys_ni_syscall // sys_vperfctr_read
+ .long sys_ioprio_set
+ .long sys_ioprio_get /* 290 */
+ .long sys_inotify_init
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch
+ .long sys_migrate_pages
+ .long sys_openat /* 295 */
+ .long sys_mkdirat
+ .long sys_mknodat
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_newfstatat /* 300 */
+ .long sys_unlinkat
+ .long sys_renameat
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat /* 305 */
+ .long sys_fchmodat
+ .long sys_faccessat
+ .long sys_pselect6
+ .long sys_ppoll
syscall_table_size = (. - sys_call_table)
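The new slots line up with the __NR_* values added to unistd.h further down; any slot a kernel has not wired up still points at sys_ni_syscall and is reported to userspace as ENOSYS. A minimal userspace probe, purely illustrative and not part of this patch, could check for the new entries like this:

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* 291 is the __NR_inotify_init value defined below */
		long ret = syscall(291);

		if (ret < 0 && errno == ENOSYS)
			printf("slot 291 still routes to sys_ni_syscall\n");
		else
			printf("inotify_init returned fd %ld, new table present\n", ret);
		return 0;
	}
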
}
/*
- * __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
- */
-void __iounmap(void *addr, unsigned long size)
-{
-}
-
-/*
* Set new cache mode for some kernel address space.
* The caller must push data for that range itself, if such data may already
* be in the cache.
switch (sizeof(__xg_orig)) { \
case 4: \
asm volatile( \
- "swap%I0 %2,%M0" \
- : "+m"(*__xg_ptr), "=&r"(__xg_orig) \
- : "r"(x) \
+ "swap%I0 %M0,%1" \
+ : "+m"(*__xg_ptr), "=r"(__xg_orig) \
+ : "1"(x) \
: "memory" \
); \
break; \
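The rewritten constraints matter because every caller of xchg() relies on getting back the value that was previously in memory, atomically with the store of the new one. A minimal sketch of the classic test-and-set use (sketch_lock, sketch_unlock and lock_word are illustrative names, not kernel API):

	static volatile unsigned long lock_word;	/* 0 = free, 1 = held */

	static inline void sketch_lock(void)
	{
		/* spin until the old value was 0, i.e. this CPU made the 0 -> 1 transition */
		while (xchg(&lock_word, 1) != 0)
			cpu_relax();
	}

	static inline void sketch_unlock(void)
	{
		xchg(&lock_word, 0);
	}
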
flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE);
}
+/*
+ * permit ptrace to access another process's address space through the icache
+ * and the dcache
+ */
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy((dst), (src), (len)); \
+ flush_icache_user_range((vma), (page), (vaddr), (len)); \
+} while(0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy((dst), (src), (len))
#endif /* _ASM_CACHEFLUSH_H */
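These are the macros the generic ptrace path (access_process_vm and friends) uses once it has looked a page up with get_user_pages(); the write side must flush the icache for the range so newly planted breakpoints are seen by the traced task. Roughly, as a sketch of the caller rather than code from this patch:

	void *maddr = kmap(page);

	if (write) {
		/* copy into the traced process and keep its icache coherent */
		copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
		set_page_dirty_lock(page);
	} else {
		copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes);
	}
	kunmap(page);

Here page, vma, addr, buf, offset, bytes and write all come from the surrounding get_user_pages() loop in the generic code.
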
#define IOMAP_WRITETHROUGH 3
extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
-extern void __iounmap(void __iomem *addr, unsigned long size);
static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
{
extern unsigned long search_exception_table(unsigned long addr);
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len)
-
#endif /* _ASM_UACCESS_H */
#define __NR_add_key 286
#define __NR_request_key 287
#define __NR_keyctl 288
-#define __NR_vperfctr_open 289
-#define __NR_vperfctr_control (__NR_perfctr_info+1)
-#define __NR_vperfctr_unlink (__NR_perfctr_info+2)
-#define __NR_vperfctr_iresume (__NR_perfctr_info+3)
-#define __NR_vperfctr_read (__NR_perfctr_info+4)
+#define __NR_ioprio_set 289
+#define __NR_ioprio_get 290
+#define __NR_inotify_init 291
+#define __NR_inotify_add_watch 292
+#define __NR_inotify_rm_watch 293
+#define __NR_migrate_pages 294
+#define __NR_openat 295
+#define __NR_mkdirat 296
+#define __NR_mknodat 297
+#define __NR_fchownat 298
+#define __NR_futimesat 299
+#define __NR_newfstatat 300
+#define __NR_unlinkat 301
+#define __NR_renameat 302
+#define __NR_linkat 303
+#define __NR_symlinkat 304
+#define __NR_readlinkat 305
+#define __NR_fchmodat 306
+#define __NR_faccessat 307
+#define __NR_pselect6 308
+#define __NR_ppoll 309
-#define NR_syscalls 294
+#define NR_syscalls 310
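Most of the new numbers are the *at() family, which resolve their path relative to a directory file descriptor rather than the current working directory (AT_FDCWD gives the old behaviour). A hedged userspace illustration, invoking openat by number since the libc of the day had no wrapper; open_in_dir is an illustrative name, not an existing helper:

	#include <fcntl.h>
	#include <unistd.h>

	#define __NR_openat	295	/* value from the list above */

	/* open "name" inside the directory referred to by dirfd, read-only */
	static int open_in_dir(int dirfd, const char *name)
	{
		return syscall(__NR_openat, dirfd, name, O_RDONLY);
	}
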
/*
* process the return value of a syscall, consigning it to one of two possible fates