config RX
def_bool y
- select EMBEDDED
select HAVE_CLK
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KERNEL_GZIP
select HAVE_DMA_ATTRS
select HAVE_GENERIC_DMA_COHERENT
+ select GENERIC_ATOMIC64
+ select HAVE_UID16
+ select VIRT_TO_BUS
config MMU
def_bool n
config CPU_RX610
bool
- select RX_IPR
+ select RX_ICUA
config CPU_RX62N
bool
-include include/asm-generic/Kbuild.asm
-
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += unistd.h
+generic-y += asm-offsets.h
+generic-y += auxvec.h
+generic-y += barrier.h
+generic-y += bugs.h
+generic-y += cacheflush.h
+generic-y += checksum.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += exec.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hash.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += linkage.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
+generic-y += mman.h
+generic-y += mmu.h
+generic-y += mmu_context.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += pgalloc.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += preempt.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += sizes.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += spinlock.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += timex.h
+generic-y += tlbflush.h
+generic-y += trace_clock.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += uaccess.h
+generic-y += ucontext.h
+generic-y += unaligned.h
+generic-y += vga.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
-#ifndef __ASM_RX_ATOMIC_H__
-#define __ASM_RX_ATOMIC_H__
+#ifndef __ARCH_RX_ATOMIC__
+#define __ARCH_RX_ATOMIC__
#include <linux/types.h>
+#include <asm/cmpxchg.h>
/*
* Atomic operations that C can't guarantee us. Useful for
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = i)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#include <asm/system.h>
#include <linux/kernel.h>
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- unsigned long psw;
- local_irq_save(psw);
- ret = v->counter += i;
- local_irq_restore(psw);
- return ret;
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ flags = arch_local_irq_save(); \
+ ret = v->counter c_op i; \
+ arch_local_irq_restore(flags); \
+ return ret; \
}
-#define atomic_add(i, v) atomic_add_return(i, v)
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
- int ret;
- unsigned long psw;
- local_irq_save(psw);
- ret = v->counter -= i;
- local_irq_restore(psw);
- return ret;
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ flags = arch_local_irq_save(); \
+ v->counter c_op i; \
+ arch_local_irq_restore(flags); \
}
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
- int ret;
- unsigned long psw;
- local_irq_save(psw);
- v->counter++;
- ret = v->counter;
- local_irq_restore(psw);
- return ret;
-}
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
-#define atomic_inc(v) atomic_inc_return(v)
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+#define atomic_add(i, v) (void)atomic_add_return(i, v)
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
- int ret;
- unsigned long psw;
- local_irq_save(psw);
- --v->counter;
- ret = v->counter;
- local_irq_restore(psw);
- return ret;
-}
+#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-#define atomic_dec(v) atomic_dec_return(v)
+#define atomic_inc_return(v) atomic_add_return(1, v)
+#define atomic_dec_return(v) atomic_sub_return(1, v)
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
- int ret;
- unsigned long psw;
- local_irq_save(psw);
- --v->counter;
- ret = v->counter;
- local_irq_restore(psw);
- return ret == 0;
-}
+#define atomic_inc(v) (void)atomic_inc_return(v)
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+#define atomic_dec(v) (void)atomic_dec_return(v)
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
- unsigned long psw;
+ unsigned long flags;
- local_irq_save(psw);
+ flags = arch_local_irq_save();
ret = v->counter;
if (likely(ret == old))
v->counter = new;
- local_irq_restore(psw);
+ arch_local_irq_restore(flags);
return ret;
}
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
- return c;
-}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
-{
- unsigned long psw;
-
- local_irq_save(psw);
- v->counter &= ~mask;
- local_irq_restore(psw);
-}
-
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
-{
- unsigned long psw;
+ int ret;
+ unsigned long flags;
- local_irq_save(psw);
- v->counter &= mask;
- local_irq_restore(psw);
+ flags = arch_local_irq_save();
+ ret = v->counter;
+ if (ret != u)
+ v->counter += a;
+ arch_local_irq_restore(flags);
+ return ret;
}
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#endif /* __ASM_RX_ATOMIC_H__ */
+#endif /* __ARCH_RX_ATOMIC__ */
#ifndef __ASM_RX_CACHE_H__
#define __ASM_RX_CACHE_H__
-#define L1_CACHE_BYTES 16
+/* bytes per L1 cache line */
+#define L1_CACHE_SHIFT 2
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __cacheline_aligned
#define ____cacheline_aligned
#include <asm-generic/dma-coherent.h>
#include <asm-generic/dma-mapping-common.h>
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops->dma_supported)
- return ops->dma_supported(dev, mask);
-
- return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
- if (ops->set_dma_mask)
- return ops->set_dma_mask(dev, mask);
-
- *dev->dma_mask = mask;
-
- return 0;
-}
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops->mapping_error)
- return ops->mapping_error(dev, dma_addr);
-
- return dma_addr == 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- void *memory;
-
- if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
- return memory;
- if (!ops->alloc_coherent)
- return NULL;
-
- memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
-
- return memory;
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (dma_release_from_coherent(dev, get_order(size), vaddr))
- return;
-
- if (ops->free_coherent)
- ops->free_coherent(dev, size, vaddr, dma_handle);
-}
#endif /* __ASM_RX_DMA_MAPPING_H */
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
+#define ELF_RX 173
#define ELF_ARCH EM_RX
#define USE_ELF_CORE_DUMP
+++ /dev/null
-#ifndef __ASM_RX_HARDIRQ_H__
-#define __ASM_RX_HARDIRQ_H__
-
-#include <asm/irq.h>
-
-#define HARDIRQ_BITS 8
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-#include <asm-generic/hardirq.h>
-
-#endif
struct pt_regs *regs);
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.esp0 - 1)
-#define KSTK_EIP(tsk) (task_pt_regs(task)->pc)
-#define KSTK_ESP(tsk) (task_pt_regs(task)->r[0])
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->r[0])
#define cpu_relax() barrier()
#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
#define user_mode(regs) (((regs)->psw & (1<<20)))
-#define instruction_pointer(regs) ((regs)->pc)
-#define profile_pc(regs) instruction_pointer(regs)
extern void show_regs(struct pt_regs *);
#endif /* __ASSEMBLY__ */
#define OFF_R1 (1*4)
#define OFF_USP (16*4)
#define OFF_VEC (17*4)
#define OFF_PSW (19*4)
+
+#define GET_FP(regs) (0)
+#define SET_FP(regs, val) do { } while (0)
+#include <asm-generic/ptrace.h>
+
#endif /* __ASM_RX_PTRACE_H__ */
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- unsigned long tmp, flags;
-
- switch (size) {
- case 1:
- local_irq_save(flags);
- tmp = *(u8 *)__xg(ptr);
- *(u8 *)__xg(ptr) = x;
- local_irq_restore(flags);
- break;
- case 2:
- local_irq_save(flags);
- tmp = *(u16 *)__xg(ptr);
- *(u16 *)__xg(ptr) = x;
- local_irq_restore(flags);
- break;
- case 4:
- __asm__ volatile("xchg %0, %1"
- :"=m"(__xg(ptr)),"=r"(x));
- break;
- default:
- tmp = 0;
- }
- return tmp;
-}
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
#define arch_align_stack(x) (x)
#endif /* __ASM_RX_SYSTEM_H__ */
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
+++ /dev/null
-#ifndef __ASM_RX_UNISTD_H__
-#define __ASM_RX_UNISTD_H__
-
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
-#define __NR_break 17
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
-#define __NR_access 33
-#define __NR_nice 34
-#define __NR_ftime 35
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_prof 44
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-#define __NR_lock 53
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_mpx 56
-#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_profil 98
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_ioperm 101
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
-#define __NR_iopl 110
-#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86old 113
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_modify_ldt 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR_llseek 140
-#define __NR_getdents 141
-#define __NR_newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR_sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_vm86 166
-#define __NR_query_module 167
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_chown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
-#define __NR_getpmsg 188 /* some people actually want streams */
-#define __NR_putpmsg 189 /* some people actually want streams */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_lchown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_chown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-#define __NR_mincore 218
-#define __NR_madvise 219
-#define __NR_madvise1 219 /* delete when C lib stub is removed */
-#define __NR_getdents64 220
-#define __NR_fcntl64 221
-/* 222 is unused */
-/* 223 is unused */
-#define __NR_gettid 224
-#define __NR_readahead 225
-#define __NR_setxattr 226
-#define __NR_lsetxattr 227
-#define __NR_fsetxattr 228
-#define __NR_getxattr 229
-#define __NR_lgetxattr 230
-#define __NR_fgetxattr 231
-#define __NR_listxattr 232
-#define __NR_llistxattr 233
-#define __NR_flistxattr 234
-#define __NR_removexattr 235
-#define __NR_lremovexattr 236
-#define __NR_fremovexattr 237
-#define __NR_tkill 238
-#define __NR_sendfile64 239
-#define __NR_futex 240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
-#define __NR_set_thread_area 243
-#define __NR_get_thread_area 244
-#define __NR_io_setup 245
-#define __NR_io_destroy 246
-#define __NR_io_getevents 247
-#define __NR_io_submit 248
-#define __NR_io_cancel 249
-#define __NR_fadvise64 250
-/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
-#define __NR_exit_group 252
-#define __NR_lookup_dcookie 253
-#define __NR_epoll_create 254
-#define __NR_epoll_ctl 255
-#define __NR_epoll_wait 256
-#define __NR_remap_file_pages 257
-#define __NR_set_tid_address 258
-#define __NR_timer_create 259
-#define __NR_timer_settime (__NR_timer_create+1)
-#define __NR_timer_gettime (__NR_timer_create+2)
-#define __NR_timer_getoverrun (__NR_timer_create+3)
-#define __NR_timer_delete (__NR_timer_create+4)
-#define __NR_clock_settime (__NR_timer_create+5)
-#define __NR_clock_gettime (__NR_timer_create+6)
-#define __NR_clock_getres (__NR_timer_create+7)
-#define __NR_clock_nanosleep (__NR_timer_create+8)
-#define __NR_statfs64 268
-#define __NR_fstatfs64 269
-#define __NR_tgkill 270
-#define __NR_utimes 271
-#define __NR_fadvise64_64 272
-#define __NR_vserver 273
-#define __NR_mbind 274
-#define __NR_get_mempolicy 275
-#define __NR_set_mempolicy 276
-#define __NR_mq_open 277
-#define __NR_mq_unlink (__NR_mq_open+1)
-#define __NR_mq_timedsend (__NR_mq_open+2)
-#define __NR_mq_timedreceive (__NR_mq_open+3)
-#define __NR_mq_notify (__NR_mq_open+4)
-#define __NR_mq_getsetattr (__NR_mq_open+5)
-#define __NR_kexec_load 283
-#define __NR_waitid 284
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_add_key 286
-#define __NR_request_key 287
-#define __NR_keyctl 288
-#define __NR_ioprio_set 289
-#define __NR_ioprio_get 290
-#define __NR_inotify_init 291
-#define __NR_inotify_add_watch 292
-#define __NR_inotify_rm_watch 293
-#define __NR_migrate_pages 294
-#define __NR_openat 295
-#define __NR_mkdirat 296
-#define __NR_mknodat 297
-#define __NR_fchownat 298
-#define __NR_futimesat 299
-#define __NR_fstatat64 300
-#define __NR_unlinkat 301
-#define __NR_renameat 302
-#define __NR_linkat 303
-#define __NR_symlinkat 304
-#define __NR_readlinkat 305
-#define __NR_fchmodat 306
-#define __NR_faccessat 307
-#define __NR_pselect6 308
-#define __NR_ppoll 309
-#define __NR_unshare 310
-#define __NR_set_robust_list 311
-#define __NR_get_robust_list 312
-#define __NR_splice 313
-#define __NR_sync_file_range 314
-#define __NR_tee 315
-#define __NR_vmsplice 316
-#define __NR_move_pages 317
-#define __NR_getcpu 318
-#define __NR_epoll_pwait 319
-#define __NR_utimensat 320
-#define __NR_signalfd 321
-#define __NR_timerfd_create 322
-#define __NR_eventfd 323
-#define __NR_fallocate 324
-#define __NR_timerfd_settime 325
-#define __NR_timerfd_gettime 326
-#define __NR_signalfd4 327
-#define __NR_eventfd2 328
-#define __NR_epoll_create1 329
-#define __NR_dup3 330
-#define __NR_pipe2 331
-#define __NR_inotify_init1 332
-#define __NR_preadv 333
-#define __NR_pwritev 334
-#define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_event_open 336
-
-#ifdef __KERNEL__
-
-#define NR_syscalls 337
-
-#define __ARCH_WANT_IPC_PARSE_VERSION
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_OLD_READDIR
-
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-
-#endif /* __KERNEL__ */
-#endif /* __ASM_RX_UNISTD_H__ */
extra-y := vmlinux.lds
-obj-y := head.o process.o traps.o irq.o sys_rx.o time.o signal.o \
- setup.o syscalls.o entry.o init_task.o ptrace.o dma-nommu.o \
+obj-y := head.o process.o traps.o irq.o time.o signal.o \
+ setup.o syscalls.o entry.o ptrace.o dma-nommu.o \
timer/ cpu/
obj-$(CONFIG_EARLY_PRINTK) += early-printk.o
#include <linux/export.h>
#include <asm/io.h>
+#define IR (0x00087000)
#define IER (0x00087200)
#define IPR (0x00087300)
static void disable_icua_irq(struct irq_data *data);
static void enable_icua_irq(struct irq_data *data);
-static void dummy_ack(struct irq_data *data);
+static void icua_eoi(struct irq_data *data);
struct irq_chip chip = {
.name = "RX-ICUa",
.irq_mask = disable_icua_irq,
.irq_unmask = enable_icua_irq,
- .irq_ack = dummy_ack,
+ .irq_eoi = icua_eoi,
.irq_mask_ack = disable_icua_irq,
};
__raw_writeb(val, ier);
}
-static void dummy_ack(struct irq_data *data)
+static void icua_eoi(struct irq_data *data)
{
+	__raw_writeb(0, (void __iomem *)(IR + data->irq));
}
void __init setup_rx_irq_desc(void)
}
disable_irq_nosync(i);
- irq_set_chip_and_handler_name(i, &chip, handle_simple_irq, "level");
+ irq_set_chip_and_handler_name(i, &chip, handle_fasteoi_irq, "icua");
}
for (i = 0; i < 0x90; i++)
__raw_writeb(1, (void __iomem *)(IPR + i));
#define IR (0x00087000)
#define IER (0x00087200)
-static void disable_ipr_irq(unsigned int irq)
+static void disable_ipr_irq(struct irq_data *data)
{
- void *ipr = get_irq_chip_data(irq);
+	void *ipr = irq_data_get_irq_chip_data(data);
__raw_writeb(0, ipr);
}
-static void enable_ipr_irq(unsigned int irq)
+static void enable_ipr_irq(struct irq_data *data)
{
unsigned int offset;
unsigned int bit;
u8 ier;
- void *ipr = get_irq_chip_data(irq);
+	void *ipr = irq_data_get_irq_chip_data(data);
__raw_writeb(1, ipr);
- offset = irq / 8;
- bit = irq % 8;
+ offset = data->irq / 8;
+ bit = data->irq % 8;
ier = __raw_readb((void __iomem *)(IER + offset));
ier |= (1 << bit); /* enable IRQ on ICU */
__raw_writeb(ier, (void __iomem *)(IER + offset));
#endif
};
-void __init setup_rx_irq_desc(struct irq_chip *chip)
+struct irq_chip chip = {
+ .name = "RX-IPR",
+ .irq_mask = disable_ipr_irq,
+ .irq_unmask = enable_ipr_irq,
+ .irq_mask_ack = disable_ipr_irq,
+};
+
+void __init setup_rx_irq_desc(void)
{
int i;
- chip->mask = disable_ipr_irq;
- chip->unmask = enable_ipr_irq;
- chip->mask_ack = disable_ipr_irq;
-
for (i = 0; i < ARRAY_SIZE(irq_info); i++) {
- struct irq_desc *irq_desc;
-
- irq_desc = irq_to_desc_alloc_node(irq_info[i].irq, numa_node_id());
- if (unlikely(!irq_desc)) {
+		if (unlikely(irq_alloc_desc_at(irq_info[i].irq, numa_node_id()) < 0)) {
printk(KERN_INFO "can not get irq_desc for %d\n",
irq_info[i].irq);
continue;
}
disable_irq_nosync(irq_info[i].irq);
- set_irq_chip_and_handler_name(irq_info[i].irq, chip, handle_level_irq,"");
- set_irq_chip_data(irq_info[i].irq, (void *)(0x00087300 +irq_info[i].ipr));
- disable_ipr_irq(irq_info[i].irq);
+ irq_set_chip_and_handler_name(irq_info[i].irq, &chip, handle_level_irq,"");
+ irq_set_chip_data(irq_info[i].irq, (void *)(0x00087300 +irq_info[i].ipr));
+ disable_ipr_irq(irq_get_irq_data(irq_info[i].irq));
}
}
#include <linux/platform_device.h>
#include <linux/serial_sci.h>
-static struct plat_sci_port sci_platform_data[] = {
- /* SCI0 to SCI2 */
- {
- .mapbase = 0x00088240,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 214, 215, 216, 0 },
- }, {
- .mapbase = 0x00088248,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 218, 219, 220, 0 },
- }, {
- .mapbase = 0x00088250,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 222, 223, 224, 0 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port sci0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .scscr = SCSCR_RE | SCSCR_TE,
+};
+static struct plat_sci_port sci1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .scscr = SCSCR_RE | SCSCR_TE,
+};
+static struct plat_sci_port sci2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .scscr = SCSCR_RE | SCSCR_TE,
+};
+
+static struct resource sci0_resource[] = {
+ DEFINE_RES_MEM(0x00088240, 8),
+ DEFINE_RES_IRQ(214),
+};
+
+static struct resource sci1_resource[] = {
+ DEFINE_RES_MEM(0x00088248, 8),
+ DEFINE_RES_IRQ(218),
+};
+
+static struct resource sci2_resource[] = {
+ DEFINE_RES_MEM(0x00088250, 8),
+ DEFINE_RES_IRQ(222),
};
static struct platform_device sci_device[] = {
{
.name = "sh-sci",
.id = 0,
+ .resource = sci0_resource,
+ .num_resources = ARRAY_SIZE(sci0_resource),
.dev = {
- .platform_data = &sci_platform_data[0],
+ .platform_data = &sci0_platform_data,
},
},
{
.name = "sh-sci",
.id = 1,
+ .resource = sci1_resource,
+ .num_resources = ARRAY_SIZE(sci1_resource),
.dev = {
- .platform_data = &sci_platform_data[1],
+ .platform_data = &sci1_platform_data,
},
},
{
.name = "sh-sci",
.id = 2,
+ .resource = sci2_resource,
+ .num_resources = ARRAY_SIZE(sci2_resource),
.dev = {
- .platform_data = &sci_platform_data[2],
+ .platform_data = &sci2_platform_data,
},
},
};
EXPORT_SYMBOL(dma_ops);
static void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
void *ret;
int order = get_order(size);
}
static void dma_generic_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
int order = get_order(size);
unsigned long pfn = dma_handle >> PAGE_SHIFT;
}
struct dma_map_ops rx_dma_ops = {
- .alloc_coherent = dma_generic_alloc_coherent,
- .free_coherent = dma_generic_free_coherent,
- .map_page = nommu_dma_map_page,
- .map_sg = nommu_dma_map_sg,
- .is_phys = 1,
+ .alloc = dma_generic_alloc_coherent,
+ .free = dma_generic_free_coherent,
+ .map_page = nommu_dma_map_page,
+ .map_sg = nommu_dma_map_sg,
+ .is_phys = 1,
};
void __init no_iommu_init(void)
};
#endif
-static struct console *early_console = NULL;
-
static int __init setup_early_printk(char *buf)
{
if (!buf)
.global rx_int_table
.global ret_from_fork
+.global ret_from_kernel_thread
.global rx_exception_handler
.section .text
tst #_TIF_WORK_SYSCALL_MASK,r14
bnz 4f
1:
- cmp #NR_syscalls,r15
+ cmp #__NR_syscalls,r15
blt 2f
mov.l #-ENOSYS,r1 ; invalid no
bra 3f
2:
shll #2,r15
- add #syscall_table,r15
+ add #_sys_call_table,r15
mov.l [r15],r15
mov.l r7,[-r0]
mov.l r5,[-r0]
4:
;; syscall trace enter
mov.l r0,r1
- bsr syscall_trace_enter ; syscall_trace_enter(pt_regs *)
+ bsr do_syscall_trace_enter ; syscall_trace_enter(pt_regs *)
mov.l OFF_R1[r0],r1
mov.l OFF_R2[r0],r2
mov.l OFF_R3[r0],r3
bsr schedule_tail
bra ret_from_exception
+ret_from_kernel_thread:
+ bsr schedule_tail
+ mov.l OFF_R1[r0],r1
+ mov.l OFF_R2[r0],r2
+ jsr r2
+ bra ret_from_exception
+
.end
add #1,r1
sub #1,r4
bne 1b
+ mov.b #0,[r1]
#endif
/* clear BSS */
mov.l #_sbss,r1
regs->r[14],regs->r[15]);
}
-/*
- * Create a kernel thread
- */
-static void __noreturn kernel_thread_helper(int dummy, int (*fn)(void *), void *arg)
-{
- fn(arg);
- do_exit(-1);
-}
-
-int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-{
- struct pt_regs regs;
-
- memset(®s, 0, sizeof(regs));
- regs.r[2] = (unsigned long)fn;
- regs.r[3] = (unsigned long)arg;
-
- regs.pc = (unsigned long)kernel_thread_helper;
- regs.psw = 0;
-
- /* Ok, create the new process.. */
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
- ®s, 0, NULL, NULL);
-}
-
asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
int copy_thread(unsigned long clone_flags,
unsigned long usp, unsigned long topstk,
- struct task_struct * p, struct pt_regs * regs)
+ struct task_struct * p)
{
struct pt_regs *childregs =
(struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
- *childregs = *regs;
- childregs->usp = usp;
- childregs->r[1] = 0;
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ p->thread.pc = (unsigned long) ret_from_kernel_thread;
+ childregs->r[1] = topstk; /* arg */
+ childregs->r[2] = usp; /* fn */
+ } else {
+ *childregs = *current_pt_regs();
+ childregs->r[1] = 0;
+ p->thread.pc = (unsigned long)ret_from_fork;
+ childregs->usp = usp;
+ }
p->thread.sp = (unsigned long)childregs;
- p->thread.pc = (unsigned long)ret_from_fork;
return 0;
}
-asmlinkage int sys_fork(void)
-{
- return -EINVAL;
-}
-
-asmlinkage int rx_clone(struct pt_regs *regs)
+asmlinkage int sys_clone(struct pt_regs *regs)
{
unsigned long clone_flags = regs->r[1];
unsigned long newsp = regs->r[2];
if (!newsp)
newsp = regs->usp;
- return do_fork(clone_flags, newsp, regs, 0,
+ return do_fork(clone_flags, newsp, 0,
(int __user *)parent_tidptr,
(int __user *)child_tidptr);
}
-asmlinkage int rx_vfork(struct pt_regs *regs)
-{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->usp, regs,
- 0, NULL, NULL);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(const char __user *ufilename, const char __user * __user *uargv,
- const char __user * __user *uenvp, int dummy, ...)
-{
- struct pt_regs *regs = (struct pt_regs *)
- ((unsigned char *)&dummy + 8);
- int error;
- char *filename;
-
- filename = getname(ufilename);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
-
- error = do_execve(filename, uargv, uenvp, regs);
- putname(filename);
-out:
- return error;
-}
-
unsigned long get_wchan(struct task_struct *p)
{
int count = 0;
{
}
-asmlinkage long syscall_trace_enter(struct pt_regs *regs)
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
- secure_computing(regs->r[8]);
-
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
trace_sys_enter(regs, regs->r[8]);
if (unlikely(current->audit_context))
- audit_syscall_entry(EM_RX|__AUDIT_ARCH_LE, regs->r[8],
- regs->r[1], regs->r[2],
- regs->r[3], regs->r[4]);
+ audit_syscall_entry(regs->r[1], regs->r[2], regs->r[3],
+ regs->r[4], regs->r[5]);
- return ret ?: regs->r[8];
+ return ret ?: regs->r[1];
}
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/initrd.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
unsigned long memory_start;
unsigned long memory_end;
-#define COMMAND_LINE ((char *)CONFIG_RAMSTART)
+#define COMMAND_LINE ((char *)0x400)
static struct resource code_resource = {
.name = "Kernel code",
};
static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+restore_sigcontext(struct sigcontext __user *sc,
unsigned long *r1)
{
+ struct pt_regs *regs = current_pt_regs();
unsigned int err = 0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
-#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
+#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(r[0]); COPY(r[1]);
COPY(r[2]); COPY(r[3]);
COPY(r[4]); COPY(r[5]);
return err;
}
+asmlinkage int sys_rt_sigreturn(void)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe *frame = (struct rt_sigframe *)(regs->usp - 4);
+ sigset_t set;
+ unsigned long r1;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(&frame->uc.uc_mcontext, &r1))
+ goto badframe;
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return r1;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
return err;
}
-asmlinkage long rx_rt_sigreturn(struct pt_regs *regs)
-{
- struct rt_sigframe __user *frame;
- unsigned long result;
- sigset_t set;
-
- frame = (struct rt_sigframe __user *)(regs->usp - 4);
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
- goto badframe;
-
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->usp) == -EFAULT)
- goto badframe;
-
- return result;
-
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-asmlinkage int rx_sigaltstack(const stack_t *uss, stack_t *uoss,
- struct pt_regs *regs)
-{
- return do_sigaltstack(uss, uoss, regs->usp);
-}
-
/*
* Determine which stack to use..
*/
static inline void __user *
-get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size)
{
/* This is the X/Open sanctioned signal stack switching. */
- if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (ksig->ka.sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
}
0x75, 0x60, 0x08, /* int #0x08 */
0x03}; /* nop (padding) */
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs *regs)
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0;
- int signal;
- frame = get_sigframe(ka, regs->usp, sizeof(*frame));
+ frame = get_sigframe(ksig, regs->usp, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto give_sigsegv;
-
- signal = current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32
- ? current_thread_info()->exec_domain->signal_invmap[sig]
- : sig;
+ return -EFAULT;
- err |= copy_siginfo_to_user(&frame->info, info);
- if (err)
- goto give_sigsegv;
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
- goto give_sigsegv;
+ return -EFAULT;
/* setup retcode */
err |= __put_user(*((u64 *)&__rt_retcode), (u64 *)frame->retcode);
err |= __put_user(frame->retcode, &(frame->pretcode));
if (err)
- goto give_sigsegv;
+ return -EFAULT;
/* Set up registers for signal handler */
regs->usp = (unsigned long)frame;
- regs->r[1] = signal; /* Arg for signal handler */
+ regs->r[1] = ksig->sig; /* Arg for signal handler */
regs->r[2] = (unsigned long)&frame->info;
regs->r[3] = (unsigned long)&frame->uc;
- regs->pc = (unsigned long)ka->sa.sa_handler;
+ regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
set_fs(USER_DS);
-#if 0
- printk("SIG deliver (%s:%d): sp=%p pc=%p\n",
- current->comm, current->pid, frame, regs->pc);
-#endif
-
- return;
+	return 0;
-give_sigsegv:
- force_sigsegv(sig, current);
}
-/*
- * OK, we're invoking a handler
- */
-static int
-handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs, unsigned long saved_r1)
+static void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
- /* Are we from a system call? */
- if (regs->vec >= 0x1000) {
- /* check for system call restart.. */
- switch (regs->r[1]) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
+ /* check for system call restart.. */
+ switch (regs->r[1]) {
+ case -ERESTARTNOHAND:
+ if (!ka)
+ goto do_restart;
+ regs->r[1] = -EINTR;
+ break;
+ case -ERESTART_RESTARTBLOCK:
+ if (!ka) {
+ regs->r[1] = __NR_restart_syscall;
+ regs->pc -= 1;
+ } else
+ regs->r[1] = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->r[1] = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->r[1] = -EINTR;
- break;
- }
- /* fallthrough */
- case -ERESTARTNOINTR:
- regs->r[1] = saved_r1;
- regs->pc -= 3;
break;
}
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+do_restart:
+ regs->pc -= 1;
+ break;
}
- /* Set up the stack frame */
- setup_rt_frame(sig, ka, info, oldset, regs);
-
- if (ka->sa.sa_flags & SA_ONESHOT)
- ka->sa.sa_handler = SIG_DFL;
-
-	spin_lock_irq(&current->sighand->siglock);
-	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&current->blocked,sig);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
- return 0;
+}
+
+static void
+handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+ /* Are we from a system call? */
+ if (regs->vec >= 0x1000)
+ handle_restart(regs, &ksig->ka);
+
+ ret = setup_rt_frame(ksig, oldset, regs);
+
+ signal_setup_done(ret, ksig, 0);
}
/*
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-static int do_signal(struct pt_regs *regs, unsigned long saved_r1)
+static void do_signal(struct pt_regs *regs)
{
- siginfo_t info;
- int signr;
- sigset_t *oldset;
- struct k_sigaction ka;
-
- /*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return 1;
-
- if (try_to_freeze())
- goto no_signal;
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- /* Re-enable any watchpoints before delivering the
- * signal to user space. The processor register will
- * have been cleared if the watchpoint triggered
- * inside the kernel.
- */
+ struct ksignal ksig;
+
+ current->thread.esp0 = (unsigned long) regs;
+ if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
- handle_signal(signr, &ka, &info, oldset, regs, saved_r1);
- return 1;
+ handle_signal(&ksig, regs);
+ return;
}
-
- no_signal:
/* Did we come from a system call? */
- if (regs->vec >= 0x1000) {
- /* Restart the system call - no handlers present */
- if (regs->r[1] == -ERESTARTNOHAND ||
- regs->r[1] == -ERESTARTSYS ||
- regs->r[1] == -ERESTARTNOINTR) {
- regs->r[1] = saved_r1;
- regs->pc -= 3;
- }
- if (regs->r[1] == -ERESTART_RESTARTBLOCK){
- regs->r[1] = saved_r1;
- regs->r[8] = __NR_restart_syscall;
- regs->pc -=3;
- }
- }
- return 0;
+ if (regs->vec >= 0x1000)
+ handle_restart(regs, NULL);
+
+ /* If there's no signal to deliver, we just restore the saved mask. */
+ restore_saved_sigmask();
}
/*
* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
-void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags,
- unsigned long saved_r1)
+asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
{
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs, saved_r1);
+ do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
- if (current->replacement_session_keyring)
- key_replace_session_keyring();
}
}