#include <linux/kernel.h>
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
int ret;
+
local_irq_save(flags);
ret = v->counter += i;
local_irq_restore(flags);
#define atomic_add(i, v) atomic_add_return(i, v)
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
int ret;
+
local_irq_save(flags);
ret = v->counter -= i;
local_irq_restore(flags);
}
#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-static __inline__ int atomic_inc_return(atomic_t *v)
+static inline int atomic_inc_return(atomic_t *v)
{
unsigned long flags;
int ret;
+
local_irq_save(flags);
v->counter++;
ret = v->counter;
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-static __inline__ int atomic_dec_return(atomic_t *v)
+static inline int atomic_dec_return(atomic_t *v)
{
unsigned long flags;
int ret;
+
local_irq_save(flags);
--v->counter;
ret = v->counter;
#define atomic_dec(v) atomic_dec_return(v)
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned long flags;
int ret;
+
local_irq_save(flags);
--v->counter;
ret = v->counter;
return ret;
}
-static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
-	__asm__ __volatile__("stc ccr,r1l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %0,er0\n\t"
-		"and.l %1,er0\n\t"
-		"mov.l er0,%0\n\t"
-		"ldc r1l,ccr"
-		: "=m" (*v) : "g" (~(mask)) :"er0","er1");
+	unsigned char ccr;
+	unsigned long tmp;
+
+	/*
+	 * Atomic read-modify-write: save CCR, mask interrupts
+	 * (orc #0x80), AND the complement of mask into *v, restore CCR.
+	 * ccr and tmp are written by the asm before all inputs are
+	 * consumed, so both must be early-clobber *outputs*; listing
+	 * ccr as an input would read an uninitialized variable and
+	 * write to an input operand.
+	 */
+	__asm__ __volatile__("stc ccr,%w2\n\t"
+			     "orc #0x80,ccr\n\t"
+			     "mov.l %0,%1\n\t"
+			     "and.l %3,%1\n\t"
+			     "mov.l %1,%0\n\t"
+			     "ldc %w2,ccr"
+			     : "=m"(*v), "=&r"(tmp), "=&r"(ccr)
+			     : "g"(~(mask)));
}
-static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
+static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
-	__asm__ __volatile__("stc ccr,r1l\n\t"
-		"orc #0x80,ccr\n\t"
-		"mov.l %0,er0\n\t"
-		"or.l %1,er0\n\t"
-		"mov.l er0,%0\n\t"
-		"ldc r1l,ccr"
-		: "=m" (*v) : "g" (mask) :"er0","er1");
+	unsigned char ccr;
+	unsigned long tmp;
+
+	/*
+	 * Atomic read-modify-write: save CCR, mask interrupts, OR the
+	 * mask into *v, restore CCR.  The operand is (mask), NOT
+	 * ~(mask): the old code used "g" (mask), and OR-ing with the
+	 * complement would set every bit *outside* the mask.  ccr and
+	 * tmp are written before the input is consumed, so both are
+	 * early-clobber outputs.
+	 */
+	__asm__ __volatile__("stc ccr,%w2\n\t"
+			     "orc #0x80,ccr\n\t"
+			     "mov.l %0,%1\n\t"
+			     "or.l %3,%1\n\t"
+			     "mov.l %1,%0\n\t"
+			     "ldc %w2,ccr"
+			     : "=m"(*v), "=&r"(tmp), "=&r"(ccr)
+			     : "g"(mask));
}
/* Atomic operations are already serializing */
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
"shlr.l %2\n\t"
"adds #1,%0\n\t"
"bcs 1b"
- : "=r" (result)
- : "0" (result),"r" (word));
+ : "=r"(result)
+ : "0"(result), "r"(word));
return result;
}
-#define H8300_GEN_BITOP_CONST(OP,BIT) \
+#define H8300_GEN_BITOP_CONST(OP, BIT) \
case BIT: \
__asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \
- break;
+ break
-#define H8300_GEN_BITOP(FNAME,OP) \
-static __inline__ void FNAME(int nr, volatile unsigned long* addr) \
+#define H8300_GEN_BITOP(FNAME, OP) \
+static inline void FNAME(int nr, volatile unsigned long *addr) \
{ \
volatile unsigned char *b_addr; \
b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \
if (__builtin_constant_p(nr)) { \
switch(nr & 7) { \
- H8300_GEN_BITOP_CONST(OP,0) \
- H8300_GEN_BITOP_CONST(OP,1) \
- H8300_GEN_BITOP_CONST(OP,2) \
- H8300_GEN_BITOP_CONST(OP,3) \
- H8300_GEN_BITOP_CONST(OP,4) \
- H8300_GEN_BITOP_CONST(OP,5) \
- H8300_GEN_BITOP_CONST(OP,6) \
- H8300_GEN_BITOP_CONST(OP,7) \
+ H8300_GEN_BITOP_CONST(OP, 0); \
+ H8300_GEN_BITOP_CONST(OP, 1); \
+ H8300_GEN_BITOP_CONST(OP, 2); \
+ H8300_GEN_BITOP_CONST(OP, 3); \
+ H8300_GEN_BITOP_CONST(OP, 4); \
+ H8300_GEN_BITOP_CONST(OP, 5); \
+ H8300_GEN_BITOP_CONST(OP, 6); \
+ H8300_GEN_BITOP_CONST(OP, 7); \
} \
} else { \
- __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \
+ __asm__(OP " %w0,@%1"::"r"(nr), "r"(b_addr):"memory");\
} \
}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
-H8300_GEN_BITOP(set_bit ,"bset")
-H8300_GEN_BITOP(clear_bit ,"bclr")
-H8300_GEN_BITOP(change_bit,"bnot")
-#define __set_bit(nr,addr) set_bit((nr),(addr))
-#define __clear_bit(nr,addr) clear_bit((nr),(addr))
-#define __change_bit(nr,addr) change_bit((nr),(addr))
+H8300_GEN_BITOP(set_bit, "bset")
+H8300_GEN_BITOP(clear_bit, "bclr")
+H8300_GEN_BITOP(change_bit, "bnot")
+#define __set_bit(nr, addr) set_bit((nr), (addr))
+#define __clear_bit(nr, addr) clear_bit((nr), (addr))
+#define __change_bit(nr, addr) change_bit((nr), (addr))
#undef H8300_GEN_BITOP
#undef H8300_GEN_BITOP_CONST
-static __inline__ int test_bit(int nr, const unsigned long* addr)
+static inline int test_bit(int nr, const unsigned long *addr)
{
- return (*((volatile unsigned char *)addr +
- ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0;
+ return (*((volatile unsigned char *)addr +
+ ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0;
}
#define __test_bit(nr, addr) test_bit(nr, addr)
-#define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT) \
+#define H8300_GEN_TEST_BITOP_CONST_INT(OP, BIT) \
case BIT: \
__asm__("stc ccr,%w1\n\t" \
"orc #0x80,ccr\n\t" \
OP " #" #BIT ",@%4\n\t" \
"rotxl.l %0\n\t" \
"ldc %w1,ccr" \
- : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \
- : "0" (retval),"r" (b_addr) \
- : "memory"); \
- break;
+ : "=r"(retval), "=&r"(ccrsave), "=m"(*b_addr) \
+ : "0" (retval), "r" (b_addr)); \
+ break
-#define H8300_GEN_TEST_BITOP_CONST(OP,BIT) \
+#define H8300_GEN_TEST_BITOP_CONST(OP, BIT) \
case BIT: \
__asm__("bld #" #BIT ",@%3\n\t" \
OP " #" #BIT ",@%3\n\t" \
"rotxl.l %0\n\t" \
- : "=r"(retval),"=m"(*b_addr) \
- : "0" (retval),"r" (b_addr) \
- : "memory"); \
- break;
+ : "=r"(retval), "=m"(*b_addr) \
+ : "0" (retval), "r" (b_addr)); \
+ break
-#define H8300_GEN_TEST_BITOP(FNNAME,OP) \
-static __inline__ int FNNAME(int nr, volatile void * addr) \
+#define H8300_GEN_TEST_BITOP(FNNAME, OP) \
+static inline int FNNAME(int nr, volatile void *addr) \
{ \
int retval = 0; \
char ccrsave; \
volatile unsigned char *b_addr; \
b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \
if (__builtin_constant_p(nr)) { \
- switch(nr & 7) { \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,0) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,1) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,2) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,3) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,4) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,5) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,6) \
- H8300_GEN_TEST_BITOP_CONST_INT(OP,7) \
+ switch (nr & 7) { \
+ H8300_GEN_TEST_BITOP_CONST_INT(OP, 0); \
+ H8300_GEN_TEST_BITOP_CONST_INT(OP, 1); \
+ H8300_GEN_TEST_BITOP_CONST_INT(OP, 2); \
+ H8300_GEN_TEST_BITOP_CONST_INT(OP, 3); \
+ H8300_GEN_TEST_BITOP_CONST_INT(OP, 4); \
+ H8300_GEN_TEST_BITOP_CONST_INT(OP, 5); \
+ H8300_GEN_TEST_BITOP_CONST_INT(OP, 6); \
+ H8300_GEN_TEST_BITOP_CONST_INT(OP, 7); \
} \
} else { \
__asm__("stc ccr,%w1\n\t" \
"inc.l #1,%0\n" \
"1:\n\t" \
"ldc %w1,ccr" \
- : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \
- : "0" (retval),"r" (b_addr),"r"(nr) \
- : "memory"); \
+ : "=r"(retval), "=&r"(ccrsave), "=m"(*b_addr)\
+ : "0" (retval), "r" (b_addr), "r"(nr)); \
} \
return retval; \
} \
\
-static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \
+static inline int __ ## FNNAME(int nr, volatile void *addr) \
{ \
int retval = 0; \
volatile unsigned char *b_addr; \
b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \
if (__builtin_constant_p(nr)) { \
- switch(nr & 7) { \
- H8300_GEN_TEST_BITOP_CONST(OP,0) \
- H8300_GEN_TEST_BITOP_CONST(OP,1) \
- H8300_GEN_TEST_BITOP_CONST(OP,2) \
- H8300_GEN_TEST_BITOP_CONST(OP,3) \
- H8300_GEN_TEST_BITOP_CONST(OP,4) \
- H8300_GEN_TEST_BITOP_CONST(OP,5) \
- H8300_GEN_TEST_BITOP_CONST(OP,6) \
- H8300_GEN_TEST_BITOP_CONST(OP,7) \
+ switch (nr & 7) { \
+ H8300_GEN_TEST_BITOP_CONST(OP, 0); \
+ H8300_GEN_TEST_BITOP_CONST(OP, 1); \
+ H8300_GEN_TEST_BITOP_CONST(OP, 2); \
+ H8300_GEN_TEST_BITOP_CONST(OP, 3); \
+ H8300_GEN_TEST_BITOP_CONST(OP, 4); \
+ H8300_GEN_TEST_BITOP_CONST(OP, 5); \
+ H8300_GEN_TEST_BITOP_CONST(OP, 6); \
+ H8300_GEN_TEST_BITOP_CONST(OP, 7); \
} \
} else { \
__asm__("btst %w4,@%3\n\t" \
"beq 1f\n\t" \
"inc.l #1,%0\n" \
"1:" \
- : "=r"(retval),"=m"(*b_addr) \
- : "0" (retval),"r" (b_addr),"r"(nr) \
+ : "=r"(retval), "=m"(*b_addr) \
+ : "0" (retval), "r" (b_addr), "r"(nr) \
: "memory"); \
} \
return retval; \
}
-H8300_GEN_TEST_BITOP(test_and_set_bit, "bset")
-H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr")
-H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot")
+H8300_GEN_TEST_BITOP(test_and_set_bit, "bset")
+H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr")
+H8300_GEN_TEST_BITOP(test_and_change_bit, "bnot")
#undef H8300_GEN_TEST_BITOP_CONST
#undef H8300_GEN_TEST_BITOP_CONST_INT
#undef H8300_GEN_TEST_BITOP
#include <asm-generic/bitops/ffs.h>
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
{
unsigned long result;
"adds #1,%0\n\t"
"bcc 1b"
: "=r" (result)
- : "0"(result),"r"(word));
+ : "0"(result), "r"(word));
return result;
}
+/*
+ * H8/300 kernel boot parameters
+ */
+
+#ifndef __ASM_H8300_BOOTPARAMS__
+#define __ASM_H8300_BOOTPARAMS__
+
struct bootparams {
short size;
unsigned char gpio_ddr[24];
unsigned int clock_freq;
unsigned int ram_end;
unsigned char *command_line;
-} __attribute__((aligned(2), packed));
+} __packed __aligned(2);
+#endif
* better 64-bit) boundary
*/
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum);
/*
*/
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err);
+ int len, __wsum sum, int *csum_err);
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
static inline __sum16 csum_fold(__wsum sum)
{
-	__asm__("mov.l %0,er0\n\t"
-		"add.w e0,r0\n\t"
-		"xor.w e0,e0\n\t"
-		"rotxl.w e0\n\t"
-		"add.w e0,r0\n\t"
-		"sub.w e0,e0\n\t"
-		"mov.l er0,%0"
+	/*
+	 * Fold the 32-bit partial checksum down to 16 bits: add the
+	 * high word (%e0) into the low word (%f0), recover the final
+	 * carry via rotxl into the zeroed high word and add it back
+	 * (end-around carry), then clear the high word.  Operating on
+	 * the halves of operand 0 directly removes the fixed er0
+	 * scratch register (and its clobber) the old code needed.
+	 */
+	__asm__("add.w %e0,%f0\n\t"
+		"xor.w %e0,%e0\n\t"
+		"rotxl.w %e0\n\t"
+		"add.w %e0,%f0\n\t"
+		"sub.w %e0,%e0\n\t"
	: "=r"(sum)
-	: "0"(sum)
-	: "er0");
+	: "0"(sum));
	return (__force __sum16)~sum;
}
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		   unsigned short proto, __wsum sum)
{
-	__asm__ ("sub.l er0,er0\n\t"
-		"add.l %2,%0\n\t"
-		"addx #0,r0l\n\t"
-		"add.l %3,%0\n\t"
-		"addx #0,r0l\n\t"
-		"add.l %4,%0\n\t"
-		"addx #0,r0l\n\t"
-		"add.l er0,%0\n\t"
+	int tmp;
+
+	/*
+	 * tmp (%1) collects the inter-word carries and is zeroed first
+	 * by "sub.l %1,%1", so it must NOT share a register with daddr:
+	 * a "1" (daddr) matching constraint would wipe daddr before
+	 * "add.l %3,%0" reads it, and daddr would never be summed.
+	 * daddr therefore gets its own "r" input (%3).
+	 */
+	__asm__ ("sub.l %1,%1\n\t"
+		"add.l %3,%0\n\t"
+		"addx #0,%w1\n\t"
+		"add.l %4,%0\n\t"
+		"addx #0,%w1\n\t"
+		"add.l %5,%0\n\t"
+		"addx #0,%w1\n\t"
+		"add.l %1,%0\n\t"
	"bcc 1f\n\t"
	"inc.l #1,%0\n"
	"1:"
-	: "=&r" (sum)
-	: "0" (sum), "r" (daddr), "r" (saddr), "r" (len + proto)
-	:"er0");
+	: "=&r" (sum), "=&r"(tmp)
+	: "0" (sum), "r" (daddr),
+	  "r" (saddr), "r" (len + proto));
	return sum;
}
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
#include <linux/irqflags.h>
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x,
+				   volatile void *ptr, int size)
{
-	unsigned long tmp, flags;
-
-	local_irq_save(flags);
-
-	switch (size) {
-	case 1:
-		__asm__ __volatile__
-		("mov.b %2,%0\n\t"
-		"mov.b %1,%2"
-		: "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 2:
-		__asm__ __volatile__
-		("mov.w %2,%0\n\t"
-		"mov.w %1,%2"
-		: "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	case 4:
-		__asm__ __volatile__
-		("mov.l %2,%0\n\t"
-		"mov.l %1,%2"
-		: "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
-		break;
-	default:
-		tmp = 0;
-	}
-	local_irq_restore(flags);
-	return tmp;
+	unsigned long tmp, flags;
+
+	local_irq_save(flags);
+
+	switch (size) {
+	case 1:
+		/*
+		 * The asm stores the new value to *ptr through an
+		 * *input* "m" operand, a write the compiler cannot see.
+		 * The "memory" clobber must stay so the store is not
+		 * optimized around and xchg() remains a compiler
+		 * barrier.
+		 */
+		__asm__ __volatile__
+			("mov.b %2,%0\n\t"
+			 "mov.b %1,%2"
+			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	case 2:
+		__asm__ __volatile__
+			("mov.w %2,%0\n\t"
+			 "mov.w %1,%2"
+			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	case 4:
+		__asm__ __volatile__
+			("mov.l %2,%0\n\t"
+			 "mov.l %1,%2"
+			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
+		break;
+	default:
+		/* Unsupported size: nothing exchanged, return 0. */
+		tmp = 0;
+	}
+	local_irq_restore(flags);
+	return tmp;
}
#include <asm-generic/cmpxchg-local.h>
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
__asm__ __volatile__ ("1:\n\t"
"dec.l #1,%0\n\t"
"bne 1b"
- :"=r" (loops):"0"(loops));
+ : "=r" (loops) : "0"(loops));
}
/*
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
- * a constant)
+ * a constant)
*/
extern unsigned long loops_per_jiffy;
-#ifndef __ASMH8300_ELF_H
-#define __ASMH8300_ELF_H
+#ifndef __ASM_H8300_ELF_H
+#define __ASM_H8300_ELF_H
/*
* ELF register definitions..
#define ELF_CORE_EFLAGS 0x820000
#endif
-#define ELF_PLAT_INIT(_r) _r->er1 = 0
+#define ELF_PLAT_INIT(_r) (_r)->er1 = 0
#define ELF_EXEC_PAGESIZE 4096
/*
- * include/asm-h8300/flat.h -- uClinux flat-format executables
+ * arch/h8300/include/asm/flat.h -- uClinux flat-format executables
*/
#ifndef __H8300_FLAT_H__
#define flat_get_relocate_addr(rel) (rel & ~0x00000001)
#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
- (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 0xffffffff: 0x00ffffff))
+ (get_unaligned(rp) & (((flags) & FLAT_FLAG_GOTPIC) ? \
+ 0xffffffff : 0x00ffffff))
#define flat_put_addr_at_rp(rp, addr, rel) \
- put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp)
+ put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), (rp))
#endif /* __H8300_FLAT_H__ */
{
#ifndef H8300_IO_NOSWAP
unsigned short r;
+
__asm__("xor.b %w0,%x0\n\t"
"xor.b %x0,%w0\n\t"
"xor.b %w0,%x0"
- :"=r"(r)
- :"0"(v));
+ : "=r"(r)
+ : "0"(v));
return r;
#else
return v;
{
#ifndef H8300_IO_NOSWAP
unsigned long r;
+
__asm__("xor.b %w0,%x0\n\t"
"xor.b %x0,%w0\n\t"
"xor.b %w0,%x0\n\t"
"xor.b %w0,%x0\n\t"
"xor.b %x0,%w0\n\t"
"xor.b %w0,%x0"
- :"=r"(r)
- :"0"(v));
+ : "=r"(r)
+ : "0"(v));
return r;
#else
return v;
}
#define readb(addr) \
- ({ unsigned char __v = \
- *(volatile unsigned char *)((unsigned long)(addr) & 0x00ffffff); \
- __v; })
+ ({ u8 __v = *(volatile u8 *)((uintptr_t)(addr) & 0x00ffffff); __v; })
+
#define readw(addr) \
- ({ unsigned short __v = \
- *(volatile unsigned short *)((unsigned long)(addr) & 0x00ffffff); \
- __v; })
+ ({ u16 __v = *(volatile u16 *)((uintptr_t)(addr) & 0x00ffffff); __v; })
+
#define readl(addr) \
- ({ unsigned long __v = \
- *(volatile unsigned long *)((unsigned long)(addr) & 0x00ffffff); \
- __v; })
-
-#define writeb(b,addr) (void)((*(volatile unsigned char *) \
- ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define writew(b,addr) (void)((*(volatile unsigned short *) \
- ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define writel(b,addr) (void)((*(volatile unsigned long *) \
- ((unsigned long)(addr) & 0x00ffffff)) = (b))
+ ({ u32 __v = *(volatile u32 *)((uintptr_t)(addr) & 0x00ffffff); __v; })
+
+#define writeb(b, addr) (void)((*(volatile u8 *) \
+ ((uintptr_t)(addr) & 0x00ffffff)) = (b))
+
+#define writew(b, addr) (void)((*(volatile u16 *) \
+ ((uintptr_t)(addr) & 0x00ffffff)) = (b))
+
+#define writel(b, addr) (void)((*(volatile u32 *) \
+ ((uintptr_t)(addr) & 0x00ffffff)) = (b))
+
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
+#define writeb_relaxed(b, addr) writeb(b, addr)
+#define writew_relaxed(b, addr) writew(b, addr)
+#define writel_relaxed(b, addr) writel(b, addr)
#define __raw_readb readb
#define __raw_readw readw
static inline int h8300_buswidth(unsigned int addr)
{
- return (*(volatile unsigned char *)ABWCR & (1 << ((addr >> 21) & 7))) == 0;
+ return (*(volatile u8 *)ABWCR & (1 << ((addr >> 21) & 7))) == 0;
}
static inline void io_outsb(unsigned int addr, const void *buf, int len)
volatile unsigned short *ap_w = (volatile unsigned short *) addr;
unsigned char *bp = (unsigned char *) buf;
- if(h8300_buswidth(addr) && (addr & 1)) {
+ if (h8300_buswidth(addr) && (addr & 1)) {
while (len--)
*ap_w = *bp++;
} else {
{
volatile unsigned short *ap = (volatile unsigned short *) addr;
unsigned short *bp = (unsigned short *) buf;
+
while (len--)
*ap = _swapw(*bp++);
}
{
volatile unsigned long *ap = (volatile unsigned long *) addr;
unsigned long *bp = (unsigned long *) buf;
+
while (len--)
*ap = _swapl(*bp++);
}
{
volatile unsigned short *ap = (volatile unsigned short *) addr;
unsigned short *bp = (unsigned short *) buf;
+
while (len--)
*ap = *bp++;
}
{
volatile unsigned long *ap = (volatile unsigned long *) addr;
unsigned long *bp = (unsigned long *) buf;
+
while (len--)
*ap = *bp++;
}
volatile unsigned short *ap_w;
unsigned char *bp = (unsigned char *) buf;
- if(h8300_buswidth(addr)) {
+ if (h8300_buswidth(addr)) {
ap_w = (volatile unsigned short *)(addr & ~1);
while (len--)
*bp++ = *ap_w & 0xff;
{
volatile unsigned short *ap = (volatile unsigned short *) addr;
unsigned short *bp = (unsigned short *) buf;
+
while (len--)
*bp++ = _swapw(*ap);
}
{
volatile unsigned long *ap = (volatile unsigned long *) addr;
unsigned long *bp = (unsigned long *) buf;
+
while (len--)
*bp++ = _swapl(*ap);
}
{
volatile unsigned short *ap = (volatile unsigned short *) addr;
unsigned short *bp = (unsigned short *) buf;
+
while (len--)
*bp++ = *ap;
}
{
volatile unsigned long *ap = (volatile unsigned long *) addr;
unsigned long *bp = (unsigned long *) buf;
+
while (len--)
*bp++ = *ap;
}
* can override them as required
*/
-#define memset_io(a,b,c) memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+#define memset_io(a, b, c) memset((void *)(a), (b), (c))
+#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
#define mmiowb()
-#define inb(addr) ((h8300_buswidth(addr))?readw((addr) & ~1) & 0xff:readb(addr))
+#define inb(addr) ((h8300_buswidth(addr)) ? \
+ readw((addr) & ~1) & 0xff:readb(addr))
#define inw(addr) _swapw(readw(addr))
#define inl(addr) _swapl(readl(addr))
-#define outb(x,addr) ((void)((h8300_buswidth(addr) && \
- ((addr) & 1))?writew(x,(addr) & ~1):writeb(x,addr)))
-#define outw(x,addr) ((void) writew(_swapw(x),addr))
-#define outl(x,addr) ((void) writel(_swapl(x),addr))
+#define outb(x, addr) ((void)((h8300_buswidth(addr) && \
+ ((addr) & 1)) ? \
+ writew(x, (addr) & ~1) : writeb(x, addr)))
+#define outw(x, addr) ((void) writew(_swapw(x), addr))
+#define outl(x, addr) ((void) writel(_swapl(x), addr))
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
-#define outb_p(x,addr) outb(x,addr)
-#define outw_p(x,addr) outw(x,addr)
-#define outl_p(x,addr) outl(x,addr)
+#define outb_p(x, addr) outb(x, addr)
+#define outw_p(x, addr) outw(x, addr)
+#define outl_p(x, addr) outl(x, addr)
-#define outsb(a,b,l) io_outsb(a,b,l)
-#define outsw(a,b,l) io_outsw(a,b,l)
-#define outsl(a,b,l) io_outsl(a,b,l)
+#define outsb(a, b, l) io_outsb(a, b, l)
+#define outsw(a, b, l) io_outsw(a, b, l)
+#define outsl(a, b, l) io_outsl(a, b, l)
-#define insb(a,b,l) io_insb(a,b,l)
-#define insw(a,b,l) io_insw(a,b,l)
-#define insl(a,b,l) io_insl(a,b,l)
+#define insb(a, b, l) io_insb(a, b, l)
+#define insw(a, b, l) io_insw(a, b, l)
+#define insl(a, b, l) io_insl(a, b, l)
#define ioread8(a) __raw_readb(a)
#define ioread16(a) __raw_readw(a)
#define ioread32(a) __raw_readl(a)
-#define iowrite8(v,a) __raw_writeb((v),(a))
-#define iowrite16(v,a) __raw_writew((v),(a))
-#define iowrite32(v,a) __raw_writel((v),(a))
+#define iowrite8(v, a) __raw_writeb((v), (a))
+#define iowrite16(v, a) __raw_writew((v), (a))
+#define iowrite32(v, a) __raw_writel((v), (a))
#define IO_SPACE_LIMIT 0xffffff
#define IOMAP_NOCACHE_NONSER 2
#define IOMAP_WRITETHROUGH 3
-extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
+extern void *__ioremap(unsigned long physaddr, unsigned long size,
+ int cacheflag);
extern void __iounmap(void *addr, unsigned long size);
static inline void *ioremap(unsigned long physaddr, unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
-static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_nocache(unsigned long physaddr,
+ unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
-static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_writethrough(unsigned long physaddr,
+ unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
-static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_fullcache(unsigned long physaddr,
+ unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
extern void iounmap(void *addr);
/* H8/300 internal I/O functions */
-static __inline__ unsigned char ctrl_inb(unsigned long addr)
+static inline unsigned char ctrl_inb(unsigned long addr)
{
- return *(volatile unsigned char*)addr;
+ return *(volatile unsigned char *)addr;
}
-static __inline__ unsigned short ctrl_inw(unsigned long addr)
+static inline unsigned short ctrl_inw(unsigned long addr)
{
- return *(volatile unsigned short*)addr;
+ return *(volatile unsigned short *)addr;
}
-static __inline__ unsigned long ctrl_inl(unsigned long addr)
+static inline unsigned long ctrl_inl(unsigned long addr)
{
- return *(volatile unsigned long*)addr;
+ return *(volatile unsigned long *)addr;
}
-static __inline__ void ctrl_outb(unsigned char b, unsigned long addr)
+static inline void ctrl_outb(unsigned char b, unsigned long addr)
{
- *(volatile unsigned char*)addr = b;
+ *(volatile unsigned char *)addr = b;
}
-static __inline__ void ctrl_outw(unsigned short b, unsigned long addr)
+static inline void ctrl_outw(unsigned short b, unsigned long addr)
{
- *(volatile unsigned short*)addr = b;
+ *(volatile unsigned short *)addr = b;
}
-static __inline__ void ctrl_outl(unsigned long b, unsigned long addr)
+static inline void ctrl_outl(unsigned long b, unsigned long addr)
{
- *(volatile unsigned long*)addr = b;
+ *(volatile unsigned long *)addr = b;
}
-static __inline__ void ctrl_bclr(int b, unsigned long addr)
+static inline void ctrl_bclr(int b, unsigned long addr)
{
	if (__builtin_constant_p(b))
		switch (b) {
-		case 0: __asm__("bclr #0,@%0"::"r"(addr)); break;
-		case 1: __asm__("bclr #1,@%0"::"r"(addr)); break;
-		case 2: __asm__("bclr #2,@%0"::"r"(addr)); break;
-		case 3: __asm__("bclr #3,@%0"::"r"(addr)); break;
-		case 4: __asm__("bclr #4,@%0"::"r"(addr)); break;
-		case 5: __asm__("bclr #5,@%0"::"r"(addr)); break;
-		case 6: __asm__("bclr #6,@%0"::"r"(addr)); break;
-		case 7: __asm__("bclr #7,@%0"::"r"(addr)); break;
+		/*
+		 * addr is the I/O register's *address*, dereferenced by
+		 * the @%0 (register indirect) addressing mode, so it
+		 * must stay an "r" operand.  An "m"(addr) constraint
+		 * would name the stack slot of the local variable
+		 * itself, clearing a bit in the wrong location.
+		 */
+		case 0: __asm__("bclr #0,@%0" : : "r"(addr)); break;
+		case 1: __asm__("bclr #1,@%0" : : "r"(addr)); break;
+		case 2: __asm__("bclr #2,@%0" : : "r"(addr)); break;
+		case 3: __asm__("bclr #3,@%0" : : "r"(addr)); break;
+		case 4: __asm__("bclr #4,@%0" : : "r"(addr)); break;
+		case 5: __asm__("bclr #5,@%0" : : "r"(addr)); break;
+		case 6: __asm__("bclr #6,@%0" : : "r"(addr)); break;
+		case 7: __asm__("bclr #7,@%0" : : "r"(addr)); break;
		}
	else
-		__asm__("bclr %w0,@%1"::"r"(b), "r"(addr));
+		__asm__("bclr %w0,@%1" : : "r"(b), "r"(addr));
}
-static __inline__ void ctrl_bset(int b, unsigned long addr)
+static inline void ctrl_bset(int b, unsigned long addr)
{
	if (__builtin_constant_p(b))
		switch (b) {
-		case 0: __asm__("bset #0,@%0"::"r"(addr)); break;
-		case 1: __asm__("bset #1,@%0"::"r"(addr)); break;
-		case 2: __asm__("bset #2,@%0"::"r"(addr)); break;
-		case 3: __asm__("bset #3,@%0"::"r"(addr)); break;
-		case 4: __asm__("bset #4,@%0"::"r"(addr)); break;
-		case 5: __asm__("bset #5,@%0"::"r"(addr)); break;
-		case 6: __asm__("bset #6,@%0"::"r"(addr)); break;
-		case 7: __asm__("bset #7,@%0"::"r"(addr)); break;
+		/*
+		 * addr is the I/O register's *address*, dereferenced by
+		 * the @%0 (register indirect) addressing mode, so it
+		 * must stay an "r" operand.  An "m"(addr) constraint
+		 * would name the stack slot of the local variable
+		 * itself, setting a bit in the wrong location.
+		 */
+		case 0: __asm__("bset #0,@%0" : : "r"(addr)); break;
+		case 1: __asm__("bset #1,@%0" : : "r"(addr)); break;
+		case 2: __asm__("bset #2,@%0" : : "r"(addr)); break;
+		case 3: __asm__("bset #3,@%0" : : "r"(addr)); break;
+		case 4: __asm__("bset #4,@%0" : : "r"(addr)); break;
+		case 5: __asm__("bset #5,@%0" : : "r"(addr)); break;
+		case 6: __asm__("bset #6,@%0" : : "r"(addr)); break;
+		case 7: __asm__("bset #7,@%0" : : "r"(addr)); break;
		}
	else
-		__asm__("bset %w0,@%1"::"r"(b), "r"(addr));
+		__asm__("bset %w0,@%1" : : "r"(b), "r"(addr));
}
-#if 0
-/* Pages to physical address... */
-#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
-#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
-#endif
-
/*
* Macros used for converting between virtual and physical mappings.
*/
/*
* Convert a virtual cached pointer to an uncached pointer
*/
-#define xlate_dev_kmem_ptr(p) p
+#define xlate_dev_kmem_ptr(p) (p)
#endif /* __KERNEL__ */
#if defined(CONFIG_CPU_H8300H)
#define NR_IRQS 64
-#define IRQ_CHIP &h8300h_irq_chip
+#define IRQ_CHIP &(h8300h_irq_chip)
#define EXT_IRQ0 12
#define EXT_IRQS 6
#elif defined(CONFIG_CPU_H8S)
#define NR_IRQS 128
-#define IRQ_CHIP &h8s_irq_chip
+#define IRQ_CHIP &(h8s_irq_chip)
#define EXT_IRQ0 16
#define EXT_IRQS 16
#endif
static inline unsigned char arch_local_save_flags(void)
{
unsigned char flags;
+
asm volatile ("stc ccr,%w0" : "=r" (flags));
return flags;
}
static inline unsigned char arch_local_irq_save(void)
{
unsigned char flags;
+
asm volatile ("stc ccr,%w0\n\t"
- "orc #0xc0,ccr" :"=r" (flags) : : "cc", "memory");
+ "orc #0xc0,ccr" : "=r" (flags) : : "cc", "memory");
return flags;
}
static inline unsigned long arch_local_save_flags(void)
{
unsigned short flags;
+
asm volatile ("stc ccr,%w0\n\tstc exr,%x0" : "=r" (flags));
return flags;
}
static inline unsigned long arch_local_irq_save(void)
{
unsigned short flags;
+
asm volatile ("stc ccr,%w0\n\t"
"stc exr,%x0\n\t"
"orc #0x80,ccr\n\t"
#ifndef _H8300_PAGE_H
#define _H8300_PAGE_H
+
#include <asm-generic/page.h>
-#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
+#include <linux/types.h>
+
+#define MAP_NR(addr) (((uintptr_t)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
+
#endif
#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */
#define __swp_type(x) (0)
#define __swp_offset(x) (0)
-#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
+#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define kern_addr_valid(addr) (1)
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
#include <linux/compiler.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
#include <asm/current.h>
-static inline unsigned long rdusp(void) {
+static inline unsigned long rdusp(void)
+{
extern unsigned int _sw_usp;
+
return _sw_usp;
}
-static inline void wrusp(unsigned long usp) {
+static inline void wrusp(unsigned long usp)
+{
extern unsigned int _sw_usp;
+
_sw_usp = usp;
}
* it can't hurt anything as far as I can tell
*/
#if defined(CONFIG_CPU_H8300H)
-#define start_thread(_regs, _pc, _usp) \
-do { \
- (_regs)->pc = (_pc); \
- (_regs)->ccr = 0x00; /* clear all flags */ \
- (_regs)->er5 = current->mm->start_data; /* GOT base */ \
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ (_regs)->pc = (_pc); \
+ (_regs)->ccr = 0x00; /* clear all flags */ \
+ (_regs)->er5 = current->mm->start_data; /* GOT base */ \
wrusp((unsigned long)(_usp) - sizeof(unsigned long)*3); \
-} while(0)
+} while (0)
#endif
#if defined(CONFIG_CPU_H8S)
-#define start_thread(_regs, _pc, _usp) \
-do { \
- (_regs)->pc = (_pc); \
- (_regs)->ccr = 0x00; /* clear kernel flag */ \
- (_regs)->exr = 0x78; /* enable all interrupts */ \
- (_regs)->er5 = current->mm->start_data; /* GOT base */ \
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ (_regs)->pc = (_pc); \
+ (_regs)->ccr = 0x00; /* clear kernel flag */ \
+ (_regs)->exr = 0x78; /* enable all interrupts */ \
+ (_regs)->er5 = current->mm->start_data; /* GOT base */ \
/* 14 = space for retaddr(4), vector(4), er0(4) and exr(2) on stack */ \
- wrusp(((unsigned long)(_usp)) - 14); \
-} while(0)
+ wrusp(((unsigned long)(_usp)) - 14); \
+} while (0)
#endif
/* Forward declaration, a strange C thing */
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) \
- ({ \
- unsigned long eip = 0; \
- if ((tsk)->thread.esp0 > PAGE_SIZE && \
- MAP_NR((tsk)->thread.esp0) < max_mapnr) \
- eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
- eip; })
+ ({ \
+ unsigned long eip = 0; \
+ if ((tsk)->thread.esp0 > PAGE_SIZE && \
+ MAP_NR((tsk)->thread.esp0) < max_mapnr) \
+ eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
+ eip; })
+
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
#define HARD_RESET_NOW() ({ \
- local_irq_disable(); \
- asm("jmp @@0"); \
+ local_irq_disable(); \
+ asm("jmp @@0"); \
})
#endif
static inline mm_segment_t get_fs(void)
{
- return USER_DS;
+ return USER_DS;
}
static inline mm_segment_t get_ds(void)
{
- /* return the supervisor data space code */
- return KERNEL_DS;
+ /* return the supervisor data space code */
+ return KERNEL_DS;
}
static inline void set_fs(mm_segment_t val)
{
}
-#define segment_eq(a,b) ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)
#endif /* __ASSEMBLY__ */
static int sh_bios_in_gdb_mode(void)
{
static int gdb_active = -1;
+
if (gdb_active == -1) {
int (*set_console_comm)(int);
- set_console_comm = ((void **)HAL_IF_VECTOR_TABLE)[CALL_IF_SET_CONSOLE_COMM];
- gdb_active = (set_console_comm(QUERY_CURRENT) == MANGLER);
+
+ set_console_comm =
+ ((void **)HAL_IF_VECTOR_TABLE)[CALL_IF_SET_CONSOLE_COMM];
+ gdb_active =
+ (set_console_comm(QUERY_CURRENT) == MANGLER);
}
return gdb_active;
}
#include <asm/page.h>
#define __HAVE_ARCH_MEMSET
-extern void * memset(void * s, int c, size_t count);
+extern void * memset(void *s, int c, size_t count);
#define __HAVE_ARCH_MEMCPY
extern void * memcpy(void *d, const void *s, size_t count);
-#else /* KERNEL */
-
-/*
- * let user libraries deal with these,
- * IMHO the kernel has no place defining these functions for user apps
- */
-
-#define __HAVE_ARCH_STRCPY 1
-#define __HAVE_ARCH_STRNCPY 1
-#define __HAVE_ARCH_STRCAT 1
-#define __HAVE_ARCH_STRNCAT 1
-#define __HAVE_ARCH_STRCMP 1
-#define __HAVE_ARCH_STRNCMP 1
-#define __HAVE_ARCH_STRNICMP 1
-#define __HAVE_ARCH_STRCHR 1
-#define __HAVE_ARCH_STRRCHR 1
-#define __HAVE_ARCH_STRSTR 1
-#define __HAVE_ARCH_STRLEN 1
-#define __HAVE_ARCH_STRNLEN 1
-#define __HAVE_ARCH_MEMSET 1
-#define __HAVE_ARCH_MEMCPY 1
-#define __HAVE_ARCH_MEMMOVE 1
-#define __HAVE_ARCH_MEMSCAN 1
-#define __HAVE_ARCH_MEMCMP 1
-#define __HAVE_ARCH_MEMCHR 1
-#define __HAVE_ARCH_STRTOK 1
-
#endif /* KERNEL */
-#endif /* _M68K_STRING_H_ */
+#endif
*/
asmlinkage void resume(void);
-#define switch_to(prev,next,last) \
-do { \
- void *_last; \
- __asm__ __volatile__( \
- "mov.l %1, er0\n\t" \
- "mov.l %2, er1\n\t" \
- "mov.l %3, er2\n\t" \
- "jsr @_resume\n\t" \
- "mov.l er2,%0\n\t" \
- : "=r" (_last) \
- : "r" (&(prev->thread)), \
- "r" (&(next->thread)), \
- "g" (prev) \
- : "cc", "er0", "er1", "er2", "er3"); \
- (last) = _last; \
+#define switch_to(prev, next, last) \
+do { \
+ void *_last; \
+ __asm__ __volatile__( \
+ "mov.l %1, er0\n\t" \
+ "mov.l %2, er1\n\t" \
+ "mov.l %3, er2\n\t" \
+ "jsr @_resume\n\t" \
+ "mov.l er2,%0\n\t" \
+ : "=r" (_last) \
+ : "r" (&(prev->thread)), \
+ "r" (&(next->thread)), \
+ "g" (prev) \
+ : "cc", "er0", "er1", "er2", "er3"); \
+ (last) = _last; \
} while(0)
#endif /* _H8300_SWITCH_TO_H */
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
- __asm__(
- "mov.l sp, %0 \n\t"
+
+ __asm__("mov.l sp, %0 \n\t"
"and.l %1, %0"
: "=&r"(ti)
- : "i" (~(THREAD_SIZE-1))
- );
+ : "i" (~(THREAD_SIZE-1)));
return ti;
}
#ifndef __H8300_TLB_H__
#define __H8300_TLB_H__
-#define tlb_flush(tlb) do { } while(0)
+#define tlb_flush(tlb) do { } while (0)
#include <asm-generic/tlb.h>
#define VERIFY_WRITE 1
/* We let the MMU do all checking */
-#define access_ok(type, addr, size) __access_ok((unsigned long)addr,size)
+#define access_ok(type, addr, size) __access_ok((unsigned long)addr, size)
static inline int __access_ok(unsigned long addr, unsigned long size)
{
#define RANGE_CHECK_OK(addr, size, lower, upper) \
(((addr) >= (lower)) && (((addr) + (size)) < (upper)))
extern unsigned long memory_end;
- return(RANGE_CHECK_OK(addr, size, 0L, memory_end));
+
+ return RANGE_CHECK_OK(addr, size, 0L, memory_end);
}
/*
#define put_user(x, ptr) \
({ \
- int __pu_err = 0; \
- typeof(*(ptr)) __pu_val = (x); \
- switch (sizeof (*(ptr))) { \
- case 1: \
- case 2: \
- case 4: \
- *(ptr) = (__pu_val); \
- break; \
- case 8: \
- memcpy(ptr, &__pu_val, sizeof (*(ptr))); \
- break; \
- default: \
- __pu_err = __put_user_bad(); \
- break; \
- } \
- __pu_err; \
+ int __pu_err = 0; \
+ typeof(*(ptr)) __pu_val = (x); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+		*(ptr) = __pu_val;				\
+ break; \
+ case 8: \
+ memcpy(ptr, &__pu_val, sizeof(*(ptr))); \
+ break; \
+ default: \
+ __pu_err = __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
})
+#define __put_user_asm(x, addr, err, size) \
+do { \
+} while (0)
+
#define __put_user(x, ptr) put_user(x, ptr)
extern int __put_user_bad(void);
#define get_user(x, ptr) \
({ \
- unsigned long long __gu_val; \
- int __gu_err = 0; \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __gu_val = *((u8 *)(ptr)); \
- break; \
- case 2: \
- __gu_val = *((u16 *)ptr); \
- break; \
- case 4: \
- __gu_val = *((u32 *)ptr); \
- break; \
- case 8: \
- memcpy((void *)&__gu_val, ptr, sizeof(*(ptr))); \
- break; \
- default: \
- __gu_err = __get_user_bad(); \
- break; \
- } \
- (x) = __gu_val; \
- __gu_err; \
+ unsigned long long __gu_val; \
+ int __gu_err = 0; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __gu_val = *((u8 *)(ptr)); \
+ break; \
+ case 2: \
+ __gu_val = *((u16 *)ptr); \
+ break; \
+ case 4: \
+ __gu_val = *((u32 *)ptr); \
+ break; \
+ case 8: \
+ memcpy((void *)&__gu_val, ptr, sizeof(*(ptr))); \
+ break; \
+ default: \
+ __gu_err = __get_user_bad(); \
+ break; \
+ } \
+	(x) = (typeof(*(ptr)))__gu_val;				\
+ __gu_err; \
})
#define __get_user(x, ptr) get_user(x, ptr)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
-#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
+#define copy_to_user_ret(to, from, n, retval) \
+ ({ if (copy_to_user(to, from, n)) return retval; })
-#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
+#define copy_from_user_ret(to, from, n, retval) \
+ ({ if (copy_from_user(to, from, n)) return retval; })
unsigned long clear_user(void __user *addr, unsigned long size);
#define strnlen_user(s, n) (strnlen(s, n) + 1)
is still the layout used by user (the new pt_regs doesn't have
all registers). */
struct user_regs_struct {
- long er1,er2,er3,er4,er5,er6;
+ long er1, er2, er3, er4, er5, er6;
long er0;
long usp;
long orig_er0;
long pc;
};
-
/* When the kernel dumps core, it starts by dumping the user struct -
this will be used by gdb to figure out where the data and stack segments
are within the file, and what virtual addresses to use. */
-struct user{
+struct user {
/* We start with the registers, to mimic the way that "memory" is returned
from the ptrace(3,...) function. */
- struct user_regs_struct regs; /* Where the registers are actually stored */
+ struct user_regs_struct regs; /* Where the registers are actually stored */
/* ptrace does not yet supply these. Someday.... */
/* The rest of this junk is to help gdb figure out what goes where */
- unsigned long int u_tsize; /* Text segment size (pages). */
- unsigned long int u_dsize; /* Data segment size (pages). */
- unsigned long int u_ssize; /* Stack segment size (pages). */
- unsigned long start_code; /* Starting virtual address of text. */
- unsigned long start_stack; /* Starting virtual address of stack area.
- This is actually the bottom of the stack,
- the top of the stack is always found in the
- esp register. */
- long int signal; /* Signal that caused the core dump. */
- int reserved; /* No longer used */
- unsigned long u_ar0; /* Used by gdb to help find the values for */
- /* the registers. */
- unsigned long magic; /* To uniquely identify a core file */
- char u_comm[32]; /* User command that was responsible */
+ unsigned long int u_tsize; /* Text segment size (pages). */
+ unsigned long int u_dsize; /* Data segment size (pages). */
+ unsigned long int u_ssize; /* Stack segment size (pages). */
+ unsigned long start_code; /* Starting virtual address of text. */
+ unsigned long start_stack; /* Starting virtual address of stack area.
+ This is actually the bottom of the stack,
+ the top of the stack is always found in the
+ esp register. */
+ long int signal; /* Signal that caused the core dump. */
+ int reserved; /* No longer used */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
+ /* the registers. */
+ unsigned long magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
};
#define NBPG PAGE_SIZE
#define UPAGES 1
unsigned short exr;
#endif
unsigned long pc;
-} __attribute__((aligned(2),packed));
+} __attribute__((aligned(2), packed));
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define _ASM_H8300_SIGCONTEXT_H
struct sigcontext {
- unsigned long sc_mask; /* old sigmask */
+ unsigned long sc_mask; /* old sigmask */
unsigned long sc_usp; /* old user stack pointer */
unsigned long sc_er0;
unsigned long sc_er1;
obj-$(CONFIG_CPU_H8300H) += ptrace_h.o irq_h.o
obj-$(CONFIG_CPU_H8S) += ptrace_s.o irq_s.o
-obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o
+obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o
obj-$(CONFIG_H8300H_SIM) += sim-console.o
obj-$(CONFIG_H8S_SIM) += sim-console.o
DEFINE(LORIG, offsetof(struct pt_regs, orig_er0) - sizeof(long));
DEFINE(LCCR, offsetof(struct pt_regs, ccr) - sizeof(long));
DEFINE(LVEC, offsetof(struct pt_regs, vector) - sizeof(long));
-#if defined(__H8300S__)
+#if defined(CONFIG_CPU_H8S)
DEFINE(LEXR, offsetof(struct pt_regs, exr) - sizeof(long));
#endif
DEFINE(LRET, offsetof(struct pt_regs, pc) - sizeof(long));
{
.con_id = "master_clk",
.clk = &master_clk,
- } ,
+ },
{
.con_id = "peripheral_clk",
.clk = &peripheral_clk,
void __init early_device_init(void)
{
- /* All interrupt priority high */
- ctrl_outb(0xff, 0xfee018);
- ctrl_outb(0xff, 0xfee019);
early_platform_add_devices(early_devices,
ARRAY_SIZE(early_devices));
}
void __init early_device_init(void)
{
- int i;
/* SCI / Timer enable */
ctrl_outw(0x07f0, 0xffff40);
- /* All interrupt priority is 1 */
- for(i = 0; i < 12; i++)
- ctrl_outw(0x1111, 0xfffe00 + i * 2);
early_platform_add_devices(early_devices,
ARRAY_SIZE(early_devices));
}
subs #4,er1 /* adjust ret_pc */
#if defined(CONFIG_CPU_H8S)
orc #7,exr
-#endif
+#endif
jsr @do_IRQ
jmp @ret_from_interrupt
#include <asm/checksum.h>
#include <asm/current.h>
-//asmlinkage long long __ashrdi3 (long long, int);
-//asmlinkage long long __lshrdi3 (long long, int);
+asmlinkage long long __ashrdi3(long long, int);
+asmlinkage long long __lshrdi3(long long, int);
extern char h8300_debug_device[];
/*
__HEAD
.global _start
-_start:
+_start:
mov.l #IRAMTOP,sp
/* .bss clear */
mov.l #_sbss,er5
shlr er4
shlr er4
sub.l er1,er1
-1:
+1:
mov.l er1,@er5
adds #4,er5
dec.l #1,er4
__HEAD
.global _start
-_start:
+_start:
mov.l #IRAMTOP,sp
#if !defined(CONFIG_H8300H_SIM) && \
!defined(CONFIG_H8S_SIM)
jsr @lowlevel_init
-
+
/* copy .data */
mov.l #_begin_data,er5
mov.l #_sdata,er6
sub.l er6,er4
shlr.l er4
shlr.l er4
-1:
+1:
mov.l @er5+,er0
mov.l er0,@er6
adds #4,er6
dec.l #1,er4
- bne 1b
+ bne 1b
/* .bss clear */
mov.l #_sbss,er5
mov.l #_ebss,er4
shlr er4
shlr er4
sub.l er0,er0
-1:
+1:
mov.l er0,@er5
adds #4,er5
dec.l #1,er4
beq 4f
1:
mov.l @er1+,er2
-2:
+2:
mov.b @er2+,r4l
beq 3f
mov.b r4l,@er3
typedef void (*h8300_vector)(void);
static const h8300_vector __initconst trap_table[] = {
- 0, 0, 0, 0,
+ 0, 0, 0, 0,
_trace_break,
- 0, 0,
- _nmi,
+ 0, 0,
+ _nmi,
_system_call,
0, 0,
_trace_break,
static unsigned long __init *get_vector_address(void)
{
unsigned long *rom_vector = CPU_VECTOR;
- unsigned long base,tmp;
+ unsigned long base, tmp;
int vec_no;
base = rom_vector[EXT_IRQ0] & ADDR_MASK;
/* check romvector format */
for (vec_no = EXT_IRQ0 + 1; vec_no <= EXT_IRQ0+EXT_IRQS; vec_no++) {
- if ((base+(vec_no - EXT_IRQ0)*4) != (rom_vector[vec_no] & ADDR_MASK))
+ if ((base+(vec_no - EXT_IRQ0)*4) !=
+ (rom_vector[vec_no] & ADDR_MASK))
return NULL;
}
static void __init setup_vector(void)
{
int i;
- unsigned long *ramvec,*ramvec_p;
+ unsigned long *ramvec, *ramvec_p;
const h8300_vector *trap_entry;
ramvec = get_vector_address();
/* create redirect table */
ramvec_p = ramvec;
trap_entry = trap_table;
- for ( i = 0; i < NR_IRQS; i++) {
- if ( i < 12 ) {
+ for (i = 0; i < NR_IRQS; i++) {
+ if (i < 12) {
if (*trap_entry)
*ramvec_p = VECTOR(*trap_entry);
ramvec_p++;
#include <linux/irq.h>
#include <asm/io.h>
-const static char ipr_bit[] = {
+static const char ipr_bit[] = {
7, 6, 5, 5,
4, 4, 4, 4, 3, 3, 3, 3,
2, 2, 2, 2, 1, 1, 1, 1,
int bit;
unsigned int addr;
int irq = data->irq - 12;
- if ((bit = ipr_bit[irq]) >= 0) {
+
+ bit = ipr_bit[irq];
+ if (bit >= 0) {
addr = IPR + (irq >> 3);
ctrl_bclr(bit & 7, addr);
}
int bit;
unsigned int addr;
int irq = data->irq - 12;
- if ((bit = ipr_bit[irq]) >= 0) {
+
+ bit = ipr_bit[irq];
+ if (bit >= 0) {
addr = IPR + (irq >> 3);
ctrl_bset(bit & 7, addr);
}
void __init h8300_init_ipr(void)
{
- ctrl_outb(0xff, IPR +0);
+ /* All interrupt priority high */
+ ctrl_outb(0xff, IPR + 0);
ctrl_outb(0xee, IPR + 1);
}
#include <asm/io.h>
#define IPRA 0xfffe00
-const static unsigned char ipr_table[] = {
+static const unsigned char ipr_table[] = {
0x03, 0x02, 0x01, 0x00, 0x13, 0x12, 0x11, 0x10, /* 16 - 23 */
0x23, 0x22, 0x21, 0x20, 0x33, 0x32, 0x31, 0x30, /* 24 - 31 */
0x43, 0x42, 0x41, 0x40, 0x53, 0x53, 0x52, 0x52, /* 32 - 39 */
unsigned int addr;
unsigned short pri;
int irq = data->irq;
+
addr = IPRA + ((ipr_table[irq - 16] & 0xf0) >> 3);
pos = (ipr_table[irq - 16] & 0x0f) * 4;
pri = ~(0x000f << pos);
unsigned int addr;
unsigned short pri;
int irq = data->irq;
+
addr = IPRA + ((ipr_table[irq - 16] & 0xf0) >> 3);
pos = (ipr_table[irq - 16] & 0x0f) * 4;
pri = ~(0x000f << pos);
void __init h8300_init_ipr(void)
{
int n;
+ int i;
+ /* All interrupt priority is 1 */
/* IPRA to IPRK */
for (n = 0; n <= 'k' - 'a'; n++)
ctrl_outw(0x1111, IPRA + (n * 2));
break;
case R_H8_PCREL16:
v -= (unsigned long)loc + 2;
- if ((Elf32_Sword)v > 0x7fff ||
+ if ((Elf32_Sword)v > 0x7fff ||
(Elf32_Sword)v < -(Elf32_Sword)0x8000)
goto overflow;
- else
+ else
*(unsigned short *)loc = v;
break;
case R_H8_PCREL8:
v -= (unsigned long)loc + 1;
- if ((Elf32_Sword)v > 0x7f ||
+ if ((Elf32_Sword)v > 0x7f ||
(Elf32_Sword)v < -(Elf32_Sword)0x80)
goto overflow;
- else
+ else
*(unsigned char *)loc = v;
break;
default:
/*
* The idle loop on an H8/300..
*/
-#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
void arch_cpu_idle(void)
{
local_irq_enable();
- /* XXX: race here! What if need_resched() gets set now? */
__asm__("sleep");
}
-#endif
-void machine_restart(char * __unused)
+void machine_restart(char *__unused)
{
local_irq_disable();
- __asm__("jmp @@0");
+ __asm__("jmp @@0");
}
void machine_halt(void)
for (;;);
}
-void show_regs(struct pt_regs * regs)
+void show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
regs->orig_er0, regs->er0, regs->er1);
printk("\nER2: %08lx ER3: %08lx ER4: %08lx ER5: %08lx",
regs->er2, regs->er3, regs->er4, regs->er5);
- printk("\nER6' %08lx ",regs->er6);
+ printk("\nER6' %08lx ", regs->er6);
if (user_mode(regs))
printk("USP: %08lx\n", rdusp());
else
}
int copy_thread(unsigned long clone_flags,
- unsigned long usp, unsigned long topstk,
- struct task_struct * p)
+ unsigned long usp, unsigned long topstk,
+ struct task_struct *p)
{
struct pt_regs * childregs;
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
+
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/* cpu depend functions */
extern long h8300_get_reg(struct task_struct *task, int regno);
-extern int h8300_put_reg(struct task_struct *task, int regno, unsigned long data);
+extern int h8300_put_reg(struct task_struct *task, int regno,
+ unsigned long data);
extern void user_disable_single_step(struct task_struct *child);
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp = 0;
-
+
if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
- break ;
+ break;
}
-
- ret = 0; /* Default return condition */
+
+ ret = 0; /* Default return condition */
if (regno < H8300_REGS_NO)
tmp = h8300_get_reg(child, regno);
switch (regno) {
case 49:
tmp = child->mm->start_code;
- break ;
+ break;
case 50:
tmp = child->mm->start_data;
- break ;
+ break;
case 51:
tmp = child->mm->end_code;
- break ;
+ break;
case 52:
tmp = child->mm->end_data;
- break ;
+ break;
default:
ret = -EIO;
}
}
/* when I and D space are separate, this will have to be fixed. */
- case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ case PTRACE_POKEUSR: /* write the word at location addr
+ in the USER area */
if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
- break ;
+ break;
}
if (regno == PT_ORIG_ER0) {
ret = -EIO;
- break ;
+ break;
}
if (regno < H8300_REGS_NO) {
ret = h8300_put_reg(child, regno, data);
- break ;
+ break;
}
ret = -EIO;
- break ;
+ break;
case PTRACE_GETREGS: { /* Get all gp regs from the child. */
- int i;
+ int i;
unsigned long tmp;
+
for (i = 0; i < H8300_REGS_NO; i++) {
- tmp = h8300_get_reg(child, i);
- if (put_user(tmp, datap)) {
- ret = -EFAULT;
- break;
- }
- datap++;
+ tmp = h8300_get_reg(child, i);
+ if (put_user(tmp, datap)) {
+ ret = -EFAULT;
+ break;
+ }
+ datap++;
}
ret = 0;
break;
case PTRACE_SETREGS: { /* Set all gp regs in the child. */
int i;
unsigned long tmp;
+
for (i = 0; i < H8300_REGS_NO; i++) {
- if (get_user(tmp, datap)) {
- ret = -EFAULT;
- break;
- }
- h8300_put_reg(child, i, tmp);
- datap++;
+ if (get_user(tmp, datap)) {
+ ret = -EFAULT;
+ break;
+ }
+ h8300_put_reg(child, i, tmp);
+ datap++;
}
ret = 0;
break;
case PT_USP:
return task->thread.usp + sizeof(long)*2;
case PT_CCR:
- return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
+ return *(unsigned short *)(task->thread.esp0 +
+ h8300_register_offset[regno]);
default:
- return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]);
+ return *(unsigned long *)(task->thread.esp0 +
+ h8300_register_offset[regno]);
}
}
int h8300_put_reg(struct task_struct *task, int regno, unsigned long data)
{
unsigned short oldccr;
+
switch (regno) {
case PT_USP:
task->thread.usp = data - sizeof(long)*2;
case PT_CCR:
- oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
+ oldccr = *(unsigned short *)(task->thread.esp0 +
+ h8300_register_offset[regno]);
oldccr &= ~CCR_MASK;
data &= CCR_MASK;
data |= oldccr;
- *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
+ *(unsigned short *)(task->thread.esp0 +
+ h8300_register_offset[regno]) = data;
break;
default:
- *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
+ *(unsigned long *)(task->thread.esp0 +
+ h8300_register_offset[regno]) = data;
break;
}
return 0;
/* disable singlestep */
void user_disable_single_step(struct task_struct *child)
{
- if((long)child->thread.breakinfo.addr != -1L) {
- *child->thread.breakinfo.addr = child->thread.breakinfo.inst;
+ if ((long)child->thread.breakinfo.addr != -1L) {
+ *(child->thread.breakinfo.addr) = child->thread.breakinfo.inst;
child->thread.breakinfo.addr = (unsigned short *)-1L;
}
}
/* calculate next pc */
-enum jump_type {none, /* normal instruction */
- jabs, /* absolute address jump */
- ind, /* indirect address jump */
- ret, /* return to subrutine */
- reg, /* register indexed jump */
- relb, /* pc relative jump (byte offset) */
- relw, /* pc relative jump (word offset) */
- };
+enum jump_type {none, /* normal instruction */
+ jabs, /* absolute address jump */
+ ind, /* indirect address jump */
+ ret, /* return to subrutine */
+ reg, /* register indexed jump */
+ relb, /* pc relative jump (byte offset) */
+ relw, /* pc relative jump (word offset) */
+ };
/* opcode decode table define
ptn: opcode pattern
unsigned char bitmask;
signed char length;
signed char type;
-} __attribute__((aligned(1),packed));
+} __packed __aligned(1);
-#define OPTABLE(ptn,msk,len,jmp) \
- { \
- .bitpattern = ptn, \
- .bitmask = msk, \
- .length = len, \
- .type = jmp, \
+#define OPTABLE(ptn, msk, len, jmp) \
+ { \
+ .bitpattern = ptn, \
+ .bitmask = msk, \
+ .length = len, \
+ .type = jmp, \
}
static const struct optable optable_0[] = {
- OPTABLE(0x00,0xff, 1,none), /* 0x00 */
- OPTABLE(0x01,0xff,-1,none), /* 0x01 */
- OPTABLE(0x02,0xfe, 1,none), /* 0x02-0x03 */
- OPTABLE(0x04,0xee, 1,none), /* 0x04-0x05/0x14-0x15 */
- OPTABLE(0x06,0xfe, 1,none), /* 0x06-0x07 */
- OPTABLE(0x08,0xea, 1,none), /* 0x08-0x09/0x0c-0x0d/0x18-0x19/0x1c-0x1d */
- OPTABLE(0x0a,0xee, 1,none), /* 0x0a-0x0b/0x1a-0x1b */
- OPTABLE(0x0e,0xee, 1,none), /* 0x0e-0x0f/0x1e-0x1f */
- OPTABLE(0x10,0xfc, 1,none), /* 0x10-0x13 */
- OPTABLE(0x16,0xfe, 1,none), /* 0x16-0x17 */
- OPTABLE(0x20,0xe0, 1,none), /* 0x20-0x3f */
- OPTABLE(0x40,0xf0, 1,relb), /* 0x40-0x4f */
- OPTABLE(0x50,0xfc, 1,none), /* 0x50-0x53 */
- OPTABLE(0x54,0xfd, 1,ret ), /* 0x54/0x56 */
- OPTABLE(0x55,0xff, 1,relb), /* 0x55 */
- OPTABLE(0x57,0xff, 1,none), /* 0x57 */
- OPTABLE(0x58,0xfb, 2,relw), /* 0x58/0x5c */
- OPTABLE(0x59,0xfb, 1,reg ), /* 0x59/0x5b */
- OPTABLE(0x5a,0xfb, 2,jabs), /* 0x5a/0x5e */
- OPTABLE(0x5b,0xfb, 2,ind ), /* 0x5b/0x5f */
- OPTABLE(0x60,0xe8, 1,none), /* 0x60-0x67/0x70-0x77 */
- OPTABLE(0x68,0xfa, 1,none), /* 0x68-0x69/0x6c-0x6d */
- OPTABLE(0x6a,0xfe,-2,none), /* 0x6a-0x6b */
- OPTABLE(0x6e,0xfe, 2,none), /* 0x6e-0x6f */
- OPTABLE(0x78,0xff, 4,none), /* 0x78 */
- OPTABLE(0x79,0xff, 2,none), /* 0x79 */
- OPTABLE(0x7a,0xff, 3,none), /* 0x7a */
- OPTABLE(0x7b,0xff, 2,none), /* 0x7b */
- OPTABLE(0x7c,0xfc, 2,none), /* 0x7c-0x7f */
- OPTABLE(0x80,0x80, 1,none), /* 0x80-0xff */
+ OPTABLE(0x00, 0xff, 1, none), /* 0x00 */
+ OPTABLE(0x01, 0xff, -1, none), /* 0x01 */
+ OPTABLE(0x02, 0xfe, 1, none), /* 0x02-0x03 */
+ OPTABLE(0x04, 0xee, 1, none), /* 0x04-0x05/0x14-0x15 */
+ OPTABLE(0x06, 0xfe, 1, none), /* 0x06-0x07 */
+ OPTABLE(0x08, 0xea, 1, none), /* 0x08-0x09/0x0c-0x0d/0x18-0x19/0x1c-0x1d */
+ OPTABLE(0x0a, 0xee, 1, none), /* 0x0a-0x0b/0x1a-0x1b */
+ OPTABLE(0x0e, 0xee, 1, none), /* 0x0e-0x0f/0x1e-0x1f */
+ OPTABLE(0x10, 0xfc, 1, none), /* 0x10-0x13 */
+ OPTABLE(0x16, 0xfe, 1, none), /* 0x16-0x17 */
+ OPTABLE(0x20, 0xe0, 1, none), /* 0x20-0x3f */
+ OPTABLE(0x40, 0xf0, 1, relb), /* 0x40-0x4f */
+ OPTABLE(0x50, 0xfc, 1, none), /* 0x50-0x53 */
+	OPTABLE(0x54, 0xfd, 1, ret),	/* 0x54/0x56 */
+ OPTABLE(0x55, 0xff, 1, relb), /* 0x55 */
+ OPTABLE(0x57, 0xff, 1, none), /* 0x57 */
+ OPTABLE(0x58, 0xfb, 2, relw), /* 0x58/0x5c */
+	OPTABLE(0x59, 0xfb, 1, reg),	/* 0x59/0x5b */
+ OPTABLE(0x5a, 0xfb, 2, jabs), /* 0x5a/0x5e */
+	OPTABLE(0x5b, 0xfb, 2, ind),	/* 0x5b/0x5f */
+ OPTABLE(0x60, 0xe8, 1, none), /* 0x60-0x67/0x70-0x77 */
+ OPTABLE(0x68, 0xfa, 1, none), /* 0x68-0x69/0x6c-0x6d */
+ OPTABLE(0x6a, 0xfe, -2, none), /* 0x6a-0x6b */
+ OPTABLE(0x6e, 0xfe, 2, none), /* 0x6e-0x6f */
+ OPTABLE(0x78, 0xff, 4, none), /* 0x78 */
+ OPTABLE(0x79, 0xff, 2, none), /* 0x79 */
+ OPTABLE(0x7a, 0xff, 3, none), /* 0x7a */
+ OPTABLE(0x7b, 0xff, 2, none), /* 0x7b */
+	OPTABLE(0x7c, 0xfc, 2, none),	/* 0x7c-0x7f */
+ OPTABLE(0x80, 0x80, 1, none), /* 0x80-0xff */
};
static const struct optable optable_1[] = {
- OPTABLE(0x00,0xff,-3,none), /* 0x0100 */
- OPTABLE(0x40,0xf0,-3,none), /* 0x0140-0x14f */
- OPTABLE(0x80,0xf0, 1,none), /* 0x0180-0x018f */
- OPTABLE(0xc0,0xc0, 2,none), /* 0x01c0-0x01ff */
+ OPTABLE(0x00, 0xff, -3, none), /* 0x0100 */
+ OPTABLE(0x40, 0xf0, -3, none), /* 0x0140-0x14f */
+ OPTABLE(0x80, 0xf0, 1, none), /* 0x0180-0x018f */
+ OPTABLE(0xc0, 0xc0, 2, none), /* 0x01c0-0x01ff */
};
static const struct optable optable_2[] = {
- OPTABLE(0x00,0x20, 2,none), /* 0x6a0?/0x6a8?/0x6b0?/0x6b8? */
- OPTABLE(0x20,0x20, 3,none), /* 0x6a2?/0x6aa?/0x6b2?/0x6ba? */
+ OPTABLE(0x00, 0x20, 2, none), /* 0x6a0?/0x6a8?/0x6b0?/0x6b8? */
+ OPTABLE(0x20, 0x20, 3, none), /* 0x6a2?/0x6aa?/0x6b2?/0x6ba? */
};
static const struct optable optable_3[] = {
- OPTABLE(0x69,0xfb, 2,none), /* 0x010069/0x01006d/014069/0x01406d */
- OPTABLE(0x6b,0xff,-4,none), /* 0x01006b/0x01406b */
- OPTABLE(0x6f,0xff, 3,none), /* 0x01006f/0x01406f */
- OPTABLE(0x78,0xff, 5,none), /* 0x010078/0x014078 */
+ OPTABLE(0x69, 0xfb, 2, none), /* 0x010069/0x01006d/014069/0x01406d */
+ OPTABLE(0x6b, 0xff, -4, none), /* 0x01006b/0x01406b */
+ OPTABLE(0x6f, 0xff, 3, none), /* 0x01006f/0x01406f */
+ OPTABLE(0x78, 0xff, 5, none), /* 0x010078/0x014078 */
};
static const struct optable optable_4[] = {
- OPTABLE(0x00,0x78, 3,none), /* 0x0100690?/0x01006d0?/0140690/0x01406d0?/0x0100698?/0x01006d8?/0140698?/0x01406d8? */
- OPTABLE(0x20,0x78, 4,none), /* 0x0100692?/0x01006d2?/0140692/0x01406d2?/0x010069a?/0x01006da?/014069a?/0x01406da? */
+/* 0x0100690?/0x01006d0?/0140690?/0x01406d0?/
+ 0x0100698?/0x01006d8?/0140698?/0x01406d8? */
+ OPTABLE(0x00, 0x78, 3, none),
+/* 0x0100692?/0x01006d2?/0140692?/0x01406d2?/
+ 0x010069a?/0x01006da?/014069a?/0x01406da? */
+ OPTABLE(0x20, 0x78, 4, none),
};
static const struct optables_list {
};
const unsigned char condmask[] = {
- 0x00,0x40,0x01,0x04,0x02,0x08,0x10,0x20
+ 0x00, 0x40, 0x01, 0x04, 0x02, 0x08, 0x10, 0x20
};
static int isbranch(struct task_struct *task,int reson)
"bld #2,%w0\n\t"
"bor #0,%w0\n\t"
"bst #6,%w0\n\t"
- :"=&r"(cond)::"cc");
+ : "=&r"(cond) :: "cc");
cond &= condmask[reson >> 1];
if (!(reson & 1))
return cond == 0;
return cond != 0;
}
-static unsigned short *getnextpc(struct task_struct *child, unsigned short *pc)
+static unsigned short *decode(struct task_struct *child,
+			      const struct optable *op,
+			      unsigned char *fetch_p, unsigned short *pc)
+{
+	unsigned long addr;
+	unsigned long *sp;
+	int regno;
+	/* opcode byte just consumed by the caller via *fetch_p++ */
+	unsigned char inst = *(fetch_p - 1);
+
+	switch (op->type) {
+	case none:
+		return pc + op->length;
+	case jabs:
+		addr = *(unsigned long *)pc;
+		return (unsigned short *)(addr & 0x00ffffff);
+	case ind:
+		addr = *pc & 0xff;
+		return (unsigned short *)(*(unsigned long *)addr);
+	case ret:
+		sp = (unsigned long *)h8300_get_reg(child, PT_USP);
+		/* user stack frames
+		   |   er0  | temporary saved
+		   +--------+
+		   |   exp  | exception stack frames
+		   +--------+
+		   | ret pc | userspace return address
+		*/
+		return (unsigned short *)(*(sp+2) & 0x00ffffff);
+	case reg:
+		regno = (*pc >> 4) & 0x07;
+		if (regno == 0)
+			addr = h8300_get_reg(child, PT_ER0);
+		else
+			addr = h8300_get_reg(child, regno-1+PT_ER1);
+		return (unsigned short *)addr;
+	case relb:
+		if (inst == 0x55 || isbranch(child, inst & 0x0f))
+			pc = (unsigned short *)((unsigned long)pc +
+						((signed char)(*fetch_p)));
+		return pc+1; /* skip myself */
+	case relw:
+		if (inst == 0x5c || isbranch(child, (*fetch_p & 0xf0) >> 4))
+			pc = (unsigned short *)((unsigned long)pc +
+						((signed short)(*(pc+1))));
+		return pc+2; /* skip myself */
+	}
+	return NULL;	/* not reached: all jump_type values handled above */
+}
+
+static unsigned short *nextpc(struct task_struct *child, unsigned short *pc)
{
const struct optable *op;
unsigned char *fetch_p;
unsigned char inst;
-	unsigned long addr;
-	unsigned long *sp;
-	int op_len,regno;
+	int op_len;
+
op = optables[0].ptr;
op_len = optables[0].size;
fetch_p = (unsigned char *)pc;
op = optables[-op->length].ptr;
op_len = optables[-op->length].size + 1;
inst = *fetch_p++;
- } else {
- switch (op->type) {
- case none:
- return pc + op->length;
- case jabs:
- addr = *(unsigned long *)pc;
- return (unsigned short *)(addr & 0x00ffffff);
- case ind:
- addr = *pc & 0xff;
- return (unsigned short *)(*(unsigned long *)addr);
- case ret:
- sp = (unsigned long *)h8300_get_reg(child, PT_USP);
- /* user stack frames
- | er0 | temporary saved
- +--------+
- | exp | exception stack frames
- +--------+
- | ret pc | userspace return address
- */
- return (unsigned short *)(*(sp+2) & 0x00ffffff);
- case reg:
- regno = (*pc >> 4) & 0x07;
- if (regno == 0)
- addr = h8300_get_reg(child, PT_ER0);
- else
- addr = h8300_get_reg(child, regno-1+PT_ER1);
- return (unsigned short *)addr;
- case relb:
- if (inst == 0x55 || isbranch(child,inst & 0x0f))
- pc = (unsigned short *)((unsigned long)pc +
- ((signed char)(*fetch_p)));
- return pc+1; /* skip myself */
- case relw:
- if (inst == 0x5c || isbranch(child,(*fetch_p & 0xf0) >> 4))
- pc = (unsigned short *)((unsigned long)pc +
- ((signed short)(*(pc+1))));
- return pc+2; /* skip myself */
- }
- }
+	} else {
+		return decode(child, op, fetch_p, pc);
+	}
} else
op++;
} while(--op_len > 0);
void user_enable_single_step(struct task_struct *child)
{
- unsigned short *nextpc;
- nextpc = getnextpc(child,(unsigned short *)h8300_get_reg(child, PT_PC));
- child->thread.breakinfo.addr = nextpc;
- child->thread.breakinfo.inst = *nextpc;
- *nextpc = BREAKINST;
+ unsigned short *next;
+
+ next = nextpc(child, (unsigned short *)h8300_get_reg(child, PT_PC));
+ child->thread.breakinfo.addr = next;
+ child->thread.breakinfo.inst = *next;
+ *next = BREAKINST;
}
asmlinkage void trace_trap(unsigned long bp)
{
if ((unsigned long)current->thread.breakinfo.addr == bp) {
user_disable_single_step(current);
- force_sig(SIGTRAP,current);
+ force_sig(SIGTRAP, current);
} else
- force_sig(SIGILL,current);
+ force_sig(SIGILL, current);
}
return task->thread.usp + sizeof(long)*2 + 2;
case PT_CCR:
case PT_EXR:
- return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
+ return *(unsigned short *)(task->thread.esp0 +
+ h8300_register_offset[regno]);
default:
- return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]);
+ return *(unsigned long *)(task->thread.esp0 +
+ h8300_register_offset[regno]);
}
}
case PT_USP:
task->thread.usp = data - sizeof(long)*2 - 2;
case PT_CCR:
- oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
+ oldccr = *(unsigned short *)(task->thread.esp0 +
+ h8300_register_offset[regno]);
oldccr &= ~CCR_MASK;
data &= CCR_MASK;
data |= oldccr;
- *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
+ *(unsigned short *)(task->thread.esp0 +
+ h8300_register_offset[regno]) = data;
break;
case PT_EXR:
/* exr modify not support */
return -EIO;
default:
- *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
+ *(unsigned long *)(task->thread.esp0 +
+ h8300_register_offset[regno]) = data;
break;
}
return 0;
/* disable singlestep */
void user_disable_single_step(struct task_struct *child)
{
- *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) &= ~EXR_TRACE;
+ *(unsigned short *)(child->thread.esp0 +
+ h8300_register_offset[PT_EXR]) &= ~EXR_TRACE;
}
/* enable singlestep */
void user_enable_single_step(struct task_struct *child)
{
- *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) |= EXR_TRACE;
+ *(unsigned short *)(child->thread.esp0 +
+ h8300_register_offset[PT_EXR]) |= EXR_TRACE;
}
asmlinkage void trace_trap(unsigned long bp)
{
(void)bp;
- force_sig(SIGTRAP,current);
+ force_sig(SIGTRAP, current);
}
-
init_mm.start_code = (unsigned long) _stext;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) 0;
+ init_mm.brk = (unsigned long) 0;
pr_notice("\r\n\nuClinux " CPU "\n");
pr_notice("Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n",
cpu,
- clockfreq/1000,clockfreq%1000,
- (loops_per_jiffy*HZ)/500000,((loops_per_jiffy*HZ)/5000)%100,
+ clockfreq/1000, clockfreq%1000,
+ (loops_per_jiffy*HZ)/500000,
+ ((loops_per_jiffy*HZ)/5000)%100,
(loops_per_jiffy*HZ));
return 0;
static void *c_start(struct seq_file *m, loff_t *pos)
{
- return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL;
+ return *pos < num_possible_cpus() ?
+ ((void *) 0x12345678) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
* That makes the cache flush below easier.
*/
-struct rt_sigframe
-{
+struct rt_sigframe {
long dummy_er0;
long dummy_vector;
#if defined(CONFIG_CPU_H8S)
struct siginfo info;
struct ucontext uc;
int sig;
-} __attribute__((aligned(2),packed));
+} __packed __aligned(2);
static inline int
restore_sigcontext(struct sigcontext *usc, int *pd0)
goto badframe;
set_current_blocked(&set);
-
+
if (restore_sigcontext(&frame->uc.uc_mcontext, &er0))
goto badframe;
return (void *)((usp - frame_size) & -8UL);
}
-static int setup_rt_frame (struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
{
struct rt_sigframe *frame;
int err = 0;
/* sub.l er0,er0; mov.b #__NR_rt_sigreturn,r0l; trapa #0 */
err |= __put_user(0x1a80f800 + (__NR_rt_sigreturn & 0xff),
(unsigned long *)(frame->retcode + 0));
- err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
+ err |= __put_user(0x5700,
+ (unsigned short *)(frame->retcode + 4));
}
err |= __put_user(ret, &frame->pretcode);
goto give_sigsegv;
/* Set up registers for signal handler */
- wrusp ((unsigned long) frame);
+ wrusp((unsigned long) frame);
regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
regs->er0 = (current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
regs->pc -= 2;
} else
regs->er0 = -EINTR;
- break;
+ break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->er0 = -EINTR;
* OK, we're invoking a handler
*/
static void
-handle_signal(struct ksignal *ksig, struct pt_regs * regs)
+handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
register const int fd __asm__("er0") = 1; /* stdout */
register const char *_ptr __asm__("er1") = ptr;
register const unsigned _len __asm__("er2") = len;
+
__asm__(".byte 0x5e,0x00,0x00,0xc7\n\t" /* jsr @0xc7 (sys_write) */
- ::"g"(fd),"g"(_ptr),"g"(_len));
+ : : "g"(fd), "g"(_ptr), "g"(_len));
}
static struct console sim_console = {
* platform.
*/
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/stat.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/ipc.h>
-
#include <asm/setup.h>
#include <asm/uaccess.h>
-#include <asm/traps.h>
-#include <asm/unistd.h>
/* sys_cacheflush -- no support. */
asmlinkage int
{
return PAGE_SIZE;
}
-
-#if defined(CONFIG_SYSCALL_PRINT)
-asmlinkage void syscall_print(void *dummy,...)
-{
- struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4);
- printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n",
- ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0);
-}
-#endif
#include <linux/sys.h>
#include <asm/linkage.h>
#include <asm/unistd.h>
-
+
.global _sys_call_table
#define CALL(x) .long x
static irqreturn_t timer16_interrupt(int irq, void *dev_id)
{
struct timer16_priv *p = (struct timer16_priv *)dev_id;
+
ctrl_outb(ctrl_inb(p->mapcommon + TISRA) & ~p->imfa,
p->mapcommon + TISRA);
memset(p, 0, sizeof(*p));
p->pdev = pdev;
- res[REG_CH] = platform_get_resource(p->pdev, IORESOURCE_MEM, REG_CH);
- res[REG_COMM] = platform_get_resource(p->pdev, IORESOURCE_MEM, REG_COMM);
+ res[REG_CH] = platform_get_resource(p->pdev,
+ IORESOURCE_MEM, REG_CH);
+ res[REG_COMM] = platform_get_resource(p->pdev,
+ IORESOURCE_MEM, REG_COMM);
if (!res[REG_CH] || !res[REG_COMM]) {
dev_err(&p->pdev->dev, "failed to get I/O memory\n");
return -ENXIO;
p->imfa = 1 << cfg->imfa;
p->imiea = 1 << cfg->imiea;
p->ced.name = pdev->name;
- p->ced.features = CLOCK_EVT_FEAT_PERIODIC |CLOCK_EVT_FEAT_ONESHOT;
+ p->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
p->ced.rating = cfg->rating;
p->ced.cpumask = cpumask_of(0);
p->ced.set_next_event = timer16_clock_event_next;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
+		dev_err(&pdev->dev, "failed to allocate driver data. out of memory.\n");
return -ENOMEM;
}
{
struct timer8_priv *p = dev_id;
- switch(p->mode) {
+ switch (p->mode) {
case H8300_TMR8_CLKSRC:
ctrl_outb(ctrl_inb(p->mapbase + _8TCSR) & ~0x20,
p->mapbase + _8TCSR);
p->total_cycles = 0;
ctrl_outw(0, p->mapbase + _8TCNT);
ctrl_outw(0x2400 | p->div, p->mapbase + _8TCR);
-
+
p->cs_enabled = true;
return 0;
}
WARN_ON(!p->cs_enabled);
- ctrl_outb(0, p->mapbase + _8TCR);
+ ctrl_outb(0, p->mapbase + _8TCR);
p->cs_enabled = false;
}
#define CMI 0
#define OVI 1
-static int __init timer8_setup(struct timer8_priv *p, struct platform_device *pdev)
+static int __init timer8_setup(struct timer8_priv *p,
+ struct platform_device *pdev)
{
struct h8300_timer8_config *cfg = dev_get_platdata(&pdev->dev);
struct resource *res;
p->clk.cs.mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
p->clk.cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
- if ((ret = setup_irq(irq[OVI], &p->irqaction)) < 0) {
+ ret = setup_irq(irq[OVI], &p->irqaction);
+ if (ret < 0) {
dev_err(&p->pdev->dev,
"failed to request irq %d\n", irq[OVI]);
return ret;
break;
case H8300_TMR8_CLKEVTDEV:
p->clk.ced.name = pdev->name;
- p->clk.ced.features = CLOCK_EVT_FEAT_PERIODIC |
+ p->clk.ced.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
p->clk.ced.rating = cfg->rating;
p->clk.ced.cpumask = cpumask_of(0);
p->clk.ced.set_next_event = timer8_clock_event_next;
p->clk.ced.set_mode = timer8_clock_event_mode;
- if ((ret = setup_irq(irq[CMI], &p->irqaction)) < 0) {
+
+ ret = setup_irq(irq[CMI], &p->irqaction);
+ if (ret < 0) {
dev_err(&p->pdev->dev,
"failed to request irq %d\n", irq[CMI]);
return ret;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
+		dev_err(&pdev->dev, "failed to allocate driver data out of memory\n");
return -ENOMEM;
}
/*
* linux/arch/h8300/boot/traps.c -- general exception handling code
* H8/300 support Yoshinori Sato <ysato@users.sourceforge.jp>
- *
+ *
* Cloned from Linux/m68k.
*
* No original Copyright holder listed,
{
}
-void __init trap_init (void)
+void __init trap_init(void)
{
}
unsigned char *tp;
int i;
- printk("\nCURRENT PROCESS:\n\n");
- printk("COMM=%s PID=%d\n", current->comm, current->pid);
+ pr_info("\nCURRENT PROCESS:\n\n");
+ pr_info("COMM=%s PID=%d\n", current->comm, current->pid);
if (current->mm) {
- printk("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
+ pr_info("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
(int) current->mm->start_code,
(int) current->mm->end_code,
(int) current->mm->start_data,
(int) current->mm->end_data,
(int) current->mm->end_data,
(int) current->mm->brk);
- printk("USER-STACK=%08x KERNEL-STACK=%08lx\n\n",
+ pr_info("USER-STACK=%08x KERNEL-STACK=%08lx\n\n",
(int) current->mm->start_stack,
(int) PAGE_SIZE+(unsigned long)current);
}
show_regs(fp);
- printk("\nCODE:");
+ pr_info("\nCODE:");
tp = ((unsigned char *) fp->pc) - 0x20;
for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
if ((i % 0x10) == 0)
- printk("\n%08x: ", (int) (tp + i));
- printk("%08x ", (int) *sp++);
+			pr_cont("\n%08x: ", (int) (tp + i));
+		pr_cont("%08x ", (int) *sp++);
}
- printk("\n");
+	pr_cont("\n");
- printk("\nKERNEL STACK:");
+ pr_info("\nKERNEL STACK:");
tp = ((unsigned char *) fp) - 0x40;
for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
if ((i % 0x10) == 0)
- printk("\n%08x: ", (int) (tp + i));
- printk("%08x ", (int) *sp++);
+			pr_cont("\n%08x: ", (int) (tp + i));
+		pr_cont("%08x ", (int) *sp++);
}
- printk("\n");
+	pr_cont("\n");
if (STACK_MAGIC != *(unsigned long *)((unsigned long)current+PAGE_SIZE))
- printk("(Possibly corrupted stack page??)\n");
+ pr_info("(Possibly corrupted stack page??)\n");
- printk("\n\n");
+ pr_info("\n\n");
}
void die(const char *str, struct pt_regs *fp, unsigned long err)
console_verbose();
spin_lock_irq(&die_lock);
report_bug(fp->pc, fp);
- printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++diecount);
+ pr_crit("%s: %04lx [#%d] ", str, err & 0xffff, ++diecount);
dump(fp);
spin_unlock_irq(&die_lock);
extern char _start, _etext;
#define check_kernel_text(addr) \
- ((addr >= (unsigned long)(&_start)) && \
- (addr < (unsigned long)(&_etext)))
+ ((addr >= (unsigned long)(&_start)) && \
+ (addr < (unsigned long)(&_etext)))
static int kstack_depth_to_print = 24;
stack = esp;
- printk("Stack from %08lx:", (unsigned long)stack);
+ pr_info("Stack from %08lx:", (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
if (((unsigned long)stack & (THREAD_SIZE - 1)) == 0)
break;
if (i % 8 == 0)
printk("\n ");
- printk(" %08lx", *stack++);
+		pr_cont(" %08lx", *stack++);
}
- printk("\nCall Trace:");
+ pr_info("\nCall Trace:");
i = 0;
stack = esp;
while (((unsigned long)stack & (THREAD_SIZE - 1)) != 0) {
*/
if (check_kernel_text(addr)) {
if (i % 4 == 0)
- printk("\n ");
- printk(" [<%08lx>]", addr);
+				pr_cont("\n       ");
+			pr_cont(" [<%08lx>]", addr);
i++;
}
}
- printk("\n");
+	pr_cont("\n");
}
void show_trace_task(struct task_struct *tsk)
{
- show_stack(tsk,(unsigned long *)tsk->thread.esp0);
+ show_stack(tsk, (unsigned long *)tsk->thread.esp0);
}
#include <asm/linkage.h>
-#if defined(CONFIG_CPU_H8300H)
+#if defined(CONFIG_CPU_H8300H)
.h8300h
#endif
-#if defined(CONFIG_CPU_H8S)
+#if defined(CONFIG_CPU_H8S)
.h8300s
#endif
.text
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+*/
#define BITS_PER_UNIT 8
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-
+
/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
of the assembly has to go. */
return x;
}
-static unsigned long do_csum(const unsigned char * buff, int len)
+static unsigned long do_csum(const unsigned char *buff, int len)
{
int odd, count;
unsigned long result = 0;
}
count >>= 1; /* nr of 32-bit words.. */
if (count) {
- unsigned long carry = 0;
+ unsigned long carry = 0;
+
do {
unsigned long w = *(unsigned long *) buff;
+
count--;
buff += 4;
result += carry;
*/
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
- return (__force __sum16)~do_csum(iph,ihl*4);
+ return (__force __sum16)~do_csum(iph, ihl*4);
}
/*
*/
__sum16 ip_compute_csum(const void *buff, int len)
{
- return (__force __sum16)~do_csum(buff,len);
+ return (__force __sum16)~do_csum(buff, len);
}
/*
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *csum_err)
{
- if (csum_err) *csum_err = 0;
+ if (csum_err)
+ *csum_err = 0;
memcpy(dst, (__force const void *)src, len);
return csum_partial(dst, len, sum);
}
#include <asm/linkage.h>
-#if defined(CONFIG_CPU_H8300H)
+#if defined(CONFIG_CPU_H8300H)
.h8300h
#endif
-#if defined(CONFIG_CPU_H8S)
+#if defined(CONFIG_CPU_H8S)
.h8300s
#endif
.text
memcpy:
mov.l er2,er2
bne 1f
- rts
-1:
+ rts
+1:
;; address check
bld #0,r0l
bxor #0,r1l
adds #1,er0
dec.l #1,er2
beq 3f
-1:
+1:
;; n < sizeof(unsigned long) check
sub.l er4,er4
adds #4,er4 ; loop count check value
cmp.l er4,er2
blo 2f
;; unsigned long copy
-1:
+1:
mov.l @er1,er3
mov.l er3,@er0
adds #4,er0
adds #4,er1
subs #4,er2
cmp.l er4,er2
- bcc 1b
+ bcc 1b
;; rest
-2:
+2:
mov.l er2,er2
beq 3f
-1:
+1:
mov.b @er1,r3l
mov.b r3l,@er0
adds #1,er1
rts
;; odd <- even / even <- odd
-4:
+4:
mov.l er4,er3
mov.l er2,er4
mov.l er5,er2
mov.l er2,er5
mov.l er3,er4
rts
+
+ .end
mov.l er2,er2
bne 1f
sub.l er0,er0
- rts
+ rts
1:
mov.l er4,@-sp
sub.l er3,er3
-2:
+2:
mov.b @er1+,r4l
mov.b r4l,@er0
adds #1,er0
bne 2b
3:
dec.l #1,er2
-4:
+4:
mov.b r4l,@er0
adds #1,er0
dec.l #1,er2
* linux/arch/h8300/mm/fault.c
*
* Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
- * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
+ * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
*
* Based on:
*
unsigned long error_code)
{
#ifdef DEBUG
- printk ("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n",
- regs->sr, regs->pc, address, error_code);
+ pr_debug("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n",
+ regs->sr, regs->pc, address, error_code);
#endif
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if ((unsigned long) address < PAGE_SIZE) {
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
- } else
- printk(KERN_ALERT "Unable to handle kernel access");
- printk(" at virtual address %08lx\n",address);
+ if ((unsigned long) address < PAGE_SIZE)
+ pr_alert("Unable to handle kernel NULL pointer dereference");
+ else
+ pr_alert("Unable to handle kernel access");
+ printk(" at virtual address %08lx\n", address);
if (!user_mode(regs))
die("Oops", regs, error_code);
do_exit(SIGKILL);
*
* Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
* Kenneth Albanowski <kjahds@kjahds.com>,
- * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
+ * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
*
* Based on:
*
unsigned long end_mem = memory_end & PAGE_MASK;
#ifdef DEBUG
- printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
- start_mem, end_mem);
+ pr_debug("start_mem is %#lx\nvirtual_end is %#lx\n",
+ start_mem, end_mem);
#endif
/*
/*
* Set up SFC/DFC registers (user data space).
*/
- set_fs (USER_DS);
+ set_fs(USER_DS);
#ifdef DEBUG
- printk ("before free_area_init\n");
+ pr_debug("before free_area_init\n");
- printk ("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
- start_mem, end_mem);
+ pr_debug("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
+ start_mem, end_mem);
#endif
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
- zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
- zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
+ zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
+ zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_HIGHMEM] = 0;
#endif
/*
* linux/arch/h8300/mm/kmap.c
- *
+ *
* Based on
* linux/arch/m68knommu/mm/kmap.c
*
#include <asm/traps.h>
#include <asm/io.h>
-void cache_clear (unsigned long paddr, int len)
+void cache_clear(unsigned long paddr, int len)
{
}
-void cache_push (unsigned long paddr, int len)
+void cache_push(unsigned long paddr, int len)
{
}
-void cache_push_v (unsigned long vaddr, int len)
+void cache_push_v(unsigned long vaddr, int len)
{
}
*/
unsigned long kernel_map(unsigned long paddr, unsigned long size,
- int nocacheflag, unsigned long *memavailp )
+ int nocacheflag, unsigned long *memavailp)
{
return paddr;
}