#ifndef __ASM_MIPS_DELAY_H
#define __ASM_MIPS_DELAY_H

extern __inline__ void __delay(int loops)
{
        __asm__ __volatile__ (
                ".set\tnoreorder\n\t"
                ".set\tnoat\n\t"
                "1:\tbne\t$0,%0,1b\n\t"
                "subu\t%0,%0,1\n\t"
                ".set\tat\n\t"
                ".set\treorder"
                :"=r" (loops)
                :"0" (loops));
}
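/*
 * Illustrative sketch, not part of the original header: the assembler
 * loop above is roughly equivalent to the C below (__delay_in_c is a
 * hypothetical name).  The real loop is kept in assembler so that each
 * iteration costs a fixed pair of instructions (branch + subtract),
 * which is what the boot-time calibration of loops_per_sec relies on;
 * a plain C loop could be rearranged or unrolled by the compiler.
 *
 *	static inline void __delay_in_c(int loops)
 *	{
 *		while (loops != 0)
 *			loops -= 1;
 *	}
 */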

/*
 * division by multiplication: you don't have to worry about
 * loss of precision.
 *
 * Use only for very small delays (< 1 msec).  Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays.  This is a "reasonable" implementation, though (and the
 * first constant multiplication gets optimized away if the delay is
 * a constant).
 */
extern __inline__ void udelay(unsigned long usecs)
{
        usecs *= 0x000010c6;            /* 2**32 / 1000000 */
        /*
         * Scale by loops_per_sec and keep the upper 32 bits of the
         * 64-bit product (multu/mfhi): the net effect is
         * usecs * loops_per_sec / 1000000, with no division.
         */
        __asm__("multu\t%0,%2\n\t"
                "mfhi\t%0"
                :"=r" (usecs)
                :"0" (usecs),"r" (loops_per_sec));
        __delay(usecs);
}
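/*
 * Worked example of the fixed-point arithmetic above, with made-up
 * figures (loops_per_sec == 5000000, usecs == 100):
 *
 *	100 * 0x10c6 == 100 * 4294      == 429400
 *	(429400 * 5000000) >> 32        == 499
 *
 * i.e. within one loop of the exact 100 * 5000000 / 1000000 == 500;
 * the slight underestimate comes from truncating 2**32/1000000 down
 * to 0x10c6.
 */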

/*
 * 64-bit integers mean we don't have to worry about overflow as
 * on some other architectures.
 */
extern __inline__ unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c)
{
        return (a*b)/c;
}
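/*
 * Hypothetical usage sketch, not from the original source: muldiv()
 * scales a by the ratio b/c, e.g. to turn a delay-loop count back
 * into microseconds using the calibration value:
 *
 *	unsigned long usecs = muldiv(loops, 1000000, loops_per_sec);
 */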

#endif /* __ASM_MIPS_DELAY_H */