#ifndef __ASM_MIPS_DELAY_H
#define __ASM_MIPS_DELAY_H

/*
 * Spin for the given number of iterations of the two-instruction loop
 * below.  The decrement sits in the branch delay slot, so the assembler
 * must not reorder or fill it (.set noreorder).
 */
extern __inline__ void __delay(int loops)
{
        __asm__ __volatile__ (
                ".set\tnoreorder\n\t"
                ".set\tnoat\n\t"
                "1:\tbne\t$0,%0,1b\n\t"
                "subu\t%0,%0,1\n\t"
                ".set\tat\n\t"
                ".set\treorder"
                :"=r" (loops)
                :"0" (loops));
}

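/*
 * Usage sketch (an illustration, not part of the original header): one
 * "loop" is one pass through the two-instruction loop above, and
 * loops_per_sec is presumed to be calibrated elsewhere at boot, so a
 * rough fixed-length busy wait can be built directly on __delay().
 */
#if 0   /* illustrative only, not compiled */
extern unsigned long loops_per_sec;     /* assumed boot-time calibrated value */

static void spin_about_one_ms(void)
{
        __delay(loops_per_sec / 1000);  /* roughly 1 ms of loop iterations */
}
#endif
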
/*
 * Division by multiplication: you don't have to worry about
 * loss of precision.
 *
 * Use only for very small delays ( < 1 msec).  Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays.  This is a "reasonable" implementation, though (and the
 * first constant multiplication gets optimized away if the delay is
 * a constant).
 */
extern __inline__ void udelay(unsigned long usecs)
{
        usecs *= 0x000010c6;            /* 2**32 / 1000000 */
        __asm__("multu\t%0,%1\n\t"      /* 64-bit product with loops_per_sec */
                "mfhi\t%0"              /* keep the upper 32 bits */
                :"=r" (usecs)
                :"0" (usecs), "r" (loops_per_sec));
        __delay(usecs);
}

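/*
 * Arithmetic sketch (illustration only, not part of the original header):
 * multiplying usecs by 0x10c6 (2^32/10^6, rounded down) and keeping the
 * upper 32 bits of its product with loops_per_sec is the "division by
 * multiplication" described above, i.e. usecs * loops_per_sec / 10^6
 * without an explicit divide.  The same computation in plain C, assuming
 * a 64-bit intermediate type is available:
 */
#if 0   /* illustrative only, not compiled */
static unsigned long usecs_to_loops(unsigned long usecs,
                                    unsigned long loops_per_sec)
{
        unsigned long scaled = usecs * 0x000010c6UL;    /* usecs * 2^32/10^6 */

        /* upper 32 bits of the 64-bit product: usecs * loops_per_sec / 10^6 */
        return (unsigned long) (((unsigned long long) scaled *
                                 loops_per_sec) >> 32);
}
#endif
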
/*
 * 64-bit integers means we don't have to worry about overflow as
 * on some other architectures..
 */
extern __inline__ unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c)
{
        return (a*b)/c;
}

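/*
 * Hypothetical use (illustration only, not part of the original header):
 * with 64-bit longs, as the comment above assumes, the intermediate
 * product a*b keeps full precision, so muldiv() can scale one unit into
 * another directly, e.g. timer ticks into microseconds.
 */
#if 0   /* illustrative only, not compiled */
static unsigned long ticks_to_usecs(unsigned long ticks, unsigned long hz)
{
        return muldiv(ticks, 1000000, hz);      /* ticks * 10^6 / hz */
}
#endif
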
#endif /* __ASM_MIPS_DELAY_H */