root/arch/i386/kernel/irq.c


DEFINITIONS

This source file includes the following definitions.
  1. mask_irq
  2. unmask_irq
  3. disable_irq
  4. enable_irq
  5. no_action
  6. math_error_irq
  7. get_irq_list
  8. get_smp_prof_list
  9. do_IRQ
  10. do_fast_IRQ
  11. setup_x86_irq
  12. request_irq
  13. free_irq
  14. probe_irq_on
  15. probe_irq_off
  16. init_IRQ

/*
 *      linux/arch/i386/kernel/irq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

/*
 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/smp.h>

#define CR0_NE 32

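/*
 * cache_21 and cache_A1 below are software copies of the interrupt mask
 * registers of the two 8259A PICs (master at I/O port 0x21, slave at
 * 0xA1).  Keeping cached copies lets mask_irq()/unmask_irq() do a
 * read-modify-write of the mask without having to read the PIC itself.
 * For example, masking IRQ 10 sets bit (10 & 7) == 2 in cache_A1 and
 * writes the new mask out to port 0xA1.
 */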
static unsigned char cache_21 = 0xff;
static unsigned char cache_A1 = 0xff;

#ifdef __SMP_PROF__
static unsigned int int_count[NR_CPUS][NR_IRQS] = {{0},};
#endif

static inline void mask_irq(unsigned int irq_nr)
{
        unsigned char mask;

        mask = 1 << (irq_nr & 7);
        if (irq_nr < 8) {
                cache_21 |= mask;
                outb(cache_21,0x21);
        } else {
                cache_A1 |= mask;
                outb(cache_A1,0xA1);
        }
}

static inline void unmask_irq(unsigned int irq_nr)
{
        unsigned char mask;

        mask = ~(1 << (irq_nr & 7));
        if (irq_nr < 8) {
                cache_21 &= mask;
                outb(cache_21,0x21);
        } else {
                cache_A1 &= mask;
                outb(cache_A1,0xA1);
        }
}

void disable_irq(unsigned int irq_nr)
{
        unsigned long flags;

        save_flags(flags);
        cli();
        mask_irq(irq_nr);
        restore_flags(flags);
}

void enable_irq(unsigned int irq_nr)
{
        unsigned long flags;
        save_flags(flags);
        cli();
        unmask_irq(irq_nr);
        restore_flags(flags);
}
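/*
 * Note that disable_irq()/enable_irq() simply mask and unmask the line
 * at the PIC: they are not nesting or reference counted here, so a
 * single enable_irq() unmasks the line no matter how many disable_irq()
 * calls preceded it.
 */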

/*
 * This builds up the IRQ handler stubs using some ugly macros in irq.h
 *
 * These macros create the low-level assembly IRQ routines that do all
 * the operations that are needed to keep the AT interrupt-controller
 * happy. They are also written to be fast - and to disable interrupts
 * as little as humanly possible.
 *
 * NOTE! These macros expand to three different handlers for each line: one
 * complete handler that does all the fancy stuff (including signal handling),
 * and one fast handler that is meant for simple IRQ's that want to be
 * atomic. The specific handler is chosen depending on the SA_INTERRUPT
 * flag when installing a handler. Finally, one "bad interrupt" handler, that
 * is used when no handler is present.
 *
 * The timer interrupt is handled specially to ensure that the jiffies
 * variable is updated at all times.  Specifically, the timer interrupt is
 * just like the complete handlers except that it is invoked with interrupts
 * disabled and should never re-enable them.  If other interrupts were
 * allowed to be processed while the timer interrupt is active, then the
 * other interrupts would have to avoid using the jiffies variable for delay
 * and interval timing operations to avoid hanging the system.
 */
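/*
 * For each line n, BUILD_IRQ() is expected to emit the three entry stubs
 * referenced by the tables further below: IRQn_interrupt (full handler),
 * fast_IRQn_interrupt and bad_IRQn_interrupt.  The first argument picks
 * the controller (FIRST = master PIC, SECOND = slave) and the last one
 * is that line's bit in the controller's mask register.
 */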
BUILD_TIMER_IRQ(FIRST,0,0x01)
BUILD_IRQ(FIRST,1,0x02)
BUILD_IRQ(FIRST,2,0x04)
BUILD_IRQ(FIRST,3,0x08)
BUILD_IRQ(FIRST,4,0x10)
BUILD_IRQ(FIRST,5,0x20)
BUILD_IRQ(FIRST,6,0x40)
BUILD_IRQ(FIRST,7,0x80)
BUILD_IRQ(SECOND,8,0x01)
BUILD_IRQ(SECOND,9,0x02)
BUILD_IRQ(SECOND,10,0x04)
BUILD_IRQ(SECOND,11,0x08)
BUILD_IRQ(SECOND,12,0x10)
#ifdef __SMP__
BUILD_MSGIRQ(SECOND,13,0x20)
#else
BUILD_IRQ(SECOND,13,0x20)
#endif
BUILD_IRQ(SECOND,14,0x40)
BUILD_IRQ(SECOND,15,0x80)
#ifdef __SMP__
BUILD_RESCHEDIRQ(16)
#endif

/*
 * Pointers to the low-level handlers: first the general ones, then the
 * fast ones, then the bad ones.
 */
static void (*interrupt[17])(void) = {
        IRQ0_interrupt, IRQ1_interrupt, IRQ2_interrupt, IRQ3_interrupt,
        IRQ4_interrupt, IRQ5_interrupt, IRQ6_interrupt, IRQ7_interrupt,
        IRQ8_interrupt, IRQ9_interrupt, IRQ10_interrupt, IRQ11_interrupt,
        IRQ12_interrupt, IRQ13_interrupt, IRQ14_interrupt, IRQ15_interrupt
#ifdef __SMP__
        ,IRQ16_interrupt
#endif
};

static void (*fast_interrupt[16])(void) = {
        fast_IRQ0_interrupt, fast_IRQ1_interrupt,
        fast_IRQ2_interrupt, fast_IRQ3_interrupt,
        fast_IRQ4_interrupt, fast_IRQ5_interrupt,
        fast_IRQ6_interrupt, fast_IRQ7_interrupt,
        fast_IRQ8_interrupt, fast_IRQ9_interrupt,
        fast_IRQ10_interrupt, fast_IRQ11_interrupt,
        fast_IRQ12_interrupt, fast_IRQ13_interrupt,
        fast_IRQ14_interrupt, fast_IRQ15_interrupt
};

static void (*bad_interrupt[16])(void) = {
        bad_IRQ0_interrupt, bad_IRQ1_interrupt,
        bad_IRQ2_interrupt, bad_IRQ3_interrupt,
        bad_IRQ4_interrupt, bad_IRQ5_interrupt,
        bad_IRQ6_interrupt, bad_IRQ7_interrupt,
        bad_IRQ8_interrupt, bad_IRQ9_interrupt,
        bad_IRQ10_interrupt, bad_IRQ11_interrupt,
        bad_IRQ12_interrupt, bad_IRQ13_interrupt,
        bad_IRQ14_interrupt, bad_IRQ15_interrupt
};

/*
 * Initial irq handlers.
 */

static void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

#ifdef __SMP__

/*
 * On SMP boards, irq13 is used for interprocessor interrupts (IPI's).
 */
static struct irqaction irq13 = { smp_message_irq, SA_INTERRUPT, 0, "IPI", NULL, NULL };

#else

/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (i.e. as explained in the Intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also leads to
 * races. The IBM designers who came up with it should
 * be shot.
 */

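/*
 * Writing to port 0xF0 below clears the motherboard's latched coprocessor
 * error, so further FPU error interrupts can get through; the actual
 * exception handling is then left to math_error() unless irq13 is being
 * ignored or there is no FPU.
 */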
static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
        outb(0,0xF0);
        if (ignore_irq13 || !hard_math)
                return;
        math_error();
}

static struct irqaction irq13 = { math_error_irq, 0, 0, "math error", NULL, NULL };

#endif

/*
 * IRQ2 is cascade interrupt to second interrupt controller
 */
static struct irqaction irq2  = { no_action, 0, 0, "cascade", NULL, NULL};

static struct irqaction *irq_action[16] = {
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL
};

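/*
 * get_irq_list() formats the registered handlers (this is what backs the
 * /proc interrupt listing).  A line of output might look like
 * (hypothetical devices):
 *
 *       4:    23010 + serial
 *      14:     8217   ide0
 *
 * where the '+' marks an SA_INTERRUPT (fast) handler and additional
 * sharers of the same line are appended after a comma.
 */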
int get_irq_list(char *buf)
{
        int i, len = 0;
        struct irqaction * action;

        for (i = 0 ; i < 16 ; i++) {
                action = irq_action[i];
                if (!action)
                        continue;
                len += sprintf(buf+len, "%2d: %8d %c %s",
                        i, kstat.interrupts[i],
                        (action->flags & SA_INTERRUPT) ? '+' : ' ',
                        action->name);
                for (action=action->next; action; action = action->next) {
                        len += sprintf(buf+len, ",%s %s",
                                (action->flags & SA_INTERRUPT) ? " +" : "",
                                action->name);
                }
                len += sprintf(buf+len, "\n");
        }
/*
 *      Linus - should you add NMI counts here ?????
 */
#ifdef __SMP_PROF__
        len+=sprintf(buf+len, "IPI: %8lu received\n",
                ipi_count);
#endif
        return len;
}

#ifdef __SMP_PROF__

int get_smp_prof_list(char *buf) {
        int i,j, len = 0;
        struct irqaction * action;
        unsigned long sum_spins = 0;
        unsigned long sum_spins_syscall = 0;
        unsigned long sum_spins_sys_idle = 0;
        unsigned long sum_smp_idle_count = 0;

        for (i=0;i<smp_num_cpus;i++) {
                int cpunum = cpu_logical_map[i];
                sum_spins+=smp_spins[cpunum];
                sum_spins_syscall+=smp_spins_syscall[cpunum];
                sum_spins_sys_idle+=smp_spins_sys_idle[cpunum];
                sum_smp_idle_count+=smp_idle_count[cpunum];
        }

        len += sprintf(buf+len,"CPUS: %10i \n", smp_num_cpus);
        len += sprintf(buf+len,"            SUM ");
        for (i=0;i<smp_num_cpus;i++)
                len += sprintf(buf+len,"        P%1d ",cpu_logical_map[i]);
        len += sprintf(buf+len,"\n");
        for (i = 0 ; i < NR_IRQS ; i++) {
                action = *(i + irq_action);
                if (!action || !action->handler)
                        continue;
                len += sprintf(buf+len, "%3d: %10d ",
                        i, kstat.interrupts[i]);
                for (j=0;j<smp_num_cpus;j++)
                        len+=sprintf(buf+len, "%10d ",
                                int_count[cpu_logical_map[j]][i]);
                len += sprintf(buf+len, "%c %s\n",
                        (action->flags & SA_INTERRUPT) ? '+' : ' ',
                        action->name);
                for (action=action->next; action; action = action->next) {
                        len += sprintf(buf+len, ",%s %s",
                                (action->flags & SA_INTERRUPT) ? " +" : "",
                                action->name);
                }
        }
        len+=sprintf(buf+len, "LCK: %10lu",
                sum_spins);

        for (i=0;i<smp_num_cpus;i++)
                len+=sprintf(buf+len," %10lu",smp_spins[cpu_logical_map[i]]);

        len +=sprintf(buf+len,"   spins from int\n");

        len+=sprintf(buf+len, "LCK: %10lu",
                sum_spins_syscall);

        for (i=0;i<smp_num_cpus;i++)
                len+=sprintf(buf+len," %10lu",smp_spins_syscall[cpu_logical_map[i]]);

        len +=sprintf(buf+len,"   spins from syscall\n");

        len+=sprintf(buf+len, "LCK: %10lu",
                sum_spins_sys_idle);

        for (i=0;i<smp_num_cpus;i++)
                len+=sprintf(buf+len," %10lu",smp_spins_sys_idle[cpu_logical_map[i]]);

        len +=sprintf(buf+len,"   spins from sysidle\n");
        len+=sprintf(buf+len,"IDLE %10lu",sum_smp_idle_count);

        for (i=0;i<smp_num_cpus;i++)
                len+=sprintf(buf+len," %10lu",smp_idle_count[cpu_logical_map[i]]);

        len +=sprintf(buf+len,"   idle ticks\n");

        len+=sprintf(buf+len, "IPI: %10lu   received\n",
                ipi_count);

        return len;
}
#endif


/*
 * do_IRQ handles IRQ's that have been installed without the
 * SA_INTERRUPT flag: it uses the full signal-handling return
 * and runs with other interrupts enabled. All relatively slow
 * IRQ's should use this format: notably the keyboard/timer
 * routines.
 */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
        struct irqaction * action = *(irq + irq_action);

#ifdef __SMP__
        if(smp_threads_ready && active_kernel_processor!=smp_processor_id())
                panic("IRQ %d: active processor set wrongly(%d not %d).\n", irq, active_kernel_processor, smp_processor_id());
#endif

        kstat.interrupts[irq]++;
#ifdef __SMP_PROF__
        int_count[smp_processor_id()][irq]++;
#endif
        while (action) {
                if (action->flags & SA_SAMPLE_RANDOM)
                        add_interrupt_randomness(irq);
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        }
}

/*
 * do_fast_IRQ handles IRQ's that don't need the fancy interrupt return
 * stuff - the handler is also running with interrupts disabled unless
 * it explicitly enables them later.
 */
asmlinkage void do_fast_IRQ(int irq)
{
        struct irqaction * action = *(irq + irq_action);
#ifdef __SMP__
        /* IRQ 13 is allowed - that's a TLB flush */
        if(smp_threads_ready && active_kernel_processor!=smp_processor_id() && irq!=13)
                panic("fast_IRQ %d: active processor set wrongly(%d not %d).\n", irq, active_kernel_processor, smp_processor_id());
#endif

        kstat.interrupts[irq]++;
#ifdef __SMP_PROF__
        int_count[smp_processor_id()][irq]++;
#endif
        while (action) {
                action->handler(irq, action->dev_id, NULL);
                action = action->next;
        }
}

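/*
 * The boot code programs the PICs so that IRQ 0-15 arrive at IDT vectors
 * 0x20-0x2f; setup_x86_irq() points the corresponding gate at the fast,
 * normal or "bad" stub and unmasks the line when the first handler is
 * installed.
 */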
int setup_x86_irq(int irq, struct irqaction * new)
{
        int shared = 0;
        struct irqaction *old, **p;
        unsigned long flags;

        p = irq_action + irq;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ))
                        return -EBUSY;

                /* Can't share interrupts unless both are same type */
                if ((old->flags ^ new->flags) & SA_INTERRUPT)
                        return -EBUSY;

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        if (new->flags & SA_SAMPLE_RANDOM)
                rand_initialize_irq(irq);

        save_flags(flags);
        cli();
        *p = new;

        if (!shared) {
                if (new->flags & SA_INTERRUPT)
                        set_intr_gate(0x20+irq,fast_interrupt[irq]);
                else
                        set_intr_gate(0x20+irq,interrupt[irq]);
                unmask_irq(irq);
        }
        restore_flags(flags);
        return 0;
}

int request_irq(unsigned int irq,
                void (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char * devname,
                void *dev_id)
{
        int retval;
        struct irqaction * action;

        if (irq > 15)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        retval = setup_x86_irq(irq, action);

        if (retval)
                kfree(action);
        return retval;
}
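/*
 * Typical use from a driver (sketch only; "mydev_interrupt", "mydev" and
 * "dev" are hypothetical):
 *
 *      int err = request_irq(irq, mydev_interrupt, SA_INTERRUPT, "mydev", dev);
 *      if (err)
 *              return err;
 *      ...
 *      free_irq(irq, dev);
 *
 * SA_INTERRUPT selects the fast handler path; both parties must pass
 * SA_SHIRQ (and agree on SA_INTERRUPT) for a line to be shared.
 */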

void free_irq(unsigned int irq, void *dev_id)
{
        struct irqaction * action, **p;
        unsigned long flags;

        if (irq > 15) {
                printk("Trying to free IRQ%d\n",irq);
                return;
        }
        for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
                if (action->dev_id != dev_id)
                        continue;

                /* Found it - now free it */
                save_flags(flags);
                cli();
                *p = action->next;
                if (!irq[irq_action]) {
                        mask_irq(irq);
                        set_intr_gate(0x20+irq,bad_interrupt[irq]);
                }
                restore_flags(flags);
                kfree(action);
                return;
        }
        printk("Trying to free free IRQ%d\n",irq);
}

unsigned long probe_irq_on (void)
{
        unsigned int i, irqs = 0, irqmask;
        unsigned long delay;

        /* first, enable any unassigned irqs */
        for (i = 15; i > 0; i--) {
                if (!irq_action[i]) {
                        enable_irq(i);
                        irqs |= (1 << i);
                }
        }

        /* wait for spurious interrupts to mask themselves out again */
        for (delay = jiffies + HZ/10; delay > jiffies; )
                /* about 100ms delay */;

        /* now filter out any obviously spurious interrupts */
        irqmask = (((unsigned int)cache_A1)<<8) | (unsigned int)cache_21;
        return irqs & ~irqmask;
}

int probe_irq_off (unsigned long irqs)
{
        unsigned int i, irqmask;

        irqmask = (((unsigned int)cache_A1)<<8) | (unsigned int)cache_21;
#ifdef DEBUG
        printk("probe_irq_off: irqs=0x%04lx irqmask=0x%04x\n", irqs, irqmask);
#endif
        irqs &= irqmask;
        if (!irqs)
                return 0;
        i = ffz(~irqs);
        if (irqs != (irqs & (1 << i)))
                i = -i;
        return i;
}
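/*
 * Typical autoprobe sequence (sketch only; the device-side step is up to
 * the caller):
 *
 *      unsigned long mask = probe_irq_on();
 *      ... make the device generate an interrupt ...
 *      irq = probe_irq_off(mask);
 *
 * probe_irq_off() returns the IRQ number if exactly one of the probed
 * lines fired, 0 if none did, and the negative of the first line found
 * if more than one fired.
 */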

void init_IRQ(void)
{
        int i;
        static unsigned char smptrap=0;
        if(smptrap)
                return;
        smptrap=1;

        /* set the clock to 100 Hz */
        outb_p(0x34,0x43);              /* binary, mode 2, LSB/MSB, ch 0 */
        outb_p(LATCH & 0xff , 0x40);    /* LSB */
        outb(LATCH >> 8 , 0x40);        /* MSB */
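        /*
         * The three writes above program PIT channel 0: 0x34 selects
         * LSB-then-MSB access, mode 2 (rate generator), binary counting.
         * LATCH (from the kernel headers) is the divisor of the PIT's
         * ~1.193182 MHz input clock; with HZ == 100 that is roughly 11932,
         * i.e. one timer tick about every 10 ms.
         */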
        for (i = 0; i < 16 ; i++)
                set_intr_gate(0x20+i,bad_interrupt[i]);
        /* This bit is a hack because we don't send timer messages to all processors yet */
        /* It has to be here .. it doesn't work if you put it down the bottom - assembler explodes 8) */
#ifdef __SMP__
        set_intr_gate(0x20+i, interrupt[i]);    /* IRQ '16' - IPI for rescheduling */
#endif
        request_region(0x20,0x20,"pic1");
        request_region(0xa0,0x20,"pic2");
        setup_x86_irq(2, &irq2);
        setup_x86_irq(13, &irq13);
}
