root/kernel/dma.c


DEFINITIONS

This source file includes the following definitions.
  1. mutex_atomic_swap
  2. get_dma_list
  3. request_dma
  4. free_dma

/* $Id: dma.c,v 1.7 1994/12/28 03:35:33 root Exp root $
 * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
 *
 * Written by Hennus Bergman, 1992.
 *
 * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
 *   In the previous version the reported device could end up being wrong,
 *   if a device requested a DMA channel that was already in use.
 *   [It also happened to remove the sizeof(char *) == sizeof(int)
 *   assumption introduced because of those /proc/dma patches. -- Hennus]
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/dma.h>

/* A note on resource allocation:
 *
 * All drivers needing DMA channels should allocate and release them
 * through the public routines `request_dma()' and `free_dma()'.
 *
 * In order to avoid problems, all processes should allocate resources in
 * the same sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
 * When releasing them, first release the DMA, then release the IRQ.
 * If you don't, you may cause allocation requests to fail unnecessarily.
 * This doesn't really matter now, but it will once we get real semaphores
 * in the kernel.  (A sketch of this ordering follows below.)
 */
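
/* Illustrative only, hence the #if 0 below: a minimal sketch of the
 * ordering described above for a hypothetical driver that owns one IRQ
 * and one DMA channel.  MYDEV_DMA, "mydev" and the mydev_* names are
 * invented for this example; request_irq()/free_irq() are only referred
 * to in comments, since their argument lists are not declared here.
 */
#if 0
#define MYDEV_DMA 3                     /* hypothetical channel number */

static int mydev_open(void)
{
        int err;

        /* 1. allocate the IRQ first (request_irq(), not shown here) */

        /* 2. then allocate the DMA channel */
        err = request_dma(MYDEV_DMA, "mydev");
        if (err)                        /* -EINVAL or -EBUSY */
                return err;             /* (and free the IRQ again) */

        return 0;
}

static void mydev_release(void)
{
        /* release in the reverse order: the DMA channel first... */
        free_dma(MYDEV_DMA);

        /* ...then the IRQ (free_irq(), not shown here) */
}
#endif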


/* Channel n is busy iff dma_chan_busy[n] != 0.
 * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
 * DMA4 is reserved for cascading.
 */

struct dma_chan {
        int  lock;
        char *device_id;
};

static volatile struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] = {
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 1, "cascade" },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 }
};

/* Atomically swap memory location [32 bits] with `newval'.
 * This avoids the cli()/sti() junk and related problems.
 * [And it's faster too :-)]
 * Maybe this should be in include/asm/mutex.h and be used for
 * implementing kernel-semaphores as well.  (A sketch of that idea
 * follows this function.)
 */
static __inline__ unsigned int mutex_atomic_swap(volatile unsigned int * p, unsigned int newval)
{
        unsigned int semval = newval;

        /* If one of the operands for the XCHG instruction is a memory ref,
         * it makes the swap an uninterruptible RMW cycle.
         *
         * One operand must be in memory, the other in a register, otherwise
         * the swap may not be atomic.
         */

        asm __volatile__ ("xchgl %2, %0\n"
                          : /* outputs: semval   */ "=r" (semval)
                          : /* inputs: newval, p */ "0" (semval), "m" (*p)
                         );     /* p is a var, containing an address */
        return semval;
} /* mutex_atomic_swap */
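
/* Illustrative only, hence the #if 0 below: the kind of binary semaphore
 * the comment above has in mind.  `busy', try_lock() and unlock() are
 * invented names; mutex_atomic_swap() is the routine defined above.
 */
#if 0
static volatile unsigned int busy = 0;

static int try_lock(void)
{
        /* the old value is returned: 0 means we just acquired the lock */
        return mutex_atomic_swap(&busy, 1) == 0;
}

static void unlock(void)
{
        mutex_atomic_swap(&busy, 0);
}
#endif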


int get_dma_list(char *buf)
{
        int i, len = 0;

        for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
                if (dma_chan_busy[i].lock) {
                    len += sprintf(buf+len, "%2d: %s\n",
                                   i,
                                   dma_chan_busy[i].device_id);
                }
        }
        return len;
} /* get_dma_list */


int request_dma(unsigned int dmanr, char * device_id)
{
        if (dmanr >= MAX_DMA_CHANNELS)
                return -EINVAL;

        if (mutex_atomic_swap((unsigned int *) &dma_chan_busy[dmanr].lock, 1) != 0)
                return -EBUSY;

        dma_chan_busy[dmanr].device_id = device_id;

        /* old flag was 0, now contains 1 to indicate busy */
        return 0;
} /* request_dma */


void free_dma(unsigned int dmanr)
{
        if (dmanr >= MAX_DMA_CHANNELS) {
                printk("Trying to free DMA%d\n", dmanr);
                return;
        }

        if (mutex_atomic_swap((unsigned int *) &dma_chan_busy[dmanr].lock, 0) == 0) {
                printk("Trying to free free DMA%d\n", dmanr);
                return;
        }

} /* free_dma */
