root/include/asm-i386/locks.h


DEFINITIONS

This source file includes the following definitions.
  1. prim_spin_lock
  2. prim_spin_unlock
  3. prim_spin_lock_nb
  4. spinlock
  5. spinunlock
  6. spintestlock
  7. spintestunlock

/*
 *      SMP lock primitives for building ix86 locks
 *      (not yet used).
 *
 *              Alan Cox, alan@cymru.net, 1995
 */

/*
 *      This would be much easier, but far less clear and easy
 *      to borrow for other processors, if it were just assembler.
 */

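/*
 *      struct spinlock itself is not defined in this file. The layout
 *      below is only an inferred sketch, reconstructed from the fields
 *      these primitives touch; the real definition and the field types
 *      may well differ.
 */

struct spinlock
{
        volatile unsigned long lock;    /* Bit 0 is the lock bit */
        unsigned long users;            /* Recursion count of the owner */
        int cpu;                        /* Owning processor, or NO_PROC_ID */
        unsigned long spins;            /* Times we span waiting for it */
        int priority;                   /* Lock ordering level */
        int oldpri;                     /* Owner's previous lock_order */
        char *name;                     /* For the panic() messages */
};
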
extern __inline__ int prim_spin_lock(struct spinlock *sp)
{
        int processor=smp_processor_id();

        /*
         *      Grab the lock bit
         */

        while(lock_set_bit(0,&sp->lock))
        {
                /*
                 *      Failed, but that may be because we already own it
                 */

                if(sp->cpu==processor)
                {
                        sp->users++;
                        return 0;
                }
                /*
                 *      Spin in the cache S state if possible
                 */
                while(sp->lock)
                {
                        /*
                         *      Wait for any invalidates to go off
                         */

                        if(smp_invalidate_needed&(1<<processor))
                                while(lock_clear_bit(processor,&smp_invalidate_needed))
                                        local_invalidate();
                        sp->spins++;
                }
                /*
                 *      Someone wrote the line, we go 'I' and get
                 *      the cache entry. Now try and regrab
                 */
        }
        sp->users++;
        sp->cpu=processor;
        return 1;
}

/*
 *      Release a spin lock
 */

extern __inline__ int prim_spin_unlock(struct spinlock *sp)
{
        /* This is safe. The decrement is still guarded by the lock. A multilock
           would not be safe this way */
        if(!--sp->users)
        {
                lock_clear_bit(0,&sp->lock);
                sp->cpu=NO_PROC_ID;
                return 1;
        }
        return 0;
}


/*
 *      Non blocking lock grab
 */

extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
{
        if(lock_set_bit(0,&sp->lock))
                return 0;               /* Locked already */
        sp->users++;
        sp->cpu=smp_processor_id();     /* Record the owner so recursive locking works */
        return 1;                       /* We got the lock */
}
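
/*
 *      Usage sketch (illustrative only): "io_lock" and "poll_io_once"
 *      are hypothetical, not defined anywhere in the kernel. The non
 *      blocking form lets a caller attempt a lock and simply carry on
 *      when another processor already holds it.
 */

extern struct spinlock io_lock;

extern __inline__ int poll_io_once(void)
{
        if(!prim_spin_lock_nb(&io_lock))
                return 0;               /* Busy, try again later */
        /* ... touch the protected state here ... */
        prim_spin_unlock(&io_lock);
        return 1;
}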


/*
 *      These wrap the locking primitives up for usage
 */

extern __inline__ void spinlock(struct spinlock *sp)
{
        if(sp->priority<current->lock_order)
                panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
        if(prim_spin_lock(sp))
        {
                /*
                 *      We got a new lock. Update the priority chain
                 */
                sp->oldpri=current->lock_order;
                current->lock_order=sp->priority;
        }
}

extern __inline__ void spinunlock(struct spinlock *sp)
{
        if(current->lock_order!=sp->priority)
                panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
        if(prim_spin_unlock(sp))
        {
                /*
                 *      Update the debugging lock priority chain. We dumped
                 *      our last right to the lock.
                 */
                current->lock_order=sp->oldpri;
        }
}

extern __inline__ int spintestlock(struct spinlock *sp)
{
        /*
         *      We do no sanity checks, it's legal to optimistically
         *      get a lower lock. Return the result so the caller can
         *      tell whether it actually got the lock.
         */
        return prim_spin_lock_nb(sp);
}

extern __inline__ void spintestunlock(struct spinlock *sp)
{
        /*
         *      A testlock doesn't update the lock chain so we
         *      must not update it on free
         */
        prim_spin_unlock(sp);
}
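
/*
 *      Usage sketch (illustrative only): the two locks, their
 *      priorities and "update_both" are hypothetical. Locks must be
 *      taken in rising priority order and released in the reverse
 *      order, or the checks in spinlock()/spinunlock() will panic.
 */

extern struct spinlock inode_lock;      /* priority 1, say */
extern struct spinlock buffer_lock;     /* priority 2, say */

extern __inline__ void update_both(void)
{
        spinlock(&inode_lock);          /* Lower priority first */
        spinlock(&buffer_lock);         /* Then the higher one */
        /* ... work guarded by both locks ... */
        spinunlock(&buffer_lock);       /* Release in reverse order */
        spinunlock(&inode_lock);
}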
