This source file includes following definitions.
- fcntl_init_locks
- fcntl_getlk
- fcntl_setlk
- fcntl_remove_locks
- copy_flock
- conflict
- overlap
- lock_it
- alloc_lock
- free_lock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 #include <asm/segment.h>
16
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/errno.h>
20 #include <linux/stat.h>
21 #include <linux/fcntl.h>
22
23 #define OFFSET_MAX 0x7fffffff
24
25 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l);
26 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
27 static int overlap(struct file_lock *fl1, struct file_lock *fl2);
28 static int lock_it(struct file *filp, struct file_lock *caller);
29 static struct file_lock *alloc_lock(struct file_lock **pos, struct file_lock *template);
30 static void free_lock(struct file_lock **fl);
31
32 static struct file_lock file_lock_table[NR_FILE_LOCKS];
33 static struct file_lock *file_lock_free_list;
34
35
36
37
38
39 void fcntl_init_locks(void)
40 {
41 struct file_lock *fl;
42
43 for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
44 fl->fl_next = fl + 1;
45 fl->fl_owner = NULL;
46 }
47 file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
48 file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
49 file_lock_free_list = &file_lock_table[0];
50 }
51
52 int fcntl_getlk(unsigned int fd, struct flock *l)
53 {
54 int error;
55 struct flock flock;
56 struct file *filp;
57 struct file_lock *fl,file_lock;
58
59 if (fd >= NR_OPEN || !(filp = current->filp[fd]))
60 return -EBADF;
61 error = verify_area(VERIFY_WRITE,l, sizeof(*l));
62 if (error)
63 return error;
64 memcpy_fromfs(&flock, l, sizeof(flock));
65 if (flock.l_type == F_UNLCK)
66 return -EINVAL;
67 if (!copy_flock(filp, &file_lock, &flock))
68 return -EINVAL;
69
70 for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
71 if (conflict(&file_lock, fl)) {
72 flock.l_pid = fl->fl_owner->pid;
73 flock.l_start = fl->fl_start;
74 flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
75 fl->fl_end - fl->fl_start + 1;
76 flock.l_whence = fl->fl_whence;
77 flock.l_type = fl->fl_type;
78 memcpy_tofs(l, &flock, sizeof(flock));
79 return 0;
80 }
81 }
82
83 flock.l_type = F_UNLCK;
84 memcpy_tofs(l, &flock, sizeof(flock));
85 return 0;
86 }
87
88
89
90
91
/*
 * fcntl() F_SETLK / F_SETLKW handler: install, convert, or remove a
 * record lock on open file 'fd'.  The user's lock description is
 * copied in, validated against the file's access mode, checked for
 * conflicts, and finally handed to lock_it() for the list surgery.
 *
 * Returns 0 on success; -EBADF / -EINVAL on bad arguments or access
 * mode; -EAGAIN when F_SETLK hits a conflicting lock; -ERESTARTSYS
 * when an F_SETLKW sleep is interrupted by a signal; otherwise
 * lock_it()'s result.
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock *fl,file_lock;
	struct flock flock;

	/*
	 * Fetch and sanity-check the arguments.
	 */

	if (fd >= NR_OPEN || !(filp = current->filp[fd]))
		return -EBADF;
	error = verify_area(VERIFY_WRITE, l, sizeof(*l));
	if (error)
		return error;
	memcpy_fromfs(&flock, l, sizeof(flock));
	if (!copy_flock(filp, &file_lock, &flock))
		return -EINVAL;
	switch (file_lock.fl_type) {
	case F_RDLCK :
		/* A read lock requires the file to be open for reading. */
		if (!(filp->f_mode & 1))
			return -EBADF;
		break;
	case F_WRLCK :
		/* A write lock requires the file to be open for writing. */
		if (!(filp->f_mode & 2))
			return -EBADF;
		break;
	case F_SHLCK :
		/* Shared-lock request: mapped onto a read lock. */
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_RDLCK;
		break;
	case F_EXLCK :
		/* Exclusive-lock request: mapped onto a write lock. */
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_WRLCK;
		break;
	case F_UNLCK :
		break;
	}

	/*
	 * Scan for conflicting locks.  An unlock request can never
	 * conflict, so it skips straight to lock_it().
	 */
	if (file_lock.fl_type != F_UNLCK) {
	repeat:
		for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!conflict(&file_lock, fl))
				continue;

			/*
			 * F_SETLKW sleeps until the conflicting lock goes
			 * away, then re-scans the entire list from scratch
			 * ('repeat'), since it may have changed arbitrarily
			 * while we slept.  The signal check is done both
			 * before and after sleeping.
			 *
			 * NOTE(review): 'fl' can be freed and reused while
			 * we sleep on fl->fl_wait (free_lock() wakes the
			 * queue after recycling the entry); the full re-scan
			 * appears to be what keeps this safe - confirm.
			 * No deadlock detection is performed here.
			 */
			if (cmd == F_SETLKW) {
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
				goto repeat;
			}
			return -EAGAIN;
		}
	}

	/*
	 * No conflicts (or an unlock): apply the change to the list.
	 */
	return lock_it(filp, &file_lock);
}
166
167
168
169
170
171 void fcntl_remove_locks(struct task_struct *task, struct file *filp)
172 {
173 struct file_lock *fl;
174 struct file_lock **before;
175
176
177
178 before = &filp->f_inode->i_flock;
179 while ((fl = *before) && task != fl->fl_owner)
180 before = &fl->fl_next;
181
182
183
184 while ((fl = *before) && task == fl->fl_owner)
185 free_lock(before);
186 }
187
188
189
190
191
192
193 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l)
194 {
195 off_t start;
196
197 if (!filp->f_inode)
198 return 0;
199 if (!S_ISREG(filp->f_inode->i_mode))
200 return 0;
201 if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
202 && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
203 return 0;
204 switch (l->l_whence) {
205 case 0 : start = 0; break;
206 case 1 : start = filp->f_pos; break;
207 case 2 : start = filp->f_inode->i_size; break;
208 default : return 0;
209 }
210 if ((start += l->l_start) < 0 || l->l_len < 0)
211 return 0;
212 fl->fl_type = l->l_type;
213 fl->fl_start = start;
214 fl->fl_whence = 0;
215 if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
216 fl->fl_end = OFFSET_MAX;
217 fl->fl_owner = current;
218 fl->fl_wait = NULL;
219 return 1;
220 }
221
222
223
224
225
226 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
227 {
228 if (caller_fl->fl_owner == sys_fl->fl_owner)
229 return 0;
230 if (!overlap(caller_fl, sys_fl))
231 return 0;
232 switch (caller_fl->fl_type) {
233 case F_RDLCK :
234 return sys_fl->fl_type != F_RDLCK;
235 case F_WRLCK :
236 return 1;
237 }
238 return 0;
239 }
240
241 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
242 {
243 return fl1->fl_end >= fl2->fl_start && fl2->fl_end >= fl1->fl_start;
244 }
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
/*
 * Install 'caller' into the inode's lock list with POSIX semantics:
 * a new lock replaces any older lock of the same owner over the
 * overlapping range, adjacent same-type locks are coalesced, and an
 * F_UNLCK request removes coverage, splitting an old lock in two if
 * needed.  Locks of one owner are kept adjacent in the list, which
 * fcntl_remove_locks() relies on.
 *
 * Returns 0 on success, -EINVAL for an unlock that matched no
 * existing lock, or -ENOLCK when the static lock table is exhausted.
 */
static int lock_it(struct file *filp, struct file_lock *caller)
{
	struct file_lock *fl;
	struct file_lock *left = 0;	/* old lock protruding to the left */
	struct file_lock *right = 0;	/* old lock protruding to the right */
	struct file_lock **before;
	int added = 0;			/* caller's range already represented? */

	/*
	 * Find the first lock owned by the caller.
	 */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) && caller->fl_owner != fl->fl_owner)
		before = &fl->fl_next;

	/*
	 * Reconcile the new lock with each of the caller's old locks.
	 */
	while ((fl = *before) && caller->fl_owner == fl->fl_owner) {

		/* Case 1: same lock type - coalesce touching ranges. */
		if (caller->fl_type == fl->fl_type) {
			/* Old lock wholly before and not even adjacent. */
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;

			/*
			 * Old lock starts beyond the new range and is not
			 * adjacent; nothing further can merge.  NOTE(review):
			 * this break assumes the per-owner list is kept
			 * sorted by fl_start - confirm.
			 */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/*
			 * Ranges overlap or touch: widen both descriptions
			 * to their union.
			 */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				/* Range already represented by an earlier
				 * entry; this one is now redundant. */
				free_lock(before);
				continue;
			}
			/* Reuse this entry as the merged lock from now on. */
			caller = fl;
			added = 1;
			goto next_lock;
		}

		/* Case 2: different type (including an unlock request). */
		if (fl->fl_end < caller->fl_start)
			goto next_lock;		/* disjoint, old lock first */
		if (fl->fl_start > caller->fl_end)
			break;			/* disjoint, past the range */
		if (caller->fl_type == F_UNLCK)
			added = 1;		/* unlock never inserts an entry */
		if (fl->fl_start < caller->fl_start)
			left = fl;		/* left fragment survives */

		/*
		 * Old lock also extends past the right edge: it may need to
		 * be split in two.  Remember it and stop scanning.
		 */
		if (fl->fl_end > caller->fl_end) {
			right = fl;
			break;
		}
		if (fl->fl_start >= caller->fl_start) {

			/* Old lock lies entirely inside the new range. */
			if (added) {
				free_lock(before);
				continue;
			}

			/*
			 * Overwrite the old lock in place with the new one.
			 * Its type (and so what it conflicts with) changes,
			 * hence sleepers on it are woken to re-check.
			 */
			wake_up(&fl->fl_wait);
			fl->fl_start = caller->fl_start;
			fl->fl_end = caller->fl_end;
			fl->fl_type = caller->fl_type;
			fl->fl_wait = 0;
			caller = fl;
			added = 1;
		}

		/* Advance to the caller's next lock. */
	next_lock:
		before = &(*before)->fl_next;
	}

	if (! added) {
		/* An unlock that covered nothing is an error. */
		if (caller->fl_type == F_UNLCK)
			return -EINVAL;
		if (! (caller = alloc_lock(before, caller)))
			return -ENOLCK;
	}
	if (right) {
		if (left == right) {
			/*
			 * A single old lock protrudes on both sides of the
			 * new range: split it by allocating a second entry
			 * to serve as the left fragment.
			 */
			if (! (left = alloc_lock(before, right))) {
				/* Undo the insertion done above on failure. */
				if (! added)
					free_lock(before);
				return -ENOLCK;
			}
		}
		/* Right fragment now begins just after the new lock. */
		right->fl_start = caller->fl_end + 1;
	}
	if (left)
		/* Left fragment now ends just before the new lock. */
		left->fl_end = caller->fl_start - 1;
	return 0;
}
395
396
397
398
399
400 static struct file_lock *alloc_lock(struct file_lock **pos,
401 struct file_lock *template)
402 {
403 struct file_lock *new;
404
405 new = file_lock_free_list;
406 if (new == NULL)
407 return NULL;
408 if (new->fl_owner != NULL)
409 panic("alloc_lock: broken free list\n");
410
411
412 file_lock_free_list = new->fl_next;
413
414 *new = *template;
415
416 new->fl_next = *pos;
417 *pos = new;
418
419 new->fl_owner = current;
420 new->fl_wait = NULL;
421 return new;
422 }
423
424
425
426
427
428 static void free_lock(struct file_lock **fl_p)
429 {
430 struct file_lock *fl;
431
432 fl = *fl_p;
433 if (fl->fl_owner == NULL)
434 panic("free_lock: broken lock list\n");
435
436 *fl_p = (*fl_p)->fl_next;
437
438 fl->fl_next = file_lock_free_list;
439 file_lock_free_list = fl;
440 fl->fl_owner = NULL;
441
442 wake_up(&fl->fl_wait);
443 }