This source file includes the following definitions:
- fcntl_init_locks
- fcntl_getlk
- fcntl_setlk
- fcntl_remove_locks
- copy_flock
- conflict
- overlap
- lock_it
- alloc_lock
- free_lock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 #include <asm/segment.h>
16
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/errno.h>
20 #include <linux/stat.h>
21 #include <linux/fcntl.h>
22
23 #define OFFSET_MAX ((off_t)0x7fffffff)
24
25 static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l,
26 unsigned int fd);
27 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl);
28 static int overlap(struct file_lock *fl1, struct file_lock *fl2);
29 static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd);
30 static struct file_lock *alloc_lock(struct file_lock **pos, struct file_lock *fl,
31 unsigned int fd);
32 static void free_lock(struct file_lock **fl);
33
34 static struct file_lock file_lock_table[NR_FILE_LOCKS];
35 static struct file_lock *file_lock_free_list;
36
37
38
39
40
41 void fcntl_init_locks(void)
42 {
43 struct file_lock *fl;
44
45 for (fl = &file_lock_table[0]; fl < file_lock_table + NR_FILE_LOCKS - 1; fl++) {
46 fl->fl_next = fl + 1;
47 fl->fl_owner = NULL;
48 }
49 file_lock_table[NR_FILE_LOCKS - 1].fl_next = NULL;
50 file_lock_table[NR_FILE_LOCKS - 1].fl_owner = NULL;
51 file_lock_free_list = &file_lock_table[0];
52 }
53
/*
 * F_GETLK: report the first lock on fd's inode that would block the
 * lock described by *l.  On conflict the blocking lock is copied back
 * to user space (with l_pid set to its owner); otherwise l_type is
 * set to F_UNLCK to say the lock could be placed.
 */
int fcntl_getlk(unsigned int fd, struct flock *l)
{
	int error;
	struct flock flock;
	struct file *filp;
	struct file_lock *fl,file_lock;

	/* Validate the descriptor and make sure *l is writable. */
	if (fd >= NR_OPEN || !(filp = current->filp[fd]))
		return -EBADF;
	error = verify_area(VERIFY_WRITE,l, sizeof(*l));
	if (error)
		return error;
	memcpy_fromfs(&flock, l, sizeof(flock));
	/* Asking whether an unlock would block makes no sense. */
	if (flock.l_type == F_UNLCK)
		return -EINVAL;
	/* Normalize the user's flock into an absolute-offset file_lock. */
	if (!copy_flock(filp, &file_lock, &flock, fd))
		return -EINVAL;

	/* Scan the inode's lock list for the first conflicting lock. */
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (conflict(&file_lock, fl)) {
			flock.l_pid = fl->fl_owner->pid;
			flock.l_start = fl->fl_start;
			/* fl_end == OFFSET_MAX encodes "to end of file" (l_len 0). */
			flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
				fl->fl_end - fl->fl_start + 1;
			flock.l_whence = fl->fl_whence;
			flock.l_type = fl->fl_type;
			memcpy_tofs(l, &flock, sizeof(flock));
			return 0;
		}
	}

	/* No conflict: report F_UNLCK back to the caller. */
	flock.l_type = F_UNLCK;
	memcpy_tofs(l, &flock, sizeof(flock));
	return 0;
}
89
90
91
92
93
/*
 * F_SETLK/F_SETLKW: apply or remove an advisory record lock on fd.
 * F_SETLK fails with -EAGAIN when a conflicting lock is held by
 * someone else; F_SETLKW sleeps until the conflict clears or a
 * signal arrives.
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock *fl,file_lock;
	struct flock flock;

	/* Fetch and validate the descriptor and the user's struct flock. */
	if (fd >= NR_OPEN || !(filp = current->filp[fd]))
		return -EBADF;
	error = verify_area(VERIFY_WRITE, l, sizeof(*l));
	if (error)
		return error;
	memcpy_fromfs(&flock, l, sizeof(flock));
	if (!copy_flock(filp, &file_lock, &flock, fd))
		return -EINVAL;

	/*
	 * The lock type must match the file's access mode: read locks
	 * need read access (f_mode & 1), write locks need write access
	 * (f_mode & 2).  F_SHLCK/F_EXLCK need both and are mapped onto
	 * F_RDLCK/F_WRLCK.  copy_flock() already rejected any other
	 * type, so no default case is needed here.
	 */
	switch (file_lock.fl_type) {
	case F_RDLCK :
		if (!(filp->f_mode & 1))
			return -EBADF;
		break;
	case F_WRLCK :
		if (!(filp->f_mode & 2))
			return -EBADF;
		break;
	case F_SHLCK :
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_RDLCK;
		break;
	case F_EXLCK :
		if (!(filp->f_mode & 3))
			return -EBADF;
		file_lock.fl_type = F_WRLCK;
		break;
	case F_UNLCK :
		break;
	}

	/*
	 * Look for conflicting locks.  An unlock request never
	 * conflicts, so it goes straight to lock_it().
	 */
	if (file_lock.fl_type != F_UNLCK) {
repeat:
		for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!conflict(&file_lock, fl))
				continue;

			/*
			 * F_SETLKW: sleep on the conflicting lock's wait
			 * queue, then rescan from the list head — the
			 * lock list may have changed while we slept.  A
			 * pending unblocked signal aborts the wait with
			 * -ERESTARTSYS, both before and after sleeping.
			 */
			if (cmd == F_SETLKW) {
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
				interruptible_sleep_on(&fl->fl_wait);
				if (current->signal & ~current->blocked)
					return -ERESTARTSYS;
				goto repeat;
			}
			return -EAGAIN;
		}
	}

	/* No conflicts: merge/split the lock into the inode's list. */
	return lock_it(filp, &file_lock, fd);
}
168
169
170
171
172
173 void fcntl_remove_locks(struct task_struct *task, struct file *filp,
174 unsigned int fd)
175 {
176 struct file_lock *fl;
177 struct file_lock **before;
178
179
180
181 before = &filp->f_inode->i_flock;
182 while ((fl = *before) && (task != fl->fl_owner || fd != fl->fl_fd))
183 before = &fl->fl_next;
184
185
186
187 while ((fl = *before) && task == fl->fl_owner && fd == fl->fl_fd)
188 free_lock(before);
189 }
190
191
192
193
194
195
/*
 * Convert a user-supplied struct flock into a struct file_lock with an
 * absolute [fl_start, fl_end] byte range, owned by the current task.
 * Returns 1 on success, 0 if the request is malformed or the file
 * cannot carry record locks.
 */
static int copy_flock(struct file *filp, struct file_lock *fl, struct flock *l,
		unsigned int fd)
{
	off_t start;

	/* Only regular files with a valid inode can be record-locked. */
	if (!filp->f_inode)
		return 0;
	if (!S_ISREG(filp->f_inode->i_mode))
		return 0;
	if (l->l_type != F_UNLCK && l->l_type != F_RDLCK && l->l_type != F_WRLCK
	    && l->l_type != F_SHLCK && l->l_type != F_EXLCK)
		return 0;
	/* Resolve l_whence (0=SEEK_SET, 1=SEEK_CUR, 2=SEEK_END) to a base offset. */
	switch (l->l_whence) {
	case 0 : start = 0; break;
	case 1 : start = filp->f_pos; break;
	case 2 : start = filp->f_inode->i_size; break;
	default : return 0;
	}
	/* Reject a start before offset 0 and negative lengths. */
	if ((start += l->l_start) < 0 || l->l_len < 0)
		return 0;
	fl->fl_type = l->l_type;
	fl->fl_start = start;	/* absolute offset from here on */
	fl->fl_whence = 0;
	/*
	 * l_len == 0 means "lock to end of file".  The (< 0) test also
	 * maps a range whose computed end wraps negative to end-of-file —
	 * NOTE(review): this relies on signed overflow wrapping, which
	 * the kernel's build assumes; confirm for the target compiler.
	 */
	if (l->l_len == 0 || (fl->fl_end = start + l->l_len - 1) < 0)
		fl->fl_end = OFFSET_MAX;
	fl->fl_owner = current;
	fl->fl_fd = fd;
	fl->fl_wait = NULL;
	return 1;
}
226
227
228
229
230
231 static int conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
232 {
233 if ( caller_fl->fl_owner == sys_fl->fl_owner
234 && caller_fl->fl_fd == sys_fl->fl_fd)
235 return 0;
236 if (!overlap(caller_fl, sys_fl))
237 return 0;
238 switch (caller_fl->fl_type) {
239 case F_RDLCK :
240 return sys_fl->fl_type != F_RDLCK;
241 case F_WRLCK :
242 return 1;
243 }
244 return 0;
245 }
246
247 static int overlap(struct file_lock *fl1, struct file_lock *fl2)
248 {
249 return fl1->fl_end >= fl2->fl_start && fl2->fl_end >= fl1->fl_start;
250 }
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
/*
 * Install caller's lock into filp's inode lock list with POSIX
 * replace semantics: on the overlapping range, the new lock replaces
 * whatever the same owner/fd previously held.  Adjacent or
 * overlapping same-type locks are merged; an old lock straddling the
 * new range is split, which may need an extra record.  Locks for one
 * owner/fd pair occupy one contiguous run of the list.
 *
 * Returns 0 on success, -EINVAL for an unlock that removed nothing,
 * or -ENOLCK when the lock table is exhausted.
 */
static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd)
{
	struct file_lock *fl;
	struct file_lock *left = 0;	/* old lock extending left of the new range */
	struct file_lock *right = 0;	/* old lock extending right of the new range */
	struct file_lock **before;
	int added = 0;			/* caller's range already stored in the list? */

	/* Skip to the start of this owner/fd's run of locks. */
	before = &filp->f_inode->i_flock;
	while ((fl = *before) &&
	       (caller->fl_owner != fl->fl_owner ||
		caller->fl_fd != fl->fl_fd))
		before = &fl->fl_next;

	/* Walk this owner's locks, merging/deleting against caller's range. */
	while (   (fl = *before)
	       && caller->fl_owner == fl->fl_owner
	       && caller->fl_fd == fl->fl_fd) {

		if (caller->fl_type == fl->fl_type) {
			/* Same type: coalesce touching or overlapping ranges. */
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;

			/* Past the new range (not even adjacent): done scanning. */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/*
			 * Grow fl to cover the union of both ranges, and
			 * widen caller to match so later iterations merge
			 * against the combined range.
			 */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				/* Range already stored once; drop the duplicate. */
				free_lock(before);
				continue;
			}
			/* Reuse fl as the stored copy of caller's lock. */
			caller = fl;
			added = 1;
			goto next_lock;
		}

		/* Different type: only true overlaps matter. */
		if (fl->fl_end < caller->fl_start)
			goto next_lock;
		if (fl->fl_start > caller->fl_end)
			break;
		/* An unlock "succeeds" as soon as it overlaps something. */
		if (caller->fl_type == F_UNLCK)
			added = 1;
		if (fl->fl_start < caller->fl_start)
			left = fl;

		/*
		 * fl extends past the new range on the right: remember it
		 * for trimming/splitting below and stop scanning.
		 */
		if (fl->fl_end > caller->fl_end) {
			right = fl;
			break;
		}
		if (fl->fl_start >= caller->fl_start) {
			/* fl lies wholly inside the new range. */
			if (added) {
				free_lock(before);
				continue;
			}

			/*
			 * Convert fl in place into the new lock, waking any
			 * sleepers since its type/range is changing.
			 */
			wake_up(&fl->fl_wait);
			fl->fl_start = caller->fl_start;
			fl->fl_end = caller->fl_end;
			fl->fl_type = caller->fl_type;
			caller = fl;
			added = 1;
		}

	next_lock:
		before = &(*before)->fl_next;
	}

	if (! added) {
		/* An unlock that overlapped nothing is an error. */
		if (caller->fl_type == F_UNLCK)
			return -EINVAL;
		/* Insert a fresh record at the sorted position. */
		if (! (caller = alloc_lock(before, caller, fd)))
			return -ENOLCK;
	}
	if (right) {
		if (left == right) {
			/*
			 * One old lock straddles the whole new range: split
			 * it by cloning, then trim the clone to the left
			 * piece and the original to the right piece.  On
			 * allocation failure undo the insert done above.
			 */
			if (! (left = alloc_lock(before, right, fd))) {
				if (! added)
					free_lock(before);
				return -ENOLCK;
			}
		}
		right->fl_start = caller->fl_end + 1;
	}
	if (left)
		left->fl_end = caller->fl_start - 1;
	return 0;
}
404
405
406
407
408
409 static struct file_lock *alloc_lock(struct file_lock **pos,
410 struct file_lock *fl,
411 unsigned int fd)
412 {
413 struct file_lock *tmp;
414
415 tmp = file_lock_free_list;
416 if (tmp == NULL)
417 return NULL;
418 if (tmp->fl_owner != NULL)
419 panic("alloc_lock: broken free list\n");
420
421
422 file_lock_free_list = tmp->fl_next;
423
424 *tmp = *fl;
425
426 tmp->fl_next = *pos;
427 *pos = tmp;
428
429 tmp->fl_owner = current;
430 tmp->fl_fd = fd;
431 tmp->fl_wait = NULL;
432 return tmp;
433 }
434
435
436
437
438
439 static void free_lock(struct file_lock **fl_p)
440 {
441 struct file_lock *fl;
442
443 fl = *fl_p;
444 if (fl->fl_owner == NULL)
445 panic("free_lock: broken lock list\n");
446
447 *fl_p = (*fl_p)->fl_next;
448
449 fl->fl_next = file_lock_free_list;
450 file_lock_free_list = fl;
451 fl->fl_owner = NULL;
452
453 wake_up(&fl->fl_wait);
454 }