tag | line | file | source code |
--- | --- | --- | --- |
req | 1381 | drivers/block/ide-cd.c | struct request req; |
req | 1391 | drivers/block/ide-cd.c | ide_init_drive_cmd (&req); |
req | 1392 | drivers/block/ide-cd.c | req.cmd = PACKET_COMMAND; |
req | 1393 | drivers/block/ide-cd.c | req.buffer = (char *)pc; |
req | 1394 | drivers/block/ide-cd.c | (void) ide_do_drive_cmd (drive, &req, ide_wait); |
req | 2520 | drivers/block/ide-cd.c | struct request req; |
req | 2521 | drivers/block/ide-cd.c | ide_init_drive_cmd (&req); |
req | 2522 | drivers/block/ide-cd.c | req.cmd = RESET_DRIVE_COMMAND; |
req | 2523 | drivers/block/ide-cd.c | return ide_do_drive_cmd (drive, &req, ide_wait); |
req | 108 | drivers/block/linear.c | static int linear_map (int minor, struct md_dev *mddev, struct request *req) |
req | 118 | drivers/block/linear.c | while (req->nr_sectors) |
req | 120 | drivers/block/linear.c | block=req->sector >> 1; |
req | 137 | drivers/block/linear.c | if (req->sem) /* This is a paging request */ |
req | 139 | drivers/block/linear.c | req->rq_dev=tmp_dev->dev; |
req | 140 | drivers/block/linear.c | req->sector=rblock << 1; |
req | 141 | drivers/block/linear.c | add_request (blk_dev+MAJOR (tmp_dev->dev), req); |
req | 148 | drivers/block/linear.c | for (nblk=0, bh=bh2=req->bh; |
req | 159 | drivers/block/linear.c | pending[queue].cmd=req->cmd; |
req | 162 | drivers/block/linear.c | pending[queue].current_nr_sectors=req->bh->b_size >> 9; |
req | 163 | drivers/block/linear.c | pending[queue].bh=req->bh; |
req | 167 | drivers/block/linear.c | req->bh=bh; |
req | 168 | drivers/block/linear.c | req->sector+=nblk << 1; |
req | 169 | drivers/block/linear.c | req->nr_sectors-=nblk << 1; |
req | 172 | drivers/block/linear.c | req->rq_status=RQ_INACTIVE; |
req | 102 | drivers/block/ll_rw_blk.c | struct request * req; |
req | 107 | drivers/block/ll_rw_blk.c | req = dev->current_request; |
req | 108 | drivers/block/ll_rw_blk.c | if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) { |
req | 109 | drivers/block/ll_rw_blk.c | dev->current_request = req->next; |
req | 123 | drivers/block/ll_rw_blk.c | register struct request *req, *limit; |
req | 133 | drivers/block/ll_rw_blk.c | req = prev_found; |
req | 135 | drivers/block/ll_rw_blk.c | req = ((req > all_requests) ? req : limit) - 1; |
req | 136 | drivers/block/ll_rw_blk.c | if (req->rq_status == RQ_INACTIVE) |
req | 138 | drivers/block/ll_rw_blk.c | if (req == prev_found) |
req | 141 | drivers/block/ll_rw_blk.c | prev_found = req; |
req | 142 | drivers/block/ll_rw_blk.c | req->rq_status = RQ_ACTIVE; |
req | 143 | drivers/block/ll_rw_blk.c | req->rq_dev = dev; |
req | 144 | drivers/block/ll_rw_blk.c | return req; |
req | 152 | drivers/block/ll_rw_blk.c | register struct request *req; |
req | 160 | drivers/block/ll_rw_blk.c | req = get_request(n, dev); |
req | 162 | drivers/block/ll_rw_blk.c | if (req) |
req | 168 | drivers/block/ll_rw_blk.c | return req; |
req | 173 | drivers/block/ll_rw_blk.c | register struct request *req; |
req | 176 | drivers/block/ll_rw_blk.c | req = get_request(n, dev); |
req | 178 | drivers/block/ll_rw_blk.c | if (req) |
req | 179 | drivers/block/ll_rw_blk.c | return req; |
req | 233 | drivers/block/ll_rw_blk.c | void add_request(struct blk_dev_struct * dev, struct request * req) |
req | 239 | drivers/block/ll_rw_blk.c | switch (MAJOR(req->rq_dev)) { |
req | 241 | drivers/block/ll_rw_blk.c | disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4; |
req | 243 | drivers/block/ll_rw_blk.c | drive_stat_acct(req->cmd, req->nr_sectors, disk_index); |
req | 247 | drivers/block/ll_rw_blk.c | disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6; |
req | 248 | drivers/block/ll_rw_blk.c | drive_stat_acct(req->cmd, req->nr_sectors, disk_index); |
req | 251 | drivers/block/ll_rw_blk.c | disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2; |
req | 252 | drivers/block/ll_rw_blk.c | drive_stat_acct(req->cmd, req->nr_sectors, disk_index); |
req | 257 | drivers/block/ll_rw_blk.c | req->next = NULL; |
req | 259 | drivers/block/ll_rw_blk.c | if (req->bh && req->bh->b_dev==req->bh->b_rdev) |
req | 260 | drivers/block/ll_rw_blk.c | mark_buffer_clean(req->bh); |
req | 262 | drivers/block/ll_rw_blk.c | dev->current_request = req; |
req | 269 | drivers/block/ll_rw_blk.c | if ((IN_ORDER(tmp,req) || |
req | 271 | drivers/block/ll_rw_blk.c | IN_ORDER(req,tmp->next)) |
req | 274 | drivers/block/ll_rw_blk.c | req->next = tmp->next; |
req | 275 | drivers/block/ll_rw_blk.c | tmp->next = req; |
req | 279 | drivers/block/ll_rw_blk.c | if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR) |
req | 288 | drivers/block/ll_rw_blk.c | struct request * req; |
req | 358 | drivers/block/ll_rw_blk.c | && (req = blk_dev[major].current_request)) |
req | 363 | drivers/block/ll_rw_blk.c | req = req->next; |
req | 365 | drivers/block/ll_rw_blk.c | while (req) { |
req | 366 | drivers/block/ll_rw_blk.c | if (req->rq_dev == bh->b_dev && |
req | 367 | drivers/block/ll_rw_blk.c | !req->sem && |
req | 368 | drivers/block/ll_rw_blk.c | req->cmd == rw && |
req | 369 | drivers/block/ll_rw_blk.c | req->sector + req->nr_sectors == sector && |
req | 370 | drivers/block/ll_rw_blk.c | req->nr_sectors < 244) |
req | 372 | drivers/block/ll_rw_blk.c | req->bhtail->b_reqnext = bh; |
req | 373 | drivers/block/ll_rw_blk.c | req->bhtail = bh; |
req | 374 | drivers/block/ll_rw_blk.c | req->nr_sectors += count; |
req | 381 | drivers/block/ll_rw_blk.c | if (req->rq_dev == bh->b_dev && |
req | 382 | drivers/block/ll_rw_blk.c | !req->sem && |
req | 383 | drivers/block/ll_rw_blk.c | req->cmd == rw && |
req | 384 | drivers/block/ll_rw_blk.c | req->sector - count == sector && |
req | 385 | drivers/block/ll_rw_blk.c | req->nr_sectors < 244) |
req | 387 | drivers/block/ll_rw_blk.c | req->nr_sectors += count; |
req | 388 | drivers/block/ll_rw_blk.c | bh->b_reqnext = req->bh; |
req | 389 | drivers/block/ll_rw_blk.c | req->buffer = bh->b_data; |
req | 390 | drivers/block/ll_rw_blk.c | req->current_nr_sectors = count; |
req | 391 | drivers/block/ll_rw_blk.c | req->sector = sector; |
req | 393 | drivers/block/ll_rw_blk.c | req->bh = bh; |
req | 399 | drivers/block/ll_rw_blk.c | req = req->next; |
req | 406 | drivers/block/ll_rw_blk.c | req = get_request(max_req, bh->b_dev); |
req | 410 | drivers/block/ll_rw_blk.c | if (!req) { |
req | 415 | drivers/block/ll_rw_blk.c | req = __get_request_wait(max_req, bh->b_dev); |
req | 419 | drivers/block/ll_rw_blk.c | req->cmd = rw; |
req | 420 | drivers/block/ll_rw_blk.c | req->errors = 0; |
req | 421 | drivers/block/ll_rw_blk.c | req->sector = sector; |
req | 422 | drivers/block/ll_rw_blk.c | req->nr_sectors = count; |
req | 423 | drivers/block/ll_rw_blk.c | req->current_nr_sectors = count; |
req | 424 | drivers/block/ll_rw_blk.c | req->buffer = bh->b_data; |
req | 425 | drivers/block/ll_rw_blk.c | req->sem = NULL; |
req | 426 | drivers/block/ll_rw_blk.c | req->bh = bh; |
req | 427 | drivers/block/ll_rw_blk.c | req->bhtail = bh; |
req | 428 | drivers/block/ll_rw_blk.c | req->next = NULL; |
req | 429 | drivers/block/ll_rw_blk.c | add_request(major+blk_dev,req); |
req | 563 | drivers/block/ll_rw_blk.c | struct request * req[8]; |
req | 591 | drivers/block/ll_rw_blk.c | req[j] = get_request_wait(NR_REQUEST, dev); |
req | 594 | drivers/block/ll_rw_blk.c | req[j] = get_request(NR_REQUEST, dev); |
req | 596 | drivers/block/ll_rw_blk.c | if (req[j] == NULL) |
req | 599 | drivers/block/ll_rw_blk.c | req[j]->cmd = rw; |
req | 600 | drivers/block/ll_rw_blk.c | req[j]->errors = 0; |
req | 601 | drivers/block/ll_rw_blk.c | req[j]->sector = (b[i] * buffersize) >> 9; |
req | 602 | drivers/block/ll_rw_blk.c | req[j]->nr_sectors = buffersize >> 9; |
req | 603 | drivers/block/ll_rw_blk.c | req[j]->current_nr_sectors = buffersize >> 9; |
req | 604 | drivers/block/ll_rw_blk.c | req[j]->buffer = buf; |
req | 605 | drivers/block/ll_rw_blk.c | req[j]->sem = &sem; |
req | 606 | drivers/block/ll_rw_blk.c | req[j]->bh = NULL; |
req | 607 | drivers/block/ll_rw_blk.c | req[j]->next = NULL; |
req | 608 | drivers/block/ll_rw_blk.c | add_request(major+blk_dev,req[j]); |
req | 619 | drivers/block/ll_rw_blk.c | struct request * req; |
req | 627 | drivers/block/ll_rw_blk.c | req = all_requests + NR_REQUEST; |
req | 628 | drivers/block/ll_rw_blk.c | while (--req >= all_requests) { |
req | 629 | drivers/block/ll_rw_blk.c | req->rq_status = RQ_INACTIVE; |
req | 630 | drivers/block/ll_rw_blk.c | req->next = NULL; |
req | 363 | drivers/block/md.c | static inline int remap_request (int minor, struct request *req) |
req | 371 | drivers/block/md.c | return (md_dev[minor].pers->map(minor, md_dev+minor, req)); |
req | 377 | drivers/block/md.c | struct request *req; |
req | 386 | drivers/block/md.c | req = blk_dev[MD_MAJOR].current_request; |
req | 387 | drivers/block/md.c | if (!req || (req->rq_status == RQ_INACTIVE)) |
req | 394 | drivers/block/md.c | reqsize=req->nr_sectors>>1; |
req | 395 | drivers/block/md.c | chunksize=1 << FACTOR_SHIFT(FACTOR(md_dev+MINOR(req->rq_dev))); |
req | 396 | drivers/block/md.c | if (reqsize==chunksize) (md_dev+MINOR(req->rq_dev))->equal_count++; |
req | 397 | drivers/block/md.c | if (reqsize<chunksize) (md_dev+MINOR(req->rq_dev))->smallest_count++; |
req | 398 | drivers/block/md.c | if (reqsize>chunksize) (md_dev+MINOR(req->rq_dev))->biggest_count++; |
req | 401 | drivers/block/md.c | blk_dev[MD_MAJOR].current_request = req->next; |
req | 404 | drivers/block/md.c | minor = MINOR(req->rq_dev); |
req | 405 | drivers/block/md.c | if ((MAJOR(req->rq_dev) != MD_MAJOR) || (minor >= MAX_REAL)) |
req | 407 | drivers/block/md.c | printk("md: bad device number: 0x%04x\n", req->rq_dev); |
req | 408 | drivers/block/md.c | end_request(0, req); |
req | 412 | drivers/block/md.c | switch (remap_request (minor, req)) |
req | 415 | drivers/block/md.c | req->rq_status=RQ_INACTIVE; |
req | 423 | drivers/block/md.c | end_request (0, req); |
req | 439 | drivers/block/md.c | struct request *req; |
req | 462 | drivers/block/md.c | && (req = blk_dev[major].current_request)) |
req | 469 | drivers/block/md.c | req = req->next; |
req | 471 | drivers/block/md.c | while (req && !found) |
req | 473 | drivers/block/md.c | if (req->rq_status!=RQ_INACTIVE && req->rq_status!=RQ_ACTIVE) |
req | 476 | drivers/block/md.c | if (req->rq_dev == dev && |
req | 477 | drivers/block/md.c | !req->sem && |
req | 478 | drivers/block/md.c | req->cmd == rw && |
req | 479 | drivers/block/md.c | req->sector + req->nr_sectors == pending[i].sector && |
req | 480 | drivers/block/md.c | (req->nr_sectors + pending[i].nr_sectors) < 245) |
req | 482 | drivers/block/md.c | req->bhtail->b_reqnext = bh; |
req | 483 | drivers/block/md.c | req->bhtail = pending[i].bhtail; |
req | 484 | drivers/block/md.c | req->nr_sectors += pending[i].nr_sectors; |
req | 490 | drivers/block/md.c | req->rq_dev == dev && |
req | 491 | drivers/block/md.c | !req->sem && |
req | 492 | drivers/block/md.c | req->cmd == rw && |
req | 493 | drivers/block/md.c | req->sector - pending[i].nr_sectors == pending[i].sector && |
req | 494 | drivers/block/md.c | (req->nr_sectors + pending[i].nr_sectors) < 245) |
req | 496 | drivers/block/md.c | req->nr_sectors += pending[i].nr_sectors; |
req | 497 | drivers/block/md.c | bh->b_reqnext = req->bh; |
req | 498 | drivers/block/md.c | req->buffer = bh->b_data; |
req | 499 | drivers/block/md.c | req->current_nr_sectors = bh->b_size >> 9; |
req | 500 | drivers/block/md.c | req->sector = pending[i].sector; |
req | 501 | drivers/block/md.c | req->bh = bh; |
req | 506 | drivers/block/md.c | req = req->next; |
req | 514 | drivers/block/md.c | req=get_md_request (max_req, dev); |
req | 517 | drivers/block/md.c | req->cmd = rw; |
req | 518 | drivers/block/md.c | req->errors = 0; |
req | 520 | drivers/block/md.c | req->shared_count = 0; |
req | 522 | drivers/block/md.c | req->sector = pending[i].sector; |
req | 523 | drivers/block/md.c | req->nr_sectors = pending[i].nr_sectors; |
req | 524 | drivers/block/md.c | req->current_nr_sectors = bh->b_size >> 9; |
req | 525 | drivers/block/md.c | req->buffer = bh->b_data; |
req | 526 | drivers/block/md.c | req->sem = NULL; |
req | 527 | drivers/block/md.c | req->bh = bh; |
req | 528 | drivers/block/md.c | req->bhtail = pending[i].bhtail; |
req | 529 | drivers/block/md.c | req->next = NULL; |
req | 531 | drivers/block/md.c | add_request (blk_dev + MAJOR(dev), req); |
req | 182 | drivers/block/raid0.c | static int raid0_map (int minor, struct md_dev *mddev, struct request *req) |
req | 195 | drivers/block/raid0.c | while (req->bh || req->sem) |
req | 197 | drivers/block/raid0.c | block=req->sector >> 1; |
req | 215 | drivers/block/raid0.c | if (req->sem) /* This is a paging request */ |
req | 217 | drivers/block/raid0.c | req->rq_dev=tmp_dev->dev; |
req | 218 | drivers/block/raid0.c | req->sector=rblock << 1; |
req | 219 | drivers/block/raid0.c | add_request (blk_dev+MAJOR (tmp_dev->dev), req); |
req | 228 | drivers/block/raid0.c | i<(1UL << FACTOR_SHIFT(factor)) && req->bh; |
req | 231 | drivers/block/raid0.c | bh=req->bh; |
req | 246 | drivers/block/raid0.c | pending[queue].cmd=req->cmd; |
req | 257 | drivers/block/raid0.c | end_redirect (req); /* Separate bh from the request */ |
req | 261 | drivers/block/raid0.c | req->rq_status=RQ_INACTIVE; |
req | 2799 | drivers/char/serial.c | int register_serial(struct serial_struct *req) |
req | 2808 | drivers/char/serial.c | if (rs_table[i].port == req->port) |
req | 2825 | drivers/char/serial.c | "device already open\n", i, req->port, req->irq); |
req | 2828 | drivers/char/serial.c | info->irq = req->irq; |
req | 2829 | drivers/char/serial.c | info->port = req->port; |
req | 2830 | drivers/char/serial.c | info->flags = req->flags; |
req | 844 | drivers/scsi/scsi.c | Scsi_Cmnd * request_queueable (struct request * req, Scsi_Device * device) |
req | 854 | drivers/scsi/scsi.c | if (req && req->rq_status == RQ_INACTIVE) |
req | 902 | drivers/scsi/scsi.c | if (req) { |
req | 903 | drivers/scsi/scsi.c | memcpy(&SCpnt->request, req, sizeof(struct request)); |
req | 905 | drivers/scsi/scsi.c | bhp = bh = req->bh; |
req | 911 | drivers/scsi/scsi.c | while(req->nr_sectors && bh){ |
req | 914 | drivers/scsi/scsi.c | req->nr_sectors -= bh->b_size >> 9; |
req | 915 | drivers/scsi/scsi.c | req->sector += bh->b_size >> 9; |
req | 919 | drivers/scsi/scsi.c | if(req->nr_sectors && bh && bh->b_reqnext){ /* Any leftovers? */ |
req | 921 | drivers/scsi/scsi.c | req->bh = bh->b_reqnext; /* Divide request */ |
req | 923 | drivers/scsi/scsi.c | bh = req->bh; |
req | 926 | drivers/scsi/scsi.c | SCpnt->request.nr_sectors -= req->nr_sectors; |
req | 927 | drivers/scsi/scsi.c | req->current_nr_sectors = bh->b_size >> 9; |
req | 928 | drivers/scsi/scsi.c | req->buffer = bh->b_data; |
req | 931 | drivers/scsi/scsi.c | req->rq_status = RQ_INACTIVE; |
req | 970 | drivers/scsi/scsi.c | struct request * req = NULL; |
req | 982 | drivers/scsi/scsi.c | if (reqp) req = *reqp; |
req | 985 | drivers/scsi/scsi.c | if (req) { |
req | 986 | drivers/scsi/scsi.c | if(req->rq_status == RQ_INACTIVE) return NULL; |
req | 987 | drivers/scsi/scsi.c | dev = req->rq_dev; |
req | 1036 | drivers/scsi/scsi.c | if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) { |
req | 1052 | drivers/scsi/scsi.c | if (req) { |
req | 1053 | drivers/scsi/scsi.c | memcpy(&SCpnt->request, req, sizeof(struct request)); |
req | 1055 | drivers/scsi/scsi.c | bhp = bh = req->bh; |
req | 1061 | drivers/scsi/scsi.c | while(req->nr_sectors && bh){ |
req | 1064 | drivers/scsi/scsi.c | req->nr_sectors -= bh->b_size >> 9; |
req | 1065 | drivers/scsi/scsi.c | req->sector += bh->b_size >> 9; |
req | 1069 | drivers/scsi/scsi.c | if(req->nr_sectors && bh && bh->b_reqnext){/* Any leftovers? */ |
req | 1071 | drivers/scsi/scsi.c | req->bh = bh->b_reqnext; /* Divide request */ |
req | 1073 | drivers/scsi/scsi.c | bh = req->bh; |
req | 1075 | drivers/scsi/scsi.c | SCpnt->request.nr_sectors -= req->nr_sectors; |
req | 1076 | drivers/scsi/scsi.c | req->current_nr_sectors = bh->b_size >> 9; |
req | 1077 | drivers/scsi/scsi.c | req->buffer = bh->b_data; |
req | 1082 | drivers/scsi/scsi.c | req->rq_status = RQ_INACTIVE; |
req | 1083 | drivers/scsi/scsi.c | *reqp = req->next; |
req | 3116 | drivers/scsi/scsi.c | struct request * req; |
req | 3118 | drivers/scsi/scsi.c | req = blk_dev[i].current_request; |
req | 3119 | drivers/scsi/scsi.c | while(req) { |
req | 3121 | drivers/scsi/scsi.c | kdevname(req->rq_dev), |
req | 3122 | drivers/scsi/scsi.c | req->cmd, |
req | 3123 | drivers/scsi/scsi.c | req->sector, |
req | 3124 | drivers/scsi/scsi.c | req->nr_sectors, |
req | 3125 | drivers/scsi/scsi.c | req->current_nr_sectors); |
req | 3126 | drivers/scsi/scsi.c | req = req->next; |
req | 460 | drivers/scsi/scsi.h | struct request * req; |
req | 463 | drivers/scsi/scsi.h | req = &SCpnt->request; |
req | 464 | drivers/scsi/scsi.h | req->errors = 0; |
req | 467 | drivers/scsi/scsi.h | kdevname(req->rq_dev), req->sector); |
req | 471 | drivers/scsi/scsi.h | if ((bh = req->bh) != NULL) { |
req | 472 | drivers/scsi/scsi.h | req->bh = bh->b_reqnext; |
req | 473 | drivers/scsi/scsi.h | req->nr_sectors -= bh->b_size >> 9; |
req | 474 | drivers/scsi/scsi.h | req->sector += bh->b_size >> 9; |
req | 479 | drivers/scsi/scsi.h | if ((bh = req->bh) != NULL) { |
req | 480 | drivers/scsi/scsi.h | req->current_nr_sectors = bh->b_size >> 9; |
req | 481 | drivers/scsi/scsi.h | if (req->nr_sectors < req->current_nr_sectors) { |
req | 482 | drivers/scsi/scsi.h | req->nr_sectors = req->current_nr_sectors; |
req | 488 | drivers/scsi/scsi.h | if (req->bh){ |
req | 489 | drivers/scsi/scsi.h | req->buffer = bh->b_data; |
req | 492 | drivers/scsi/scsi.h | DEVICE_OFF(req->rq_dev); |
req | 493 | drivers/scsi/scsi.h | if (req->sem != NULL) { |
req | 494 | drivers/scsi/scsi.h | up(req->sem); |
req | 505 | drivers/scsi/scsi.h | req->rq_status = RQ_INACTIVE; |
req | 94 | drivers/scsi/scsi_ioctl.c | struct request * req; |
req | 96 | drivers/scsi/scsi_ioctl.c | req = &SCpnt->request; |
req | 97 | drivers/scsi/scsi_ioctl.c | req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ |
req | 99 | drivers/scsi/scsi_ioctl.c | if (req->sem != NULL) { |
req | 100 | drivers/scsi/scsi_ioctl.c | up(req->sem); |
req | 410 | drivers/scsi/sd.c | struct request * req = NULL; |
req | 483 | drivers/scsi/sd.c | req = CURRENT; |
req | 484 | drivers/scsi/sd.c | while(req){ |
req | 485 | drivers/scsi/sd.c | SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device); |
req | 487 | drivers/scsi/sd.c | req1 = req; |
req | 488 | drivers/scsi/sd.c | req = req->next; |
req | 490 | drivers/scsi/sd.c | if (SCpnt && req->rq_status == RQ_INACTIVE) { |
req | 491 | drivers/scsi/sd.c | if (req == CURRENT) |
req | 494 | drivers/scsi/sd.c | req1->next = req->next; |
req | 942 | drivers/scsi/sd.c | struct request * req; |
req | 944 | drivers/scsi/sd.c | req = &SCpnt->request; |
req | 945 | drivers/scsi/sd.c | req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ |
req | 947 | drivers/scsi/sd.c | if (req->sem != NULL) { |
req | 948 | drivers/scsi/sd.c | up(req->sem); |
req | 558 | drivers/scsi/sr.c | struct request * req = NULL; |
req | 613 | drivers/scsi/sr.c | req = CURRENT; |
req | 614 | drivers/scsi/sr.c | while(req){ |
req | 615 | drivers/scsi/sr.c | SCpnt = request_queueable(req, |
req | 616 | drivers/scsi/sr.c | scsi_CDs[DEVICE_NR(req->rq_dev)].device); |
req | 618 | drivers/scsi/sr.c | req1 = req; |
req | 619 | drivers/scsi/sr.c | req = req->next; |
req | 621 | drivers/scsi/sr.c | if (SCpnt && req->rq_status == RQ_INACTIVE) { |
req | 622 | drivers/scsi/sr.c | if (req == CURRENT) |
req | 625 | drivers/scsi/sr.c | req1->next = req->next; |
req | 965 | drivers/scsi/sr.c | struct request * req; |
req | 967 | drivers/scsi/sr.c | req = &SCpnt->request; |
req | 968 | drivers/scsi/sr.c | req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ |
req | 970 | drivers/scsi/sr.c | if (req->sem != NULL) { |
req | 971 | drivers/scsi/sr.c | up(req->sem); |
req | 23 | drivers/scsi/sr_ioctl.c | struct request * req; |
req | 25 | drivers/scsi/sr_ioctl.c | req = &SCpnt->request; |
req | 26 | drivers/scsi/sr_ioctl.c | req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ |
req | 28 | drivers/scsi/sr_ioctl.c | if (req->sem != NULL) { |
req | 29 | drivers/scsi/sr_ioctl.c | up(req->sem); |
req | 373 | include/linux/blk.h | struct request *req = hwgroup->rq; |
req | 375 | include/linux/blk.h | static void end_request (int uptodate, struct request * req) { |
req | 378 | include/linux/blk.h | struct request *req = CURRENT; |
req | 382 | include/linux/blk.h | req->errors = 0; |
req | 385 | include/linux/blk.h | kdevname(req->rq_dev), req->sector); |
req | 386 | include/linux/blk.h | req->nr_sectors--; |
req | 387 | include/linux/blk.h | req->nr_sectors &= ~SECTOR_MASK; |
req | 388 | include/linux/blk.h | req->sector += (BLOCK_SIZE / 512); |
req | 389 | include/linux/blk.h | req->sector &= ~SECTOR_MASK; |
req | 392 | include/linux/blk.h | if ((bh = req->bh) != NULL) { |
req | 393 | include/linux/blk.h | req->bh = bh->b_reqnext; |
req | 397 | include/linux/blk.h | if ((bh = req->bh) != NULL) { |
req | 398 | include/linux/blk.h | req->current_nr_sectors = bh->b_size >> 9; |
req | 399 | include/linux/blk.h | if (req->nr_sectors < req->current_nr_sectors) { |
req | 400 | include/linux/blk.h | req->nr_sectors = req->current_nr_sectors; |
req | 403 | include/linux/blk.h | req->buffer = bh->b_data; |
req | 408 | include/linux/blk.h | add_blkdev_randomness(MAJOR(req->rq_dev)); |
req | 411 | include/linux/blk.h | blk_dev[MAJOR(req->rq_dev)].current_request = req->next; |
req | 414 | include/linux/blk.h | DEVICE_OFF(req->rq_dev); |
req | 415 | include/linux/blk.h | CURRENT = req->next; |
req | 417 | include/linux/blk.h | if (req->sem != NULL) |
req | 418 | include/linux/blk.h | up(req->sem); |
req | 419 | include/linux/blk.h | req->rq_status = RQ_INACTIVE; |
req | 426 | include/linux/blk.h | extern inline void end_redirect (struct request *req) |
req | 430 | include/linux/blk.h | req->errors = 0; |
req | 432 | include/linux/blk.h | if ((bh = req->bh) != NULL) |
req | 434 | include/linux/blk.h | req->bh = bh->b_reqnext; |
req | 437 | include/linux/blk.h | if ((bh = req->bh) != NULL) |
req | 439 | include/linux/blk.h | req->sector += req->current_nr_sectors; |
req | 440 | include/linux/blk.h | req->current_nr_sectors = bh->b_size >> 9; |
req | 442 | include/linux/blk.h | if (req->nr_sectors < req->current_nr_sectors) |
req | 444 | include/linux/blk.h | req->nr_sectors = req->current_nr_sectors; |
req | 448 | include/linux/blk.h | req->buffer = bh->b_data; |
req | 51 | include/linux/blkdev.h | extern void add_request(struct blk_dev_struct * dev, struct request * req); |
req | 110 | include/linux/md.h | int (*map)(int minor, struct md_dev *md_dev, struct request *req); |
req | 204 | include/linux/serial.h | extern int register_serial(struct serial_struct *req); |
req | 284 | net/ipv4/rarp.c | static int rarp_req_set(struct arpreq *req) |
req | 294 | net/ipv4/rarp.c | memcpy_fromfs(&r, req, sizeof(r)); |
req | 388 | net/ipv4/rarp.c | static int rarp_req_get(struct arpreq *req) |
req | 399 | net/ipv4/rarp.c | memcpy_fromfs(&r, req, sizeof(r)); |
req | 434 | net/ipv4/rarp.c | memcpy_tofs(req, &r, sizeof(r)); |
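
Taken together, the sites indexed above touch nearly every field of the block-layer `struct request`: the queue linkage (`next`), the life-cycle flag (`rq_status`), the target device (`rq_dev`), the transfer description (`cmd`, `sector`, `nr_sectors`, `current_nr_sectors`, `buffer`), the buffer-head chain used for merging (`bh`, `bhtail`), and the completion semaphore (`sem`). As a reading aid, the sketch below reconstructs the structure as it appears in this kernel generation's `include/linux/blkdev.h`; it is inferred from the accesses in the table, so exact types, field order, and comments are assumptions rather than a verbatim copy. The `shared_count` field assigned in drivers/block/md.c is omitted here, since it may belong to the md/raid patches rather than the stock header.

```c
/*
 * Sketch of struct request as used by the sites indexed above
 * (include/linux/blkdev.h of this era).  Field names are those seen
 * in the index; types and ordering are assumptions.
 */
struct request {
	volatile int rq_status;            /* RQ_INACTIVE, RQ_ACTIVE, RQ_SCSI_DONE, ... */
	kdev_t rq_dev;                     /* device the request is aimed at */
	int cmd;                           /* READ or WRITE */
	int errors;                        /* error count for retries */
	unsigned long sector;              /* first sector still to be transferred */
	unsigned long nr_sectors;          /* sectors remaining in the whole request */
	unsigned long current_nr_sectors;  /* sectors remaining in the current buffer head */
	char *buffer;                      /* data pointer for the current chunk */
	struct semaphore *sem;             /* non-NULL for synchronous (e.g. paging) requests */
	struct buffer_head *bh;            /* head of the buffer_head chain */
	struct buffer_head *bhtail;        /* tail of the chain, used when merging at the back */
	struct request *next;              /* next request on the per-device queue */
};
```

The merge tests in ll_rw_blk.c and md.c (`req->sector + req->nr_sectors == sector` for back merges, `req->sector - count == sector` for front merges) and the completion paths in blk.h and scsi.h (walk `bh`, shrink `nr_sectors`, finally `up(req->sem)` and mark `RQ_INACTIVE`) all operate on these fields.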