tag | line | file | source code |
--- | --- | --- | --- |
req | 1382 | drivers/block/ide-cd.c | struct request req; |
req | 1392 | drivers/block/ide-cd.c | ide_init_drive_cmd (&req); |
req | 1393 | drivers/block/ide-cd.c | req.cmd = PACKET_COMMAND; |
req | 1394 | drivers/block/ide-cd.c | req.buffer = (char *)pc; |
req | 1395 | drivers/block/ide-cd.c | (void) ide_do_drive_cmd (drive, &req, ide_wait); |
req | 2488 | drivers/block/ide-cd.c | struct request req; |
req | 2489 | drivers/block/ide-cd.c | ide_init_drive_cmd (&req); |
req | 2490 | drivers/block/ide-cd.c | req.cmd = RESET_DRIVE_COMMAND; |
req | 2491 | drivers/block/ide-cd.c | return ide_do_drive_cmd (drive, &req, ide_wait); |
req | 108 | drivers/block/linear.c | static int linear_map (int minor, struct md_dev *mddev, struct request *req) |
req | 118 | drivers/block/linear.c | while (req->nr_sectors) |
req | 120 | drivers/block/linear.c | block=req->sector >> 1; |
req | 137 | drivers/block/linear.c | if (req->sem) /* This is a paging request */ |
req | 139 | drivers/block/linear.c | req->rq_dev=tmp_dev->dev; |
req | 140 | drivers/block/linear.c | req->sector=rblock << 1; |
req | 141 | drivers/block/linear.c | add_request (blk_dev+MAJOR (tmp_dev->dev), req); |
req | 148 | drivers/block/linear.c | for (nblk=0, bh=bh2=req->bh; |
req | 159 | drivers/block/linear.c | pending[queue].cmd=req->cmd; |
req | 162 | drivers/block/linear.c | pending[queue].current_nr_sectors=req->bh->b_size >> 9; |
req | 163 | drivers/block/linear.c | pending[queue].bh=req->bh; |
req | 167 | drivers/block/linear.c | req->bh=bh; |
req | 168 | drivers/block/linear.c | req->sector+=nblk << 1; |
req | 169 | drivers/block/linear.c | req->nr_sectors-=nblk << 1; |
req | 172 | drivers/block/linear.c | req->rq_status=RQ_INACTIVE; |
req | 115 | drivers/block/ll_rw_blk.c | register struct request *req, *limit; |
req | 125 | drivers/block/ll_rw_blk.c | req = prev_found; |
req | 127 | drivers/block/ll_rw_blk.c | req = ((req > all_requests) ? req : limit) - 1; |
req | 128 | drivers/block/ll_rw_blk.c | if (req->rq_status == RQ_INACTIVE) |
req | 130 | drivers/block/ll_rw_blk.c | if (req == prev_found) |
req | 133 | drivers/block/ll_rw_blk.c | prev_found = req; |
req | 134 | drivers/block/ll_rw_blk.c | req->rq_status = RQ_ACTIVE; |
req | 135 | drivers/block/ll_rw_blk.c | req->rq_dev = dev; |
req | 136 | drivers/block/ll_rw_blk.c | return req; |
req | 144 | drivers/block/ll_rw_blk.c | register struct request *req; |
req | 151 | drivers/block/ll_rw_blk.c | req = get_request(n, dev); |
req | 153 | drivers/block/ll_rw_blk.c | if (req) |
req | 159 | drivers/block/ll_rw_blk.c | return req; |
req | 164 | drivers/block/ll_rw_blk.c | register struct request *req; |
req | 167 | drivers/block/ll_rw_blk.c | req = get_request(n, dev); |
req | 169 | drivers/block/ll_rw_blk.c | if (req) |
req | 170 | drivers/block/ll_rw_blk.c | return req; |
req | 224 | drivers/block/ll_rw_blk.c | void add_request(struct blk_dev_struct * dev, struct request * req) |
req | 230 | drivers/block/ll_rw_blk.c | switch (MAJOR(req->rq_dev)) { |
req | 232 | drivers/block/ll_rw_blk.c | disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4; |
req | 234 | drivers/block/ll_rw_blk.c | drive_stat_acct(req->cmd, req->nr_sectors, disk_index); |
req | 238 | drivers/block/ll_rw_blk.c | disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6; |
req | 239 | drivers/block/ll_rw_blk.c | drive_stat_acct(req->cmd, req->nr_sectors, disk_index); |
req | 242 | drivers/block/ll_rw_blk.c | disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2; |
req | 243 | drivers/block/ll_rw_blk.c | drive_stat_acct(req->cmd, req->nr_sectors, disk_index); |
req | 248 | drivers/block/ll_rw_blk.c | req->next = NULL; |
req | 250 | drivers/block/ll_rw_blk.c | if (req->bh && req->bh->b_dev==req->bh->b_rdev) |
req | 251 | drivers/block/ll_rw_blk.c | mark_buffer_clean(req->bh); |
req | 253 | drivers/block/ll_rw_blk.c | dev->current_request = req; |
req | 260 | drivers/block/ll_rw_blk.c | if ((IN_ORDER(tmp,req) || |
req | 262 | drivers/block/ll_rw_blk.c | IN_ORDER(req,tmp->next)) |
req | 265 | drivers/block/ll_rw_blk.c | req->next = tmp->next; |
req | 266 | drivers/block/ll_rw_blk.c | tmp->next = req; |
req | 270 | drivers/block/ll_rw_blk.c | if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR) |
req | 279 | drivers/block/ll_rw_blk.c | struct request * req; |
req | 340 | drivers/block/ll_rw_blk.c | req = blk_dev[major].current_request; |
req | 341 | drivers/block/ll_rw_blk.c | if (!req) { |
req | 358 | drivers/block/ll_rw_blk.c | req = req->next; |
req | 359 | drivers/block/ll_rw_blk.c | if (!req) |
req | 368 | drivers/block/ll_rw_blk.c | if (req->sem) |
req | 370 | drivers/block/ll_rw_blk.c | if (req->cmd != rw) |
req | 372 | drivers/block/ll_rw_blk.c | if (req->nr_sectors >= 244) |
req | 374 | drivers/block/ll_rw_blk.c | if (req->rq_dev != bh->b_dev) |
req | 377 | drivers/block/ll_rw_blk.c | if (req->sector + req->nr_sectors == sector) { |
req | 378 | drivers/block/ll_rw_blk.c | req->bhtail->b_reqnext = bh; |
req | 379 | drivers/block/ll_rw_blk.c | req->bhtail = bh; |
req | 381 | drivers/block/ll_rw_blk.c | } else if (req->sector - count == sector) { |
req | 382 | drivers/block/ll_rw_blk.c | bh->b_reqnext = req->bh; |
req | 383 | drivers/block/ll_rw_blk.c | req->bh = bh; |
req | 384 | drivers/block/ll_rw_blk.c | req->buffer = bh->b_data; |
req | 385 | drivers/block/ll_rw_blk.c | req->current_nr_sectors = count; |
req | 386 | drivers/block/ll_rw_blk.c | req->sector = sector; |
req | 390 | drivers/block/ll_rw_blk.c | req->nr_sectors += count; |
req | 395 | drivers/block/ll_rw_blk.c | } while ((req = req->next) != NULL); |
req | 401 | drivers/block/ll_rw_blk.c | req = get_request(max_req, bh->b_dev); |
req | 405 | drivers/block/ll_rw_blk.c | if (!req) { |
req | 410 | drivers/block/ll_rw_blk.c | req = __get_request_wait(max_req, bh->b_dev); |
req | 414 | drivers/block/ll_rw_blk.c | req->cmd = rw; |
req | 415 | drivers/block/ll_rw_blk.c | req->errors = 0; |
req | 416 | drivers/block/ll_rw_blk.c | req->sector = sector; |
req | 417 | drivers/block/ll_rw_blk.c | req->nr_sectors = count; |
req | 418 | drivers/block/ll_rw_blk.c | req->current_nr_sectors = count; |
req | 419 | drivers/block/ll_rw_blk.c | req->buffer = bh->b_data; |
req | 420 | drivers/block/ll_rw_blk.c | req->sem = NULL; |
req | 421 | drivers/block/ll_rw_blk.c | req->bh = bh; |
req | 422 | drivers/block/ll_rw_blk.c | req->bhtail = bh; |
req | 423 | drivers/block/ll_rw_blk.c | req->next = NULL; |
req | 424 | drivers/block/ll_rw_blk.c | add_request(major+blk_dev,req); |
req | 549 | drivers/block/ll_rw_blk.c | struct request * req[8]; |
req | 577 | drivers/block/ll_rw_blk.c | req[j] = get_request_wait(NR_REQUEST, dev); |
req | 580 | drivers/block/ll_rw_blk.c | req[j] = get_request(NR_REQUEST, dev); |
req | 582 | drivers/block/ll_rw_blk.c | if (req[j] == NULL) |
req | 585 | drivers/block/ll_rw_blk.c | req[j]->cmd = rw; |
req | 586 | drivers/block/ll_rw_blk.c | req[j]->errors = 0; |
req | 587 | drivers/block/ll_rw_blk.c | req[j]->sector = (b[i] * buffersize) >> 9; |
req | 588 | drivers/block/ll_rw_blk.c | req[j]->nr_sectors = buffersize >> 9; |
req | 589 | drivers/block/ll_rw_blk.c | req[j]->current_nr_sectors = buffersize >> 9; |
req | 590 | drivers/block/ll_rw_blk.c | req[j]->buffer = buf; |
req | 591 | drivers/block/ll_rw_blk.c | req[j]->sem = &sem; |
req | 592 | drivers/block/ll_rw_blk.c | req[j]->bh = NULL; |
req | 593 | drivers/block/ll_rw_blk.c | req[j]->next = NULL; |
req | 594 | drivers/block/ll_rw_blk.c | add_request(major+blk_dev,req[j]); |
req | 605 | drivers/block/ll_rw_blk.c | struct request * req; |
req | 618 | drivers/block/ll_rw_blk.c | req = all_requests + NR_REQUEST; |
req | 619 | drivers/block/ll_rw_blk.c | while (--req >= all_requests) { |
req | 620 | drivers/block/ll_rw_blk.c | req->rq_status = RQ_INACTIVE; |
req | 621 | drivers/block/ll_rw_blk.c | req->next = NULL; |
req | 378 | drivers/block/md.c | static inline int remap_request (int minor, struct request *req) |
req | 386 | drivers/block/md.c | return (md_dev[minor].pers->map(minor, md_dev+minor, req)); |
req | 393 | drivers/block/md.c | struct request *req; |
req | 403 | drivers/block/md.c | req = blk_dev[MD_MAJOR].current_request; |
req | 404 | drivers/block/md.c | if (!req || (req->rq_status == RQ_INACTIVE)) |
req | 411 | drivers/block/md.c | reqsize=req->nr_sectors>>1; |
req | 412 | drivers/block/md.c | chunksize=1 << FACTOR_SHIFT(FACTOR(md_dev+MINOR(req->rq_dev))); |
req | 413 | drivers/block/md.c | if (reqsize==chunksize) (md_dev+MINOR(req->rq_dev))->equal_count++; |
req | 414 | drivers/block/md.c | if (reqsize<chunksize) (md_dev+MINOR(req->rq_dev))->smallest_count++; |
req | 415 | drivers/block/md.c | if (reqsize>chunksize) (md_dev+MINOR(req->rq_dev))->biggest_count++; |
req | 418 | drivers/block/md.c | blk_dev[MD_MAJOR].current_request = req->next; |
req | 421 | drivers/block/md.c | minor = MINOR(req->rq_dev); |
req | 422 | drivers/block/md.c | if ((MAJOR(req->rq_dev) != MD_MAJOR) || (minor >= MAX_REAL)) |
req | 424 | drivers/block/md.c | printk("md: bad device: %s\n", kdevname(req->rq_dev)); |
req | 425 | drivers/block/md.c | end_request(0, req); |
req | 429 | drivers/block/md.c | switch (remap_request (minor, req)) |
req | 432 | drivers/block/md.c | req->rq_status=RQ_INACTIVE; |
req | 440 | drivers/block/md.c | end_request (0, req); |
req | 456 | drivers/block/md.c | struct request *req; |
req | 480 | drivers/block/md.c | && (req = blk_dev[major].current_request)) |
req | 487 | drivers/block/md.c | req = req->next; |
req | 489 | drivers/block/md.c | while (req && !found) |
req | 491 | drivers/block/md.c | if (req->rq_status!=RQ_ACTIVE && &blk_dev[major].plug!=req) |
req | 494 | drivers/block/md.c | if (req->rq_dev == dev && |
req | 495 | drivers/block/md.c | !req->sem && |
req | 496 | drivers/block/md.c | req->cmd == rw && |
req | 497 | drivers/block/md.c | req->sector + req->nr_sectors == pending[i].sector && |
req | 498 | drivers/block/md.c | (req->nr_sectors + pending[i].nr_sectors) < 245) |
req | 500 | drivers/block/md.c | req->bhtail->b_reqnext = bh; |
req | 501 | drivers/block/md.c | req->bhtail = pending[i].bhtail; |
req | 502 | drivers/block/md.c | req->nr_sectors += pending[i].nr_sectors; |
req | 508 | drivers/block/md.c | req->rq_dev == dev && |
req | 509 | drivers/block/md.c | !req->sem && |
req | 510 | drivers/block/md.c | req->cmd == rw && |
req | 511 | drivers/block/md.c | req->sector - pending[i].nr_sectors == pending[i].sector && |
req | 512 | drivers/block/md.c | (req->nr_sectors + pending[i].nr_sectors) < 245) |
req | 514 | drivers/block/md.c | req->nr_sectors += pending[i].nr_sectors; |
req | 515 | drivers/block/md.c | pending[i].bhtail->b_reqnext = req->bh; |
req | 516 | drivers/block/md.c | req->buffer = bh->b_data; |
req | 517 | drivers/block/md.c | req->current_nr_sectors = bh->b_size >> 9; |
req | 518 | drivers/block/md.c | req->sector = pending[i].sector; |
req | 519 | drivers/block/md.c | req->bh = bh; |
req | 524 | drivers/block/md.c | req = req->next; |
req | 533 | drivers/block/md.c | req=get_md_request (max_req, dev); |
req | 536 | drivers/block/md.c | req->cmd = rw; |
req | 537 | drivers/block/md.c | req->errors = 0; |
req | 539 | drivers/block/md.c | req->shared_count = 0; |
req | 541 | drivers/block/md.c | req->sector = pending[i].sector; |
req | 542 | drivers/block/md.c | req->nr_sectors = pending[i].nr_sectors; |
req | 543 | drivers/block/md.c | req->current_nr_sectors = bh->b_size >> 9; |
req | 544 | drivers/block/md.c | req->buffer = bh->b_data; |
req | 545 | drivers/block/md.c | req->sem = NULL; |
req | 546 | drivers/block/md.c | req->bh = bh; |
req | 547 | drivers/block/md.c | req->bhtail = pending[i].bhtail; |
req | 548 | drivers/block/md.c | req->next = NULL; |
req | 550 | drivers/block/md.c | add_request (blk_dev + MAJOR(dev), req); |
req | 182 | drivers/block/raid0.c | static int raid0_map (int minor, struct md_dev *mddev, struct request *req) |
req | 195 | drivers/block/raid0.c | while (req->bh || req->sem) |
req | 197 | drivers/block/raid0.c | block=req->sector >> 1; |
req | 215 | drivers/block/raid0.c | if (req->sem) /* This is a paging request */ |
req | 217 | drivers/block/raid0.c | req->rq_dev=tmp_dev->dev; |
req | 218 | drivers/block/raid0.c | req->sector=rblock << 1; |
req | 219 | drivers/block/raid0.c | add_request (blk_dev+MAJOR (tmp_dev->dev), req); |
req | 228 | drivers/block/raid0.c | i<(1UL << FACTOR_SHIFT(factor)) && req->bh; |
req | 231 | drivers/block/raid0.c | bh=req->bh; |
req | 246 | drivers/block/raid0.c | pending[queue].cmd=req->cmd; |
req | 257 | drivers/block/raid0.c | end_redirect (req); /* Separate bh from the request */ |
req | 261 | drivers/block/raid0.c | req->rq_status=RQ_INACTIVE; |
req | 2799 | drivers/char/serial.c | int register_serial(struct serial_struct *req) |
req | 2808 | drivers/char/serial.c | if (rs_table[i].port == req->port) |
req | 2825 | drivers/char/serial.c | "device already open\n", i, req->port, req->irq); |
req | 2828 | drivers/char/serial.c | info->irq = req->irq; |
req | 2829 | drivers/char/serial.c | info->port = req->port; |
req | 2830 | drivers/char/serial.c | info->flags = req->flags; |
req | 864 | drivers/scsi/scsi.c | Scsi_Cmnd * request_queueable (struct request * req, Scsi_Device * device) |
req | 874 | drivers/scsi/scsi.c | if (req && req->rq_status == RQ_INACTIVE) |
req | 922 | drivers/scsi/scsi.c | if (req) { |
req | 923 | drivers/scsi/scsi.c | memcpy(&SCpnt->request, req, sizeof(struct request)); |
req | 925 | drivers/scsi/scsi.c | bhp = bh = req->bh; |
req | 931 | drivers/scsi/scsi.c | while(req->nr_sectors && bh){ |
req | 934 | drivers/scsi/scsi.c | req->nr_sectors -= bh->b_size >> 9; |
req | 935 | drivers/scsi/scsi.c | req->sector += bh->b_size >> 9; |
req | 939 | drivers/scsi/scsi.c | if(req->nr_sectors && bh && bh->b_reqnext){ /* Any leftovers? */ |
req | 941 | drivers/scsi/scsi.c | req->bh = bh->b_reqnext; /* Divide request */ |
req | 943 | drivers/scsi/scsi.c | bh = req->bh; |
req | 946 | drivers/scsi/scsi.c | SCpnt->request.nr_sectors -= req->nr_sectors; |
req | 947 | drivers/scsi/scsi.c | req->current_nr_sectors = bh->b_size >> 9; |
req | 948 | drivers/scsi/scsi.c | req->buffer = bh->b_data; |
req | 951 | drivers/scsi/scsi.c | req->rq_status = RQ_INACTIVE; |
req | 990 | drivers/scsi/scsi.c | struct request * req = NULL; |
req | 1002 | drivers/scsi/scsi.c | if (reqp) req = *reqp; |
req | 1005 | drivers/scsi/scsi.c | if (req) { |
req | 1006 | drivers/scsi/scsi.c | if(req->rq_status == RQ_INACTIVE) return NULL; |
req | 1007 | drivers/scsi/scsi.c | dev = req->rq_dev; |
req | 1056 | drivers/scsi/scsi.c | if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) { |
req | 1087 | drivers/scsi/scsi.c | if (req) { |
req | 1088 | drivers/scsi/scsi.c | memcpy(&SCpnt->request, req, sizeof(struct request)); |
req | 1090 | drivers/scsi/scsi.c | bhp = bh = req->bh; |
req | 1096 | drivers/scsi/scsi.c | while(req->nr_sectors && bh){ |
req | 1099 | drivers/scsi/scsi.c | req->nr_sectors -= bh->b_size >> 9; |
req | 1100 | drivers/scsi/scsi.c | req->sector += bh->b_size >> 9; |
req | 1104 | drivers/scsi/scsi.c | if(req->nr_sectors && bh && bh->b_reqnext){/* Any leftovers? */ |
req | 1106 | drivers/scsi/scsi.c | req->bh = bh->b_reqnext; /* Divide request */ |
req | 1108 | drivers/scsi/scsi.c | bh = req->bh; |
req | 1110 | drivers/scsi/scsi.c | SCpnt->request.nr_sectors -= req->nr_sectors; |
req | 1111 | drivers/scsi/scsi.c | req->current_nr_sectors = bh->b_size >> 9; |
req | 1112 | drivers/scsi/scsi.c | req->buffer = bh->b_data; |
req | 1117 | drivers/scsi/scsi.c | req->rq_status = RQ_INACTIVE; |
req | 1118 | drivers/scsi/scsi.c | *reqp = req->next; |
req | 3156 | drivers/scsi/scsi.c | struct request * req; |
req | 3158 | drivers/scsi/scsi.c | req = blk_dev[i].current_request; |
req | 3159 | drivers/scsi/scsi.c | while(req) { |
req | 3161 | drivers/scsi/scsi.c | kdevname(req->rq_dev), |
req | 3162 | drivers/scsi/scsi.c | req->cmd, |
req | 3163 | drivers/scsi/scsi.c | req->sector, |
req | 3164 | drivers/scsi/scsi.c | req->nr_sectors, |
req | 3165 | drivers/scsi/scsi.c | req->current_nr_sectors); |
req | 3166 | drivers/scsi/scsi.c | req = req->next; |
req | 460 | drivers/scsi/scsi.h | struct request * req; |
req | 463 | drivers/scsi/scsi.h | req = &SCpnt->request; |
req | 464 | drivers/scsi/scsi.h | req->errors = 0; |
req | 467 | drivers/scsi/scsi.h | kdevname(req->rq_dev), req->sector); |
req | 471 | drivers/scsi/scsi.h | if ((bh = req->bh) != NULL) { |
req | 472 | drivers/scsi/scsi.h | req->bh = bh->b_reqnext; |
req | 473 | drivers/scsi/scsi.h | req->nr_sectors -= bh->b_size >> 9; |
req | 474 | drivers/scsi/scsi.h | req->sector += bh->b_size >> 9; |
req | 479 | drivers/scsi/scsi.h | if ((bh = req->bh) != NULL) { |
req | 480 | drivers/scsi/scsi.h | req->current_nr_sectors = bh->b_size >> 9; |
req | 481 | drivers/scsi/scsi.h | if (req->nr_sectors < req->current_nr_sectors) { |
req | 482 | drivers/scsi/scsi.h | req->nr_sectors = req->current_nr_sectors; |
req | 488 | drivers/scsi/scsi.h | if (req->bh){ |
req | 489 | drivers/scsi/scsi.h | req->buffer = bh->b_data; |
req | 492 | drivers/scsi/scsi.h | DEVICE_OFF(req->rq_dev); |
req | 493 | drivers/scsi/scsi.h | if (req->sem != NULL) { |
req | 494 | drivers/scsi/scsi.h | up(req->sem); |
req | 505 | drivers/scsi/scsi.h | req->rq_status = RQ_INACTIVE; |
req | 94 | drivers/scsi/scsi_ioctl.c | struct request * req; |
req | 96 | drivers/scsi/scsi_ioctl.c | req = &SCpnt->request; |
req | 97 | drivers/scsi/scsi_ioctl.c | req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ |
req | 99 | drivers/scsi/scsi_ioctl.c | if (req->sem != NULL) { |
req | 100 | drivers/scsi/scsi_ioctl.c | up(req->sem); |
req | 410 | drivers/scsi/sd.c | struct request * req = NULL; |
req | 483 | drivers/scsi/sd.c | req = CURRENT; |
req | 484 | drivers/scsi/sd.c | while(req){ |
req | 485 | drivers/scsi/sd.c | SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device); |
req | 487 | drivers/scsi/sd.c | req1 = req; |
req | 488 | drivers/scsi/sd.c | req = req->next; |
req | 490 | drivers/scsi/sd.c | if (SCpnt && req->rq_status == RQ_INACTIVE) { |
req | 491 | drivers/scsi/sd.c | if (req == CURRENT) |
req | 494 | drivers/scsi/sd.c | req1->next = req->next; |
req | 942 | drivers/scsi/sd.c | struct request * req; |
req | 944 | drivers/scsi/sd.c | req = &SCpnt->request; |
req | 945 | drivers/scsi/sd.c | req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ |
req | 947 | drivers/scsi/sd.c | if (req->sem != NULL) { |
req | 948 | drivers/scsi/sd.c | up(req->sem); |
req | 562 | drivers/scsi/sr.c | struct request * req = NULL; |
req | 617 | drivers/scsi/sr.c | req = CURRENT; |
req | 618 | drivers/scsi/sr.c | while(req){ |
req | 619 | drivers/scsi/sr.c | SCpnt = request_queueable(req, |
req | 620 | drivers/scsi/sr.c | scsi_CDs[DEVICE_NR(req->rq_dev)].device); |
req | 622 | drivers/scsi/sr.c | req1 = req; |
req | 623 | drivers/scsi/sr.c | req = req->next; |
req | 625 | drivers/scsi/sr.c | if (SCpnt && req->rq_status == RQ_INACTIVE) { |
req | 626 | drivers/scsi/sr.c | if (req == CURRENT) |
req | 629 | drivers/scsi/sr.c | req1->next = req->next; |
req | 969 | drivers/scsi/sr.c | struct request * req; |
req | 971 | drivers/scsi/sr.c | req = &SCpnt->request; |
req | 972 | drivers/scsi/sr.c | req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ |
req | 974 | drivers/scsi/sr.c | if (req->sem != NULL) { |
req | 975 | drivers/scsi/sr.c | up(req->sem); |
req | 27 | drivers/scsi/sr_ioctl.c | struct request * req; |
req | 29 | drivers/scsi/sr_ioctl.c | req = &SCpnt->request; |
req | 30 | drivers/scsi/sr_ioctl.c | req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */ |
req | 32 | drivers/scsi/sr_ioctl.c | if (req->sem != NULL) { |
req | 33 | drivers/scsi/sr_ioctl.c | up(req->sem); |
req | 92 | fs/nfs/bio.c | nfs_read_cb(int result, struct nfsiod_req *req) |
req | 94 | fs/nfs/bio.c | struct page *page = (struct page *) req->rq_cdata; |
req | 101 | fs/nfs/bio.c | && (result = nfs_proc_read_reply(&req->rq_rpcreq)) >= 0) { |
req | 117 | fs/nfs/bio.c | struct nfsiod_req *req; |
req | 125 | fs/nfs/bio.c | if (!(req = nfsiod_reserve(NFS_SERVER(inode), nfs_read_cb))) |
req | 127 | fs/nfs/bio.c | result = nfs_proc_read_request(&req->rq_rpcreq, |
req | 132 | fs/nfs/bio.c | req->rq_cdata = page; |
req | 134 | fs/nfs/bio.c | result = nfsiod_enqueue(req); |
req | 140 | fs/nfs/bio.c | nfsiod_release(req); |
req | 41 | fs/nfs/nfsiod.c | struct nfsiod_req *req; |
req | 43 | fs/nfs/nfsiod.c | if (!(req = free_list)) { |
req | 47 | fs/nfs/nfsiod.c | free_list = req->rq_next; |
req | 48 | fs/nfs/nfsiod.c | memset(&req->rq_rpcreq, 0, sizeof(struct rpc_ioreq)); |
req | 50 | fs/nfs/nfsiod.c | if (rpc_reserve(server->rsock, &req->rq_rpcreq, 1) < 0) { |
req | 52 | fs/nfs/nfsiod.c | req->rq_next = free_list; |
req | 53 | fs/nfs/nfsiod.c | free_list = req; |
req | 57 | fs/nfs/nfsiod.c | req->rq_server = server; |
req | 58 | fs/nfs/nfsiod.c | req->rq_callback = callback; |
req | 60 | fs/nfs/nfsiod.c | return req; |
req | 64 | fs/nfs/nfsiod.c | nfsiod_release(struct nfsiod_req *req) |
req | 67 | fs/nfs/nfsiod.c | rpc_release(req->rq_server->rsock, &req->rq_rpcreq); |
req | 68 | fs/nfs/nfsiod.c | memset(&req->rq_rpcreq, 0, sizeof(struct rpc_ioreq)); |
req | 69 | fs/nfs/nfsiod.c | req->rq_next = free_list; |
req | 70 | fs/nfs/nfsiod.c | free_list = req; |
req | 77 | fs/nfs/nfsiod.c | nfsiod_enqueue(struct nfsiod_req *req) |
req | 81 | fs/nfs/nfsiod.c | dprintk("BIO: enqueuing request %p\n", &req->rq_rpcreq); |
req | 82 | fs/nfs/nfsiod.c | result = rpc_transmit(req->rq_server->rsock, &req->rq_rpcreq); |
req | 86 | fs/nfs/nfsiod.c | dprintk("BIO: waking up nfsiod (%p)\n", req->rq_wait); |
req | 87 | fs/nfs/nfsiod.c | wake_up(&req->rq_wait); |
req | 99 | fs/nfs/nfsiod.c | struct nfsiod_req request, *req = &request; |
req | 105 | fs/nfs/nfsiod.c | memset(req, 0, sizeof(*req)); |
req | 106 | fs/nfs/nfsiod.c | req->rq_next = free_list; |
req | 107 | fs/nfs/nfsiod.c | free_list = req; |
req | 112 | fs/nfs/nfsiod.c | interruptible_sleep_on(&req->rq_wait); |
req | 116 | fs/nfs/nfsiod.c | if (!req->rq_rpcreq.rq_slot) |
req | 122 | fs/nfs/nfsiod.c | result = nfs_rpc_doio(req->rq_server, &req->rq_rpcreq, 1); |
req | 123 | fs/nfs/nfsiod.c | req->rq_callback(result, req); |
req | 420 | fs/nfs/proc.c | nfs_proc_read_request(struct rpc_ioreq *req, struct nfs_server *server, |
req | 436 | fs/nfs/proc.c | req->rq_svec[0].iov_base = p0; |
req | 437 | fs/nfs/proc.c | req->rq_svec[0].iov_len = (p - p0) << 2; |
req | 438 | fs/nfs/proc.c | req->rq_slen = (p - p0) << 2; |
req | 439 | fs/nfs/proc.c | req->rq_snr = 1; |
req | 442 | fs/nfs/proc.c | req->rq_rvec[0].iov_base = p0; |
req | 443 | fs/nfs/proc.c | req->rq_rvec[0].iov_len = len << 2; |
req | 444 | fs/nfs/proc.c | req->rq_rvec[1].iov_base = buf; |
req | 445 | fs/nfs/proc.c | req->rq_rvec[1].iov_len = count; |
req | 446 | fs/nfs/proc.c | req->rq_rvec[2].iov_base = p0 + len; /* spill buffer */ |
req | 447 | fs/nfs/proc.c | req->rq_rvec[2].iov_len = (NFS_SLACK_SPACE - len) << 2; |
req | 448 | fs/nfs/proc.c | req->rq_rlen = count + NFS_SLACK_SPACE; |
req | 449 | fs/nfs/proc.c | req->rq_rnr = 3; |
req | 451 | fs/nfs/proc.c | req->rq_addr = &server->toaddr; |
req | 452 | fs/nfs/proc.c | req->rq_alen = sizeof(server->toaddr); |
req | 458 | fs/nfs/proc.c | nfs_proc_read_reply(struct rpc_ioreq *req) |
req | 465 | fs/nfs/proc.c | p0 = (__u32 *) req->rq_rvec[0].iov_base; |
req | 472 | fs/nfs/proc.c | if (p != req->rq_rvec[2].iov_base) { |
req | 192 | fs/nfs/rpcsock.c | rpc_reserve(struct rpc_sock *rsock, struct rpc_ioreq *req, int nocwait) |
req | 196 | fs/nfs/rpcsock.c | req->rq_slot = NULL; |
req | 218 | fs/nfs/rpcsock.c | slot->w_req = req; |
req | 221 | fs/nfs/rpcsock.c | req->rq_slot = slot; |
req | 229 | fs/nfs/rpcsock.c | rpc_release(struct rpc_sock *rsock, struct rpc_ioreq *req) |
req | 231 | fs/nfs/rpcsock.c | struct rpc_wait *slot = req->rq_slot; |
req | 253 | fs/nfs/rpcsock.c | req->rq_slot = NULL; |
req | 304 | fs/nfs/rpcsock.c | struct rpc_ioreq *req = slot->w_req; |
req | 310 | fs/nfs/rpcsock.c | memcpy(iov, req->rq_svec, req->rq_snr * sizeof(iov[0])); |
req | 316 | fs/nfs/rpcsock.c | rpc_send_check("rpc_send", (u32 *) req->rq_svec[0].iov_base); |
req | 317 | fs/nfs/rpcsock.c | return rpc_sendmsg(rsock, iov, req->rq_snr, req->rq_slen, |
req | 318 | fs/nfs/rpcsock.c | req->rq_addr, req->rq_alen); |
req | 325 | fs/nfs/rpcsock.c | rpc_transmit(struct rpc_sock *rsock, struct rpc_ioreq *req) |
req | 327 | fs/nfs/rpcsock.c | rpc_send_check("rpc_transmit", (u32 *) req->rq_svec[0].iov_base); |
req | 328 | fs/nfs/rpcsock.c | return rpc_send(rsock, req->rq_slot); |
req | 338 | fs/nfs/rpcsock.c | struct rpc_ioreq *req; |
req | 386 | fs/nfs/rpcsock.c | req = rovr->w_req; |
req | 390 | fs/nfs/rpcsock.c | memcpy(iov, req->rq_rvec, req->rq_rnr * sizeof(iov[0])); |
req | 391 | fs/nfs/rpcsock.c | result = rpc_recvmsg(rsock, iov, req->rq_rnr, req->rq_rlen, 0); |
req | 446 | fs/nfs/rpcsock.c | rpc_doio(struct rpc_sock *rsock, struct rpc_ioreq *req, |
req | 455 | fs/nfs/rpcsock.c | slot = req->rq_slot; |
req | 458 | fs/nfs/rpcsock.c | dprintk("RPC: rpc_doio: TP1 (req %p)\n", req); |
req | 461 | fs/nfs/rpcsock.c | result = rpc_reserve(rsock, req, 0); |
req | 466 | fs/nfs/rpcsock.c | slot = req->rq_slot; |
req | 468 | fs/nfs/rpcsock.c | (u32 *) req->rq_svec[0].iov_base); |
req | 513 | fs/nfs/rpcsock.c | rpc_call(struct rpc_sock *rsock, struct rpc_ioreq *req, |
req | 518 | fs/nfs/rpcsock.c | result = rpc_doio(rsock, req, strategy, 0); |
req | 519 | fs/nfs/rpcsock.c | if (req->rq_slot == NULL) |
req | 521 | fs/nfs/rpcsock.c | rpc_release(rsock, req); |
req | 44 | fs/nfs/sock.c | struct rpc_ioreq req; |
req | 48 | fs/nfs/sock.c | req.rq_addr = &server->toaddr; |
req | 49 | fs/nfs/sock.c | req.rq_alen = sizeof(server->toaddr); |
req | 50 | fs/nfs/sock.c | req.rq_slot = NULL; |
req | 52 | fs/nfs/sock.c | req.rq_svec[0].iov_base = start; |
req | 53 | fs/nfs/sock.c | req.rq_svec[0].iov_len = (end - start) << 2; |
req | 54 | fs/nfs/sock.c | req.rq_slen = (end - start) << 2; |
req | 55 | fs/nfs/sock.c | req.rq_snr = 1; |
req | 56 | fs/nfs/sock.c | req.rq_rvec[0].iov_base = start; |
req | 57 | fs/nfs/sock.c | req.rq_rvec[0].iov_len = size; |
req | 58 | fs/nfs/sock.c | req.rq_rlen = size; |
req | 59 | fs/nfs/sock.c | req.rq_rnr = 1; |
req | 61 | fs/nfs/sock.c | return nfs_rpc_doio(server, &req, 0); |
req | 65 | fs/nfs/sock.c | nfs_rpc_doio(struct nfs_server *server, struct rpc_ioreq *req, int async) |
req | 90 | fs/nfs/sock.c | result = rpc_doio(server->rsock, req, &timeout, async); |
req | 91 | fs/nfs/sock.c | rpc_release(server->rsock, req); /* Release slot */ |
req | 383 | include/linux/blk.h | struct request *req = hwgroup->rq; |
req | 385 | include/linux/blk.h | static void end_request (int uptodate, struct request * req) { |
req | 388 | include/linux/blk.h | struct request *req = CURRENT; |
req | 392 | include/linux/blk.h | req->errors = 0; |
req | 395 | include/linux/blk.h | kdevname(req->rq_dev), req->sector); |
req | 396 | include/linux/blk.h | req->nr_sectors--; |
req | 397 | include/linux/blk.h | req->nr_sectors &= ~SECTOR_MASK; |
req | 398 | include/linux/blk.h | req->sector += (BLOCK_SIZE / 512); |
req | 399 | include/linux/blk.h | req->sector &= ~SECTOR_MASK; |
req | 402 | include/linux/blk.h | if ((bh = req->bh) != NULL) { |
req | 403 | include/linux/blk.h | req->bh = bh->b_reqnext; |
req | 407 | include/linux/blk.h | if ((bh = req->bh) != NULL) { |
req | 408 | include/linux/blk.h | req->current_nr_sectors = bh->b_size >> 9; |
req | 409 | include/linux/blk.h | if (req->nr_sectors < req->current_nr_sectors) { |
req | 410 | include/linux/blk.h | req->nr_sectors = req->current_nr_sectors; |
req | 413 | include/linux/blk.h | req->buffer = bh->b_data; |
req | 418 | include/linux/blk.h | add_blkdev_randomness(MAJOR(req->rq_dev)); |
req | 421 | include/linux/blk.h | blk_dev[MAJOR(req->rq_dev)].current_request = req->next; |
req | 424 | include/linux/blk.h | DEVICE_OFF(req->rq_dev); |
req | 425 | include/linux/blk.h | CURRENT = req->next; |
req | 427 | include/linux/blk.h | if (req->sem != NULL) |
req | 428 | include/linux/blk.h | up(req->sem); |
req | 429 | include/linux/blk.h | req->rq_status = RQ_INACTIVE; |
req | 436 | include/linux/blk.h | extern inline void end_redirect (struct request *req) |
req | 440 | include/linux/blk.h | req->errors = 0; |
req | 442 | include/linux/blk.h | if ((bh = req->bh) != NULL) |
req | 444 | include/linux/blk.h | req->bh = bh->b_reqnext; |
req | 447 | include/linux/blk.h | if ((bh = req->bh) != NULL) |
req | 449 | include/linux/blk.h | req->sector += req->current_nr_sectors; |
req | 450 | include/linux/blk.h | req->current_nr_sectors = bh->b_size >> 9; |
req | 452 | include/linux/blk.h | if (req->nr_sectors < req->current_nr_sectors) |
req | 454 | include/linux/blk.h | req->nr_sectors = req->current_nr_sectors; |
req | 458 | include/linux/blk.h | req->buffer = bh->b_data; |
req | 54 | include/linux/blkdev.h | extern void add_request(struct blk_dev_struct * dev, struct request * req); |
req | 116 | include/linux/if_arp.h | unsigned short req; /* request type */ |
req | 110 | include/linux/md.h | int (*map)(int minor, struct md_dev *md_dev, struct request *req); |
req | 204 | include/linux/serial.h | extern int register_serial(struct serial_struct *req); |
req | 447 | net/ipv4/arp.c | arpreq.req = ARPD_UPDATE; |
req | 503 | net/ipv4/arp.c | arpreq.req = ARPD_LOOKUP; |
req | 284 | net/ipv4/rarp.c | static int rarp_req_set(struct arpreq *req) |
req | 294 | net/ipv4/rarp.c | memcpy_fromfs(&r, req, sizeof(r)); |
req | 388 | net/ipv4/rarp.c | static int rarp_req_get(struct arpreq *req) |
req | 399 | net/ipv4/rarp.c | memcpy_fromfs(&r, req, sizeof(r)); |
req | 434 | net/ipv4/rarp.c | memcpy_tofs(req, &r, sizeof(r)); |
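
Nearly every block-driver row above reads or writes the same handful of `struct request` fields. As a reading aid, here is a minimal sketch of the request descriptor those references imply, reconstructed purely from the fields used in this index (`rq_status`, `rq_dev`, `cmd`, `errors`, `sector`, `nr_sectors`, `current_nr_sectors`, `buffer`, `sem`, `bh`, `bhtail`, `next`); the field order, exact types, and comments are assumptions, not the authoritative include/linux/blkdev.h text:

```c
/*
 * Sketch only: field names come from the cross-references above;
 * types and ordering are assumed, see include/linux/blkdev.h for
 * the real 2.0-era definition.
 */
#include <linux/kdev_t.h>        /* kdev_t */

struct buffer_head;              /* buffers chain through b_reqnext */
struct semaphore;                /* completion for synchronous I/O  */

struct request {
	volatile int rq_status;             /* RQ_INACTIVE, RQ_ACTIVE, ...     */
	kdev_t rq_dev;                      /* target device                   */
	int cmd;                            /* READ or WRITE                   */
	int errors;                         /* cleared when the request starts */
	unsigned long sector;               /* first sector of the transfer    */
	unsigned long nr_sectors;           /* sectors remaining in request    */
	unsigned long current_nr_sectors;   /* sectors in the head buffer      */
	char *buffer;                       /* data area of the head buffer    */
	struct semaphore *sem;              /* non-NULL marks a paging request */
	struct buffer_head *bh;             /* head of the buffer chain        */
	struct buffer_head *bhtail;         /* tail, used for back-merges      */
	struct request *next;               /* per-device queue link           */
};
```

The merge tests in ll_rw_blk.c (source lines 377 and 381 above) read directly off these fields: a buffer starting at `sector + nr_sectors` is appended via `bhtail`, while one ending at `sector` is prepended, rewinding `sector`, `buffer`, and `current_nr_sectors`.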
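
The fs/nfs rows follow an analogous pattern for RPC: nfs_proc_read_request() fills a scatter/gather descriptor whose send and receive vectors rpc_send() and rpc_recvmsg() later copy into iovecs. A sketch of that descriptor as the references imply it; `RPC_MAXIOV` is a hypothetical capacity (reads above use three receive entries) and all types are assumed, not the 2.0 header's:

```c
/*
 * Sketch only: field names come from the fs/nfs rows above;
 * RPC_MAXIOV and the types are assumptions.
 */
#include <linux/uio.h>           /* struct iovec */

struct rpc_wait;                 /* transport slot, see rpcsock.c */

#define RPC_MAXIOV 3             /* hypothetical: header, data, spill */

struct rpc_ioreq {
	struct rpc_wait *rq_slot;            /* NULL until rpc_reserve()  */
	void *rq_addr;                       /* server address to send to */
	int rq_alen;                         /* length of that address    */
	struct iovec rq_svec[RPC_MAXIOV];    /* send scatter list         */
	int rq_snr;                          /* send entries used         */
	int rq_slen;                         /* total bytes to send       */
	struct iovec rq_rvec[RPC_MAXIOV];    /* receive scatter list      */
	int rq_rnr;                          /* receive entries used      */
	int rq_rlen;                         /* total bytes expected      */
};
```

This is why nfs_proc_read_request() (proc.c lines 436 to 449 above) can point `rq_rvec[1]` straight at the caller's buffer: the reply header, payload, and spill space land in separate vector entries with no extra copy.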