tag	line	file	source code
req1382drivers/block/ide-cd.cstruct request req;
req1392drivers/block/ide-cd.cide_init_drive_cmd (&req);
req1393drivers/block/ide-cd.creq.cmd = PACKET_COMMAND;
req1394drivers/block/ide-cd.creq.buffer = (char *)pc;
req1395drivers/block/ide-cd.c(void) ide_do_drive_cmd (drive, &req, ide_wait);
req2488drivers/block/ide-cd.cstruct request req;
req2489drivers/block/ide-cd.cide_init_drive_cmd (&req);
req2490drivers/block/ide-cd.creq.cmd = RESET_DRIVE_COMMAND;
req2491drivers/block/ide-cd.creturn ide_do_drive_cmd (drive, &req, ide_wait);
req108drivers/block/linear.cstatic int linear_map (int minor, struct md_dev *mddev, struct request *req)
req118drivers/block/linear.cwhile (req->nr_sectors)
req120drivers/block/linear.cblock=req->sector >> 1;
req137drivers/block/linear.cif (req->sem)        /* This is a paging request */
req139drivers/block/linear.creq->rq_dev=tmp_dev->dev;
req140drivers/block/linear.creq->sector=rblock << 1;
req141drivers/block/linear.cadd_request (blk_dev+MAJOR (tmp_dev->dev), req);
req148drivers/block/linear.cfor (nblk=0, bh=bh2=req->bh;
req159drivers/block/linear.cpending[queue].cmd=req->cmd;
req162drivers/block/linear.cpending[queue].current_nr_sectors=req->bh->b_size >> 9;
req163drivers/block/linear.cpending[queue].bh=req->bh;
req167drivers/block/linear.creq->bh=bh;
req168drivers/block/linear.creq->sector+=nblk << 1;
req169drivers/block/linear.creq->nr_sectors-=nblk << 1;
req172drivers/block/linear.creq->rq_status=RQ_INACTIVE;
req115drivers/block/ll_rw_blk.cregister struct request *req, *limit;
req125drivers/block/ll_rw_blk.creq = prev_found;
req127drivers/block/ll_rw_blk.creq = ((req > all_requests) ? req : limit) - 1;
req128drivers/block/ll_rw_blk.cif (req->rq_status == RQ_INACTIVE)
req130drivers/block/ll_rw_blk.cif (req == prev_found)
req133drivers/block/ll_rw_blk.cprev_found = req;
req134drivers/block/ll_rw_blk.creq->rq_status = RQ_ACTIVE;
req135drivers/block/ll_rw_blk.creq->rq_dev = dev;
req136drivers/block/ll_rw_blk.creturn req;
req144drivers/block/ll_rw_blk.cregister struct request *req;
req151drivers/block/ll_rw_blk.creq = get_request(n, dev);
req153drivers/block/ll_rw_blk.cif (req)
req159drivers/block/ll_rw_blk.creturn req;
req164drivers/block/ll_rw_blk.cregister struct request *req;
req167drivers/block/ll_rw_blk.creq = get_request(n, dev);
req169drivers/block/ll_rw_blk.cif (req)
req170drivers/block/ll_rw_blk.creturn req;
req224drivers/block/ll_rw_blk.cvoid add_request(struct blk_dev_struct * dev, struct request * req)
req230drivers/block/ll_rw_blk.cswitch (MAJOR(req->rq_dev)) {
req232drivers/block/ll_rw_blk.cdisk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
req234drivers/block/ll_rw_blk.cdrive_stat_acct(req->cmd, req->nr_sectors, disk_index);
req238drivers/block/ll_rw_blk.cdisk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
req239drivers/block/ll_rw_blk.cdrive_stat_acct(req->cmd, req->nr_sectors, disk_index);
req242drivers/block/ll_rw_blk.cdisk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
req243drivers/block/ll_rw_blk.cdrive_stat_acct(req->cmd, req->nr_sectors, disk_index);
req248drivers/block/ll_rw_blk.creq->next = NULL;
req250drivers/block/ll_rw_blk.cif (req->bh && req->bh->b_dev==req->bh->b_rdev)
req251drivers/block/ll_rw_blk.cmark_buffer_clean(req->bh);
req253drivers/block/ll_rw_blk.cdev->current_request = req;
req260drivers/block/ll_rw_blk.cif ((IN_ORDER(tmp,req) ||
req262drivers/block/ll_rw_blk.cIN_ORDER(req,tmp->next))
req265drivers/block/ll_rw_blk.creq->next = tmp->next;
req266drivers/block/ll_rw_blk.ctmp->next = req;
req270drivers/block/ll_rw_blk.cif (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
req279drivers/block/ll_rw_blk.cstruct request * req;
req340drivers/block/ll_rw_blk.creq = blk_dev[major].current_request;
req341drivers/block/ll_rw_blk.cif (!req) {
req358drivers/block/ll_rw_blk.creq = req->next;
req359drivers/block/ll_rw_blk.cif (!req)
req368drivers/block/ll_rw_blk.cif (req->sem)
req370drivers/block/ll_rw_blk.cif (req->cmd != rw)
req372drivers/block/ll_rw_blk.cif (req->nr_sectors >= 244)
req374drivers/block/ll_rw_blk.cif (req->rq_dev != bh->b_dev)
req377drivers/block/ll_rw_blk.cif (req->sector + req->nr_sectors == sector) {
req378drivers/block/ll_rw_blk.creq->bhtail->b_reqnext = bh;
req379drivers/block/ll_rw_blk.creq->bhtail = bh;
req381drivers/block/ll_rw_blk.c} else if (req->sector - count == sector) {
req382drivers/block/ll_rw_blk.cbh->b_reqnext = req->bh;
req383drivers/block/ll_rw_blk.creq->bh = bh;
req384drivers/block/ll_rw_blk.creq->buffer = bh->b_data;
req385drivers/block/ll_rw_blk.creq->current_nr_sectors = count;
req386drivers/block/ll_rw_blk.creq->sector = sector;
req390drivers/block/ll_rw_blk.creq->nr_sectors += count;
req395drivers/block/ll_rw_blk.c} while ((req = req->next) != NULL);
req401drivers/block/ll_rw_blk.creq = get_request(max_req, bh->b_dev);
req405drivers/block/ll_rw_blk.cif (!req) {
req410drivers/block/ll_rw_blk.creq = __get_request_wait(max_req, bh->b_dev);
req414drivers/block/ll_rw_blk.creq->cmd = rw;
req415drivers/block/ll_rw_blk.creq->errors = 0;
req416drivers/block/ll_rw_blk.creq->sector = sector;
req417drivers/block/ll_rw_blk.creq->nr_sectors = count;
req418drivers/block/ll_rw_blk.creq->current_nr_sectors = count;
req419drivers/block/ll_rw_blk.creq->buffer = bh->b_data;
req420drivers/block/ll_rw_blk.creq->sem = NULL;
req421drivers/block/ll_rw_blk.creq->bh = bh;
req422drivers/block/ll_rw_blk.creq->bhtail = bh;
req423drivers/block/ll_rw_blk.creq->next = NULL;
req424drivers/block/ll_rw_blk.cadd_request(major+blk_dev,req);
req549drivers/block/ll_rw_blk.cstruct request * req[8];
req577drivers/block/ll_rw_blk.creq[j] = get_request_wait(NR_REQUEST, dev);
req580drivers/block/ll_rw_blk.creq[j] = get_request(NR_REQUEST, dev);
req582drivers/block/ll_rw_blk.cif (req[j] == NULL)
req585drivers/block/ll_rw_blk.creq[j]->cmd = rw;
req586drivers/block/ll_rw_blk.creq[j]->errors = 0;
req587drivers/block/ll_rw_blk.creq[j]->sector = (b[i] * buffersize) >> 9;
req588drivers/block/ll_rw_blk.creq[j]->nr_sectors = buffersize >> 9;
req589drivers/block/ll_rw_blk.creq[j]->current_nr_sectors = buffersize >> 9;
req590drivers/block/ll_rw_blk.creq[j]->buffer = buf;
req591drivers/block/ll_rw_blk.creq[j]->sem = &sem;
req592drivers/block/ll_rw_blk.creq[j]->bh = NULL;
req593drivers/block/ll_rw_blk.creq[j]->next = NULL;
req594drivers/block/ll_rw_blk.cadd_request(major+blk_dev,req[j]);
req605drivers/block/ll_rw_blk.cstruct request * req;
req618drivers/block/ll_rw_blk.creq = all_requests + NR_REQUEST;
req619drivers/block/ll_rw_blk.cwhile (--req >= all_requests) {
req620drivers/block/ll_rw_blk.creq->rq_status = RQ_INACTIVE;
req621drivers/block/ll_rw_blk.creq->next = NULL;
req378drivers/block/md.cstatic inline int remap_request (int minor, struct request *req)
req386drivers/block/md.creturn (md_dev[minor].pers->map(minor, md_dev+minor, req));
req393drivers/block/md.cstruct request *req;
req403drivers/block/md.creq = blk_dev[MD_MAJOR].current_request;
req404drivers/block/md.cif (!req || (req->rq_status == RQ_INACTIVE))
req411drivers/block/md.creqsize=req->nr_sectors>>1;
req412drivers/block/md.cchunksize=1 << FACTOR_SHIFT(FACTOR(md_dev+MINOR(req->rq_dev)));
req413drivers/block/md.cif (reqsize==chunksize) (md_dev+MINOR(req->rq_dev))->equal_count++;
req414drivers/block/md.cif (reqsize<chunksize) (md_dev+MINOR(req->rq_dev))->smallest_count++;
req415drivers/block/md.cif (reqsize>chunksize) (md_dev+MINOR(req->rq_dev))->biggest_count++;
req418drivers/block/md.cblk_dev[MD_MAJOR].current_request = req->next;
req421drivers/block/md.cminor = MINOR(req->rq_dev);
req422drivers/block/md.cif ((MAJOR(req->rq_dev) != MD_MAJOR) || (minor >= MAX_REAL))
req424drivers/block/md.cprintk("md: bad device: %s\n", kdevname(req->rq_dev));
req425drivers/block/md.cend_request(0, req);
req429drivers/block/md.cswitch (remap_request (minor, req))
req432drivers/block/md.creq->rq_status=RQ_INACTIVE;
req440drivers/block/md.cend_request (0, req);
req456drivers/block/md.cstruct request *req;
req480drivers/block/md.c&& (req = blk_dev[major].current_request))
req487drivers/block/md.creq = req->next;
req489drivers/block/md.cwhile (req && !found)
req491drivers/block/md.cif (req->rq_status!=RQ_ACTIVE && &blk_dev[major].plug!=req)
req494drivers/block/md.cif (req->rq_dev == dev &&
req495drivers/block/md.c!req->sem &&
req496drivers/block/md.creq->cmd == rw &&
req497drivers/block/md.creq->sector + req->nr_sectors == pending[i].sector &&
req498drivers/block/md.c(req->nr_sectors + pending[i].nr_sectors) < 245)
req500drivers/block/md.creq->bhtail->b_reqnext = bh;
req501drivers/block/md.creq->bhtail = pending[i].bhtail;
req502drivers/block/md.creq->nr_sectors += pending[i].nr_sectors;
req508drivers/block/md.creq->rq_dev == dev &&
req509drivers/block/md.c!req->sem &&
req510drivers/block/md.creq->cmd == rw &&
req511drivers/block/md.creq->sector - pending[i].nr_sectors == pending[i].sector &&
req512drivers/block/md.c(req->nr_sectors + pending[i].nr_sectors) < 245)
req514drivers/block/md.creq->nr_sectors += pending[i].nr_sectors;
req515drivers/block/md.cpending[i].bhtail->b_reqnext = req->bh;
req516drivers/block/md.creq->buffer = bh->b_data;
req517drivers/block/md.creq->current_nr_sectors = bh->b_size >> 9;
req518drivers/block/md.creq->sector = pending[i].sector;
req519drivers/block/md.creq->bh = bh;
req524drivers/block/md.creq = req->next;
req533drivers/block/md.creq=get_md_request (max_req, dev);
req536drivers/block/md.creq->cmd = rw;
req537drivers/block/md.creq->errors = 0;
req539drivers/block/md.creq->shared_count = 0;
req541drivers/block/md.creq->sector = pending[i].sector;
req542drivers/block/md.creq->nr_sectors = pending[i].nr_sectors;
req543drivers/block/md.creq->current_nr_sectors = bh->b_size >> 9;
req544drivers/block/md.creq->buffer = bh->b_data;
req545drivers/block/md.creq->sem = NULL;
req546drivers/block/md.creq->bh = bh;
req547drivers/block/md.creq->bhtail = pending[i].bhtail;
req548drivers/block/md.creq->next = NULL;
req550drivers/block/md.cadd_request (blk_dev + MAJOR(dev), req);
req182drivers/block/raid0.cstatic int raid0_map (int minor, struct md_dev *mddev, struct request *req)
req195drivers/block/raid0.cwhile (req->bh || req->sem)
req197drivers/block/raid0.cblock=req->sector >> 1;
req215drivers/block/raid0.cif (req->sem)    /* This is a paging request */
req217drivers/block/raid0.creq->rq_dev=tmp_dev->dev;
req218drivers/block/raid0.creq->sector=rblock << 1;
req219drivers/block/raid0.cadd_request (blk_dev+MAJOR (tmp_dev->dev), req);
req228drivers/block/raid0.ci<(1UL << FACTOR_SHIFT(factor)) && req->bh;
req231drivers/block/raid0.cbh=req->bh;
req246drivers/block/raid0.cpending[queue].cmd=req->cmd;
req257drivers/block/raid0.cend_redirect (req);  /* Separate bh from the request */
req261drivers/block/raid0.creq->rq_status=RQ_INACTIVE;
req2799drivers/char/serial.cint register_serial(struct serial_struct *req)
req2808drivers/char/serial.cif (rs_table[i].port == req->port)
req2825drivers/char/serial.c"device already open\n", i, req->port, req->irq);
req2828drivers/char/serial.cinfo->irq = req->irq;
req2829drivers/char/serial.cinfo->port = req->port;
req2830drivers/char/serial.cinfo->flags = req->flags;
req864drivers/scsi/scsi.cScsi_Cmnd * request_queueable (struct request * req, Scsi_Device * device)
req874drivers/scsi/scsi.cif (req && req->rq_status == RQ_INACTIVE)
req922drivers/scsi/scsi.cif (req) {
req923drivers/scsi/scsi.cmemcpy(&SCpnt->request, req, sizeof(struct request));
req925drivers/scsi/scsi.cbhp = bh = req->bh;
req931drivers/scsi/scsi.cwhile(req->nr_sectors && bh){
req934drivers/scsi/scsi.creq->nr_sectors -= bh->b_size >> 9;
req935drivers/scsi/scsi.creq->sector += bh->b_size >> 9;
req939drivers/scsi/scsi.cif(req->nr_sectors && bh && bh->b_reqnext){  /* Any leftovers? */
req941drivers/scsi/scsi.creq->bh = bh->b_reqnext; /* Divide request */
req943drivers/scsi/scsi.cbh = req->bh;
req946drivers/scsi/scsi.cSCpnt->request.nr_sectors -= req->nr_sectors;
req947drivers/scsi/scsi.creq->current_nr_sectors = bh->b_size >> 9;
req948drivers/scsi/scsi.creq->buffer = bh->b_data;
req951drivers/scsi/scsi.creq->rq_status = RQ_INACTIVE;
req990drivers/scsi/scsi.cstruct request * req = NULL;
req1002drivers/scsi/scsi.cif (reqp) req = *reqp;
req1005drivers/scsi/scsi.cif (req) {
req1006drivers/scsi/scsi.cif(req->rq_status == RQ_INACTIVE) return NULL;
req1007drivers/scsi/scsi.cdev = req->rq_dev;
req1056drivers/scsi/scsi.cif (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) {
req1087drivers/scsi/scsi.cif (req) {
req1088drivers/scsi/scsi.cmemcpy(&SCpnt->request, req, sizeof(struct request));
req1090drivers/scsi/scsi.cbhp = bh = req->bh;
req1096drivers/scsi/scsi.cwhile(req->nr_sectors && bh){
req1099drivers/scsi/scsi.creq->nr_sectors -= bh->b_size >> 9;
req1100drivers/scsi/scsi.creq->sector += bh->b_size >> 9;
req1104drivers/scsi/scsi.cif(req->nr_sectors && bh && bh->b_reqnext){/* Any leftovers? */
req1106drivers/scsi/scsi.creq->bh = bh->b_reqnext; /* Divide request */
req1108drivers/scsi/scsi.cbh = req->bh;
req1110drivers/scsi/scsi.cSCpnt->request.nr_sectors -= req->nr_sectors;
req1111drivers/scsi/scsi.creq->current_nr_sectors = bh->b_size >> 9;
req1112drivers/scsi/scsi.creq->buffer = bh->b_data;
req1117drivers/scsi/scsi.creq->rq_status = RQ_INACTIVE;
req1118drivers/scsi/scsi.c*reqp = req->next;
req3156drivers/scsi/scsi.cstruct request * req;
req3158drivers/scsi/scsi.creq = blk_dev[i].current_request;
req3159drivers/scsi/scsi.cwhile(req) {
req3161drivers/scsi/scsi.ckdevname(req->rq_dev),
req3162drivers/scsi/scsi.creq->cmd,
req3163drivers/scsi/scsi.creq->sector,
req3164drivers/scsi/scsi.creq->nr_sectors,
req3165drivers/scsi/scsi.creq->current_nr_sectors);
req3166drivers/scsi/scsi.creq = req->next;
req460drivers/scsi/scsi.hstruct request * req;
req463drivers/scsi/scsi.hreq = &SCpnt->request;
req464drivers/scsi/scsi.hreq->errors = 0;
req467drivers/scsi/scsi.hkdevname(req->rq_dev), req->sector);
req471drivers/scsi/scsi.hif ((bh = req->bh) != NULL) {
req472drivers/scsi/scsi.hreq->bh = bh->b_reqnext;
req473drivers/scsi/scsi.hreq->nr_sectors -= bh->b_size >> 9;
req474drivers/scsi/scsi.hreq->sector += bh->b_size >> 9;
req479drivers/scsi/scsi.hif ((bh = req->bh) != NULL) {
req480drivers/scsi/scsi.hreq->current_nr_sectors = bh->b_size >> 9;
req481drivers/scsi/scsi.hif (req->nr_sectors < req->current_nr_sectors) {
req482drivers/scsi/scsi.hreq->nr_sectors = req->current_nr_sectors;
req488drivers/scsi/scsi.hif (req->bh){
req489drivers/scsi/scsi.hreq->buffer = bh->b_data;
req492drivers/scsi/scsi.hDEVICE_OFF(req->rq_dev);
req493drivers/scsi/scsi.hif (req->sem != NULL) {
req494drivers/scsi/scsi.hup(req->sem);
req505drivers/scsi/scsi.hreq->rq_status = RQ_INACTIVE;
req94drivers/scsi/scsi_ioctl.cstruct request * req;
req96drivers/scsi/scsi_ioctl.creq = &SCpnt->request;
req97drivers/scsi/scsi_ioctl.creq->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
req99drivers/scsi/scsi_ioctl.cif (req->sem != NULL) {
req100drivers/scsi/scsi_ioctl.cup(req->sem);
req410drivers/scsi/sd.cstruct request * req = NULL;
req483drivers/scsi/sd.creq = CURRENT;
req484drivers/scsi/sd.cwhile(req){
req485drivers/scsi/sd.cSCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device);
req487drivers/scsi/sd.creq1 = req;
req488drivers/scsi/sd.creq = req->next;
req490drivers/scsi/sd.cif (SCpnt && req->rq_status == RQ_INACTIVE) {
req491drivers/scsi/sd.cif (req == CURRENT) 
req494drivers/scsi/sd.creq1->next = req->next;
req942drivers/scsi/sd.cstruct request * req;
req944drivers/scsi/sd.creq = &SCpnt->request;
req945drivers/scsi/sd.creq->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
req947drivers/scsi/sd.cif (req->sem != NULL) {
req948drivers/scsi/sd.cup(req->sem);
req562drivers/scsi/sr.cstruct request * req = NULL;
req617drivers/scsi/sr.creq = CURRENT;
req618drivers/scsi/sr.cwhile(req){
req619drivers/scsi/sr.cSCpnt = request_queueable(req,
req620drivers/scsi/sr.cscsi_CDs[DEVICE_NR(req->rq_dev)].device);
req622drivers/scsi/sr.creq1 = req;
req623drivers/scsi/sr.creq = req->next;
req625drivers/scsi/sr.cif (SCpnt && req->rq_status == RQ_INACTIVE) {
req626drivers/scsi/sr.cif (req == CURRENT) 
req629drivers/scsi/sr.creq1->next = req->next;
req969drivers/scsi/sr.cstruct request * req;
req971drivers/scsi/sr.creq = &SCpnt->request;
req972drivers/scsi/sr.creq->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
req974drivers/scsi/sr.cif (req->sem != NULL) {
req975drivers/scsi/sr.cup(req->sem);
req27drivers/scsi/sr_ioctl.cstruct request * req;
req29drivers/scsi/sr_ioctl.creq = &SCpnt->request;
req30drivers/scsi/sr_ioctl.creq->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
req32drivers/scsi/sr_ioctl.cif (req->sem != NULL) {
req33drivers/scsi/sr_ioctl.cup(req->sem);
req92fs/nfs/bio.cnfs_read_cb(int result, struct nfsiod_req *req)
req94fs/nfs/bio.cstruct page  *page = (struct page *) req->rq_cdata;
req101fs/nfs/bio.c&& (result = nfs_proc_read_reply(&req->rq_rpcreq)) >= 0) {
req117fs/nfs/bio.cstruct nfsiod_req *req;
req125fs/nfs/bio.cif (!(req = nfsiod_reserve(NFS_SERVER(inode), nfs_read_cb)))
req127fs/nfs/bio.cresult = nfs_proc_read_request(&req->rq_rpcreq,
req132fs/nfs/bio.creq->rq_cdata = page;
req134fs/nfs/bio.cresult = nfsiod_enqueue(req);
req140fs/nfs/bio.cnfsiod_release(req);
req41fs/nfs/nfsiod.cstruct nfsiod_req  *req;
req43fs/nfs/nfsiod.cif (!(req = free_list)) {
req47fs/nfs/nfsiod.cfree_list = req->rq_next;
req48fs/nfs/nfsiod.cmemset(&req->rq_rpcreq, 0, sizeof(struct rpc_ioreq));
req50fs/nfs/nfsiod.cif (rpc_reserve(server->rsock, &req->rq_rpcreq, 1) < 0) {
req52fs/nfs/nfsiod.creq->rq_next = free_list;
req53fs/nfs/nfsiod.cfree_list = req;
req57fs/nfs/nfsiod.creq->rq_server = server;
req58fs/nfs/nfsiod.creq->rq_callback = callback;
req60fs/nfs/nfsiod.creturn req;
req64fs/nfs/nfsiod.cnfsiod_release(struct nfsiod_req *req)
req67fs/nfs/nfsiod.crpc_release(req->rq_server->rsock, &req->rq_rpcreq);
req68fs/nfs/nfsiod.cmemset(&req->rq_rpcreq, 0, sizeof(struct rpc_ioreq));
req69fs/nfs/nfsiod.creq->rq_next = free_list;
req70fs/nfs/nfsiod.cfree_list = req;
req77fs/nfs/nfsiod.cnfsiod_enqueue(struct nfsiod_req *req)
req81fs/nfs/nfsiod.cdprintk("BIO: enqueuing request %p\n", &req->rq_rpcreq);
req82fs/nfs/nfsiod.cresult = rpc_transmit(req->rq_server->rsock, &req->rq_rpcreq);
req86fs/nfs/nfsiod.cdprintk("BIO: waking up nfsiod (%p)\n", req->rq_wait);
req87fs/nfs/nfsiod.cwake_up(&req->rq_wait);
req99fs/nfs/nfsiod.cstruct nfsiod_req  request, *req = &request;
req105fs/nfs/nfsiod.cmemset(req, 0, sizeof(*req));
req106fs/nfs/nfsiod.creq->rq_next = free_list;
req107fs/nfs/nfsiod.cfree_list = req;
req112fs/nfs/nfsiod.cinterruptible_sleep_on(&req->rq_wait);
req116fs/nfs/nfsiod.cif (!req->rq_rpcreq.rq_slot)
req122fs/nfs/nfsiod.cresult = nfs_rpc_doio(req->rq_server, &req->rq_rpcreq, 1);
req123fs/nfs/nfsiod.creq->rq_callback(result, req);
req420fs/nfs/proc.cnfs_proc_read_request(struct rpc_ioreq *req, struct nfs_server *server,
req436fs/nfs/proc.creq->rq_svec[0].iov_base = p0;
req437fs/nfs/proc.creq->rq_svec[0].iov_len  = (p - p0) << 2;
req438fs/nfs/proc.creq->rq_slen = (p - p0) << 2;
req439fs/nfs/proc.creq->rq_snr = 1;
req442fs/nfs/proc.creq->rq_rvec[0].iov_base = p0;
req443fs/nfs/proc.creq->rq_rvec[0].iov_len  = len << 2;
req444fs/nfs/proc.creq->rq_rvec[1].iov_base = buf;
req445fs/nfs/proc.creq->rq_rvec[1].iov_len  = count;
req446fs/nfs/proc.creq->rq_rvec[2].iov_base = p0 + len;    /* spill buffer */
req447fs/nfs/proc.creq->rq_rvec[2].iov_len  = (NFS_SLACK_SPACE - len) << 2;
req448fs/nfs/proc.creq->rq_rlen = count + NFS_SLACK_SPACE;
req449fs/nfs/proc.creq->rq_rnr = 3;
req451fs/nfs/proc.creq->rq_addr = &server->toaddr;
req452fs/nfs/proc.creq->rq_alen = sizeof(server->toaddr);
req458fs/nfs/proc.cnfs_proc_read_reply(struct rpc_ioreq *req)
req465fs/nfs/proc.cp0 = (__u32 *) req->rq_rvec[0].iov_base;
req472fs/nfs/proc.cif (p != req->rq_rvec[2].iov_base) {
req192fs/nfs/rpcsock.crpc_reserve(struct rpc_sock *rsock, struct rpc_ioreq *req, int nocwait)
req196fs/nfs/rpcsock.creq->rq_slot = NULL;
req218fs/nfs/rpcsock.cslot->w_req = req;
req221fs/nfs/rpcsock.creq->rq_slot = slot;
req229fs/nfs/rpcsock.crpc_release(struct rpc_sock *rsock, struct rpc_ioreq *req)
req231fs/nfs/rpcsock.cstruct rpc_wait  *slot = req->rq_slot;
req253fs/nfs/rpcsock.creq->rq_slot = NULL;
req304fs/nfs/rpcsock.cstruct rpc_ioreq *req = slot->w_req;
req310fs/nfs/rpcsock.cmemcpy(iov, req->rq_svec, req->rq_snr * sizeof(iov[0]));
req316fs/nfs/rpcsock.crpc_send_check("rpc_send", (u32 *) req->rq_svec[0].iov_base);
req317fs/nfs/rpcsock.creturn rpc_sendmsg(rsock, iov, req->rq_snr, req->rq_slen,
req318fs/nfs/rpcsock.creq->rq_addr, req->rq_alen);
req325fs/nfs/rpcsock.crpc_transmit(struct rpc_sock *rsock, struct rpc_ioreq *req)
req327fs/nfs/rpcsock.crpc_send_check("rpc_transmit", (u32 *) req->rq_svec[0].iov_base);
req328fs/nfs/rpcsock.creturn rpc_send(rsock, req->rq_slot);
req338fs/nfs/rpcsock.cstruct rpc_ioreq *req;
req386fs/nfs/rpcsock.creq = rovr->w_req;
req390fs/nfs/rpcsock.cmemcpy(iov, req->rq_rvec, req->rq_rnr * sizeof(iov[0]));
req391fs/nfs/rpcsock.cresult = rpc_recvmsg(rsock, iov, req->rq_rnr, req->rq_rlen, 0);
req446fs/nfs/rpcsock.crpc_doio(struct rpc_sock *rsock, struct rpc_ioreq *req,
req455fs/nfs/rpcsock.cslot = req->rq_slot;
req458fs/nfs/rpcsock.cdprintk("RPC: rpc_doio: TP1 (req %p)\n", req);
req461fs/nfs/rpcsock.cresult = rpc_reserve(rsock, req, 0);
req466fs/nfs/rpcsock.cslot = req->rq_slot;
req468fs/nfs/rpcsock.c(u32 *) req->rq_svec[0].iov_base);
req513fs/nfs/rpcsock.crpc_call(struct rpc_sock *rsock, struct rpc_ioreq *req,
req518fs/nfs/rpcsock.cresult = rpc_doio(rsock, req, strategy, 0);
req519fs/nfs/rpcsock.cif (req->rq_slot == NULL)
req521fs/nfs/rpcsock.crpc_release(rsock, req);
req44fs/nfs/sock.cstruct rpc_ioreq  req;
req48fs/nfs/sock.creq.rq_addr = &server->toaddr;
req49fs/nfs/sock.creq.rq_alen = sizeof(server->toaddr);
req50fs/nfs/sock.creq.rq_slot = NULL;
req52fs/nfs/sock.creq.rq_svec[0].iov_base = start;
req53fs/nfs/sock.creq.rq_svec[0].iov_len = (end - start) << 2;
req54fs/nfs/sock.creq.rq_slen = (end - start) << 2;
req55fs/nfs/sock.creq.rq_snr = 1;
req56fs/nfs/sock.creq.rq_rvec[0].iov_base = start;
req57fs/nfs/sock.creq.rq_rvec[0].iov_len = size;
req58fs/nfs/sock.creq.rq_rlen = size;
req59fs/nfs/sock.creq.rq_rnr = 1;
req61fs/nfs/sock.creturn nfs_rpc_doio(server, &req, 0);
req65fs/nfs/sock.cnfs_rpc_doio(struct nfs_server *server, struct rpc_ioreq *req, int async)
req90fs/nfs/sock.cresult = rpc_doio(server->rsock, req, &timeout, async);
req91fs/nfs/sock.crpc_release(server->rsock, req);  /* Release slot */
req383include/linux/blk.hstruct request *req = hwgroup->rq;
req385include/linux/blk.hstatic void end_request (int uptodate, struct request * req) {
req388include/linux/blk.hstruct request *req = CURRENT;
req392include/linux/blk.hreq->errors = 0;
req395include/linux/blk.hkdevname(req->rq_dev), req->sector);
req396include/linux/blk.hreq->nr_sectors--;
req397include/linux/blk.hreq->nr_sectors &= ~SECTOR_MASK;
req398include/linux/blk.hreq->sector += (BLOCK_SIZE / 512);
req399include/linux/blk.hreq->sector &= ~SECTOR_MASK;    
req402include/linux/blk.hif ((bh = req->bh) != NULL) {
req403include/linux/blk.hreq->bh = bh->b_reqnext;
req407include/linux/blk.hif ((bh = req->bh) != NULL) {
req408include/linux/blk.hreq->current_nr_sectors = bh->b_size >> 9;
req409include/linux/blk.hif (req->nr_sectors < req->current_nr_sectors) {
req410include/linux/blk.hreq->nr_sectors = req->current_nr_sectors;
req413include/linux/blk.hreq->buffer = bh->b_data;
req418include/linux/blk.hadd_blkdev_randomness(MAJOR(req->rq_dev));
req421include/linux/blk.hblk_dev[MAJOR(req->rq_dev)].current_request = req->next;
req424include/linux/blk.hDEVICE_OFF(req->rq_dev);
req425include/linux/blk.hCURRENT = req->next;
req427include/linux/blk.hif (req->sem != NULL)
req428include/linux/blk.hup(req->sem);
req429include/linux/blk.hreq->rq_status = RQ_INACTIVE;
req436include/linux/blk.hextern inline void end_redirect (struct request *req)
req440include/linux/blk.hreq->errors = 0;
req442include/linux/blk.hif ((bh = req->bh) != NULL)
req444include/linux/blk.hreq->bh = bh->b_reqnext;
req447include/linux/blk.hif ((bh = req->bh) != NULL)
req449include/linux/blk.hreq->sector += req->current_nr_sectors;
req450include/linux/blk.hreq->current_nr_sectors = bh->b_size >> 9;
req452include/linux/blk.hif (req->nr_sectors < req->current_nr_sectors)
req454include/linux/blk.hreq->nr_sectors = req->current_nr_sectors;
req458include/linux/blk.hreq->buffer = bh->b_data;
req54include/linux/blkdev.hextern void add_request(struct blk_dev_struct * dev, struct request * req);
req116include/linux/if_arp.hunsigned short  req;      /* request type */
req110include/linux/md.hint (*map)(int minor, struct md_dev *md_dev, struct request *req);
req204include/linux/serial.hextern int register_serial(struct serial_struct *req);
req447net/ipv4/arp.carpreq.req = ARPD_UPDATE;
req503net/ipv4/arp.carpreq.req = ARPD_LOOKUP;
req284net/ipv4/rarp.cstatic int rarp_req_set(struct arpreq *req)
req294net/ipv4/rarp.cmemcpy_fromfs(&r, req, sizeof(r));
req388net/ipv4/rarp.cstatic int rarp_req_get(struct arpreq *req)
req399net/ipv4/rarp.cmemcpy_fromfs(&r, req, sizeof(r));
req434net/ipv4/rarp.cmemcpy_tofs(req, &r, sizeof(r));