tag  line  file  source code
buff  47  arch/alpha/lib/checksum.c  static inline unsigned long do_csum(unsigned char * buff, int len)
buff  54  arch/alpha/lib/checksum.c  odd = 1 & (unsigned long) buff;
buff  56  arch/alpha/lib/checksum.c  result = *buff << 8;
buff  58  arch/alpha/lib/checksum.c  buff++;
buff  62  arch/alpha/lib/checksum.c  if (2 & (unsigned long) buff) {
buff  63  arch/alpha/lib/checksum.c  result += *(unsigned short *) buff;
buff  66  arch/alpha/lib/checksum.c  buff += 2;
buff  70  arch/alpha/lib/checksum.c  if (4 & (unsigned long) buff) {
buff  71  arch/alpha/lib/checksum.c  result += *(unsigned int *) buff;
buff  74  arch/alpha/lib/checksum.c  buff += 4;
buff  80  arch/alpha/lib/checksum.c  unsigned long w = *(unsigned long *) buff;
buff  82  arch/alpha/lib/checksum.c  buff += 8;
buff  91  arch/alpha/lib/checksum.c  result += *(unsigned int *) buff;
buff  92  arch/alpha/lib/checksum.c  buff += 4;
buff  96  arch/alpha/lib/checksum.c  result += *(unsigned short *) buff;
buff  97  arch/alpha/lib/checksum.c  buff += 2;
buff  101  arch/alpha/lib/checksum.c  result += *buff;
buff  130  arch/alpha/lib/checksum.c  unsigned int csum_partial(unsigned char * buff, int len, unsigned int sum)
buff  132  arch/alpha/lib/checksum.c  unsigned long result = do_csum(buff, len);
buff  167  arch/alpha/lib/checksum.c  unsigned short ip_compute_csum(unsigned char * buff, int len)
buff  169  arch/alpha/lib/checksum.c  return ~from64to16(do_csum(buff,len));
buff  26  arch/i386/lib/checksum.c  unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) {
buff  93  arch/i386/lib/checksum.c  : "0"(sum), "c"(len), "S"(buff)
buff  24  arch/mips/lib/checksum.c  unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
buff  128  arch/mips/lib/checksum.c  : "0"(sum), "r"(len), "r"(buff)
buff  107  arch/ppc/kernel/stubs.c  unsigned int csum_partial(unsigned char * buff, int len, unsigned int sum)
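The arch/alpha fragments above (and the matching include/asm-sparc ones further down) are all pieces of one do_csum() pattern: consume bytes until buff is aligned, sum whole words, mop up the tail, then fold the carries into 16 bits. Below is a minimal sketch of that pattern in portable C, simplified to 32-bit word sums (the alpha version sums 64-bit words in an unrolled loop and folds with from64to16()); it is an illustration of the technique, not the kernel code.

    #include <stddef.h>
    #include <stdint.h>

    static unsigned int fold16(uint64_t sum)
    {
        while (sum >> 16)                    /* fold carries back into the low 16 bits */
            sum = (sum & 0xffff) + (sum >> 16);
        return (unsigned int)sum;
    }

    unsigned int do_csum_sketch(const unsigned char *buff, size_t len)
    {
        uint64_t result = 0;
        int odd = 1 & (uintptr_t)buff;       /* cf. "odd = 1 & (unsigned long) buff" */

        if (odd && len > 0) {                /* leading byte, taken into the high half */
            result = *buff << 8;
            buff++; len--;
        }
        if (len >= 2 && (2 & (uintptr_t)buff)) {   /* halfword to reach 4-byte alignment */
            result += *(const uint16_t *)buff;
            buff += 2; len -= 2;
        }
        while (len >= 4) {                   /* main aligned word loop */
            result += *(const uint32_t *)buff;
            buff += 4; len -= 4;
        }
        if (len >= 2) {                      /* 2-byte tail */
            result += *(const uint16_t *)buff;
            buff += 2; len -= 2;
        }
        if (len)                             /* final odd byte (little-endian variant) */
            result += *buff;

        result = fold16(result);
        if (odd)                             /* undo the byte swap caused by the odd start */
            result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
        return (unsigned int)result;         /* ip_compute_csum() would return ~result */
    }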
buff  594  drivers/cdrom/sonycd535.c  Byte **buff, int buf_size)
buff  628  drivers/cdrom/sonycd535.c  data_buff = buff[sector_count++];
buff  774  drivers/char/ftape/fdc-io.c  int setup_fdc_and_dma(buffer_struct * buff, unsigned char operation)
buff  811  drivers/char/ftape/fdc-io.c  TRACEx2(5, "xfer %d sectors to 0x%p", buff->sector_count, buff->ptr);
buff  840  drivers/char/ftape/fdc-io.c  TRACEx2(5, "xfer %d sectors from 0x%p", buff->sector_count, buff->ptr);
buff  853  drivers/char/ftape/fdc-io.c  set_dma_addr(fdc.dma, (unsigned) buff->ptr);
buff  854  drivers/char/ftape/fdc-io.c  set_dma_count(fdc.dma, SECTOR_SIZE * buff->sector_count);
buff  872  drivers/char/ftape/fdc-io.c  out[2] = buff->cyl;
buff  873  drivers/char/ftape/fdc-io.c  out[3] = buff->head;
buff  874  drivers/char/ftape/fdc-io.c  out[4] = buff->sect + buff->sector_offset;
buff  876  drivers/char/ftape/fdc-io.c  out[6] = out[4] + buff->sector_count - 1;  /* last sector */
buff  191  drivers/char/ftape/fdc-isr.c  static void skip_bad_sector(buffer_struct * buff)
buff  197  drivers/char/ftape/fdc-isr.c  if (buff->remaining > 0) {
buff  198  drivers/char/ftape/fdc-isr.c  ++buff->sector_offset;
buff  199  drivers/char/ftape/fdc-isr.c  ++buff->data_offset;
buff  200  drivers/char/ftape/fdc-isr.c  --buff->remaining;
buff  201  drivers/char/ftape/fdc-isr.c  buff->ptr += SECTOR_SIZE;
buff  202  drivers/char/ftape/fdc-isr.c  buff->bad_sector_map >>= 1;
buff  204  drivers/char/ftape/fdc-isr.c  ++buff->sector_offset;  /* hack for error maps */
buff  210  drivers/char/ftape/fdc-isr.c  static void update_error_maps(buffer_struct * buff, unsigned error_offset)
buff  217  drivers/char/ftape/fdc-isr.c  if (buff->retry < SOFT_RETRIES) {
buff  218  drivers/char/ftape/fdc-isr.c  buff->soft_error_map |= (1 << error_offset);
buff  220  drivers/char/ftape/fdc-isr.c  buff->hard_error_map |= (1 << error_offset);
buff  221  drivers/char/ftape/fdc-isr.c  buff->soft_error_map &= ~buff->hard_error_map;
buff  222  drivers/char/ftape/fdc-isr.c  buff->retry = -1;  /* will be set to 0 in setup_segment */
buff  228  drivers/char/ftape/fdc-isr.c  buff->hard_error_map, buff->soft_error_map);
buff  244  drivers/char/ftape/fdc-isr.c  static void determine_progress(buffer_struct * buff, error_cause cause, int mode)
buff  256  drivers/char/ftape/fdc-isr.c  nr_xferred = buff->sector_count * SECTOR_SIZE - dma_residue;
buff  266  drivers/char/ftape/fdc-isr.c  buff->sector_count -= nr_not_xferred;  /* adjust to actual value */
buff  270  drivers/char/ftape/fdc-isr.c  if (buff->sector_count > 0) {
buff  271  drivers/char/ftape/fdc-isr.c  buff->sector_offset += buff->sector_count;
buff  272  drivers/char/ftape/fdc-isr.c  buff->data_offset += buff->sector_count;
buff  273  drivers/char/ftape/fdc-isr.c  buff->ptr += buff->sector_count * SECTOR_SIZE;
buff  274  drivers/char/ftape/fdc-isr.c  buff->remaining -= buff->sector_count;
buff  275  drivers/char/ftape/fdc-isr.c  buff->bad_sector_map >>= buff->sector_count;
buff  278  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "%d Sector(s) transferred", buff->sector_count);
buff  280  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "Sector %d not found", SECTOR(buff->sector_offset));
buff  283  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "Error in sector %d", SECTOR(buff->sector_offset));
buff  289  drivers/char/ftape/fdc-isr.c  TRACEx1(4, "Unexpected error at sector %d", SECTOR(buff->sector_offset));
buff  299  drivers/char/ftape/fdc-isr.c  skip_bad_sector(buff);
buff  301  drivers/char/ftape/fdc-isr.c  update_error_maps(buff, buff->sector_offset - 1);
buff  379  drivers/char/ftape/fdc-isr.c  buffer_struct *buff = *p_buff;
buff  387  drivers/char/ftape/fdc-isr.c  (buff->status != (write ? writing : reading))) {
buff  389  drivers/char/ftape/fdc-isr.c  runner_status, buff->status);
buff  390  drivers/char/ftape/fdc-isr.c  buff->status = error;
buff  396  drivers/char/ftape/fdc-isr.c  if (buff->remaining > 0 && calc_next_cluster(&buffer[head]) > 0) {
buff  402  drivers/char/ftape/fdc-isr.c  buff->bytes = buff->ptr - buff->address;
buff  403  drivers/char/ftape/fdc-isr.c  buff->status = error;
buff  404  drivers/char/ftape/fdc-isr.c  buff = *p_buff = next_buffer(&head);  /* finish this buffer */
buff  411  drivers/char/ftape/fdc-isr.c  unsigned last_segment = buff->segment_id;
buff  413  drivers/char/ftape/fdc-isr.c  int next = buff->next_segment;  /* 0 means stop ! */
buff  415  drivers/char/ftape/fdc-isr.c  buff->bytes = buff->ptr - buff->address;
buff  416  drivers/char/ftape/fdc-isr.c  buff->status = done;
buff  417  drivers/char/ftape/fdc-isr.c  buff = *p_buff = next_buffer(&head);
buff  426  drivers/char/ftape/fdc-isr.c  if (buff->status == waiting) {
buff  427  drivers/char/ftape/fdc-isr.c  if (write && next != buff->segment_id) {
buff  434  drivers/char/ftape/fdc-isr.c  buff->next_segment = 0;
buff  443  drivers/char/ftape/fdc-isr.c  buff->status = (write) ? writing : reading;  /* keep on going */
buff  474  drivers/char/ftape/fdc-isr.c  buffer_struct *buff = *p_buff;
buff  478  drivers/char/ftape/fdc-isr.c  pause_tape(buff->segment_id, 1, fdc_mode);
buff  480  drivers/char/ftape/fdc-isr.c  buff->status = error;
buff  481  drivers/char/ftape/fdc-isr.c  buff->skip = skip;
buff  486  drivers/char/ftape/fdc-isr.c  find_resume_point(buffer_struct * buff)
buff  500  drivers/char/ftape/fdc-isr.c  if (buff->sector_offset < 1 || buff->sector_offset > 32) {
buff  501  drivers/char/ftape/fdc-isr.c  TRACEx1(1, "bug: sector_offset = %d", buff->sector_offset);
buff  503  drivers/char/ftape/fdc-isr.c  if (buff->sector_offset >= 32) {  /* C-limitation on shift ! */
buff  506  drivers/char/ftape/fdc-isr.c  mask = (1 << buff->sector_offset) - 1;
buff  508  drivers/char/ftape/fdc-isr.c  map = buff->soft_error_map & mask;
buff  515  drivers/char/ftape/fdc-isr.c  map = buff->hard_error_map & mask;
buff  516  drivers/char/ftape/fdc-isr.c  i = buff->sector_offset - 1;
buff  544  drivers/char/ftape/fdc-isr.c  buffer_struct *buff = &buffer[head];
buff  581  drivers/char/ftape/fdc-isr.c  TRACEi(5, "reading segment", buff->segment_id);
buff  583  drivers/char/ftape/fdc-isr.c  TRACEi(4, "error reading segment", buff->segment_id);
buff  589  drivers/char/ftape/fdc-isr.c  if (buff->retry > 0) {
buff  590  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "this is retry nr %d", buff->retry);
buff  592  drivers/char/ftape/fdc-isr.c  if (buff->bad_sector_map == FAKE_SEGMENT) {
buff  600  drivers/char/ftape/fdc-isr.c  buff->remaining = 0;  /* skip failing sector */
buff  601  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, no_error, fdc_mode, 1);  /* fake success */
buff  605  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_reading_data);
buff  611  drivers/char/ftape/fdc-isr.c  SECTOR(buff->sector_offset - 1));
buff  612  drivers/char/ftape/fdc-isr.c  buff->deleted = 1;
buff  613  drivers/char/ftape/fdc-isr.c  buff->remaining = 0;  /* abort transfer */
buff  614  drivers/char/ftape/fdc-isr.c  buff->soft_error_map |= (-1L << buff->sector_offset);
buff  615  drivers/char/ftape/fdc-isr.c  if (buff->segment_id == 0) {
buff  618  drivers/char/ftape/fdc-isr.c  buff->next_segment = buff->segment_id + 1;  /* force read-ahead */
buff  619  drivers/char/ftape/fdc-isr.c  skip = (SECTORS_PER_SEGMENT - buff->sector_offset);
buff  623  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, cause, fdc_mode, skip);
buff  637  drivers/char/ftape/fdc-isr.c  int first_error = (buff->soft_error_map == 0 &&
buff  638  drivers/char/ftape/fdc-isr.c  buff->hard_error_map == 0);
buff  641  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_reading_data);
buff  643  drivers/char/ftape/fdc-isr.c  skip = buff->sector_offset;
buff  645  drivers/char/ftape/fdc-isr.c  skip = find_resume_point(buff);
buff  653  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, cause, fdc_mode, skip);
buff  655  drivers/char/ftape/fdc-isr.c  retry_sector(&buff, cause, fdc_mode, skip);
buff  663  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_reading_data);
buff  664  drivers/char/ftape/fdc-isr.c  retry_sector(&buff, cause, fdc_mode, 0);
buff  693  drivers/char/ftape/fdc-isr.c  TRACEi(5, "writing segment", buff->segment_id);
buff  695  drivers/char/ftape/fdc-isr.c  TRACEi(4, "error writing segment", buff->segment_id);
buff  701  drivers/char/ftape/fdc-isr.c  if (buff->retry > 0) {
buff  702  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "this is retry nr %d", buff->retry);
buff  704  drivers/char/ftape/fdc-isr.c  if (buff->bad_sector_map == FAKE_SEGMENT) {
buff  711  drivers/char/ftape/fdc-isr.c  buff->remaining = 0;  /* skip failing sector */
buff  712  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, no_error, fdc_mode, 1);  /* fake success */
buff  716  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_writing_data);
buff  717  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, cause, fdc_mode, 0);
buff  726  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_writing_data);
buff  727  drivers/char/ftape/fdc-isr.c  skip = find_resume_point(buff);
buff  728  drivers/char/ftape/fdc-isr.c  retry_sector(&buff, cause, fdc_mode, skip);
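Taken together, the update_error_maps() rows (fdc-isr.c lines 217-222) show the retry bookkeeping in miniature: a failing sector is first charged to the soft error map; once the retry budget is spent it is promoted to the hard map, and the two maps are kept disjoint. A self-contained sketch of that logic, with the struct trimmed to the fields these rows touch and SOFT_RETRIES given an assumed stand-in value:

    #define SOFT_RETRIES 3   /* assumed value, for illustration only */

    struct error_maps_sketch {
        unsigned long soft_error_map;   /* bit i set => soft error in sector i */
        unsigned long hard_error_map;   /* bit i set => persistent error in sector i */
        int retry;                      /* retries used so far on this segment */
    };

    static void update_error_maps_sketch(struct error_maps_sketch *buff,
                                         unsigned error_offset)
    {
        if (buff->retry < SOFT_RETRIES) {
            buff->soft_error_map |= (1UL << error_offset);
        } else {
            buff->hard_error_map |= (1UL << error_offset);
            buff->soft_error_map &= ~buff->hard_error_map;  /* keep the maps disjoint */
            buff->retry = -1;           /* will be set to 0 in setup_segment */
        }
    }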
buff  74  drivers/char/ftape/ftape-read.c  static unsigned long convert_sector_map(buffer_struct * buff)
buff  78  drivers/char/ftape/ftape-read.c  unsigned long bad_map = get_bad_sector_entry(buff->segment_id);
buff  79  drivers/char/ftape/ftape-read.c  unsigned long src_map = buff->soft_error_map | buff->hard_error_map;
buff  577  drivers/char/ftape/ftape-read.c  int _ftape_read(char *buff, int req_len)
buff  653  drivers/char/ftape/ftape-read.c  result = verify_area(VERIFY_WRITE, buff, cnt);
buff  659  drivers/char/ftape/ftape-read.c  memcpy_tofs(buff, deblock_buffer + buf_pos_rd, cnt);
buff  660  drivers/char/ftape/ftape-read.c  buff += cnt;
buff  39  drivers/char/ftape/ftape-read.h  extern int _ftape_read(char *buff, int req_len);
buff  105  drivers/char/ftape/ftape-rw.c  static int setup_segment(buffer_struct * buff, unsigned int segment_id,
buff  112  drivers/char/ftape/ftape-rw.c  buff->segment_id = segment_id;
buff  113  drivers/char/ftape/ftape-rw.c  buff->sector_offset = sector_offset;
buff  114  drivers/char/ftape/ftape-rw.c  buff->remaining = sector_count;
buff  115  drivers/char/ftape/ftape-rw.c  buff->head = segment_id / segments_per_head;
buff  116  drivers/char/ftape/ftape-rw.c  buff->cyl = (segment_id % segments_per_head) / segments_per_cylinder;
buff  117  drivers/char/ftape/ftape-rw.c  buff->sect = (segment_id % segments_per_cylinder) * SECTORS_PER_SEGMENT + 1;
buff  118  drivers/char/ftape/ftape-rw.c  buff->deleted = 0;
buff  119  drivers/char/ftape/ftape-rw.c  offset_mask = (1 << buff->sector_offset) - 1;
buff  127  drivers/char/ftape/ftape-rw.c  buff->data_offset = count_ones(offset_mask);  /* good sectors to skip */
buff  128  drivers/char/ftape/ftape-rw.c  buff->ptr = buff->address + buff->data_offset * SECTOR_SIZE;
buff  129  drivers/char/ftape/ftape-rw.c  TRACEx1(5, "data offset = %d sectors", buff->data_offset);
buff  131  drivers/char/ftape/ftape-rw.c  buff->soft_error_map &= offset_mask;  /* keep skipped part */
buff  133  drivers/char/ftape/ftape-rw.c  buff->hard_error_map = buff->soft_error_map = 0;
buff  135  drivers/char/ftape/ftape-rw.c  buff->bad_sector_map = get_bad_sector_entry(buff->segment_id);
buff  136  drivers/char/ftape/ftape-rw.c  if (buff->bad_sector_map != 0) {
buff  138  drivers/char/ftape/ftape-rw.c  buff->segment_id, buff->bad_sector_map);
buff  140  drivers/char/ftape/ftape-rw.c  TRACEx1(5, "segment: %d", buff->segment_id);
buff  142  drivers/char/ftape/ftape-rw.c  if (buff->sector_offset > 0) {
buff  143  drivers/char/ftape/ftape-rw.c  buff->bad_sector_map >>= buff->sector_offset;
buff  145  drivers/char/ftape/ftape-rw.c  if (buff->sector_offset != 0 || buff->remaining != SECTORS_PER_SEGMENT) {
buff  147  drivers/char/ftape/ftape-rw.c  buff->sector_offset, buff->remaining);
buff  163  drivers/char/ftape/ftape-rw.c  if (buff->bad_sector_map == EMPTY_SEGMENT) {
buff  165  drivers/char/ftape/ftape-rw.c  buff->bad_sector_map = FAKE_SEGMENT;
buff  168  drivers/char/ftape/ftape-rw.c  buff->next_segment = segment_id + 1;
buff  175  drivers/char/ftape/ftape-rw.c  int setup_new_segment(buffer_struct * buff, unsigned int segment_id, int skip)
buff  193  drivers/char/ftape/ftape-rw.c  ++buff->retry;
buff  195  drivers/char/ftape/ftape-rw.c  TRACEx1(5, "setting up for retry nr %d", buff->retry);
buff  197  drivers/char/ftape/ftape-rw.c  if (skip && buff->skip > 0) {  /* allow skip on retry */
buff  198  drivers/char/ftape/ftape-rw.c  offset = buff->skip;
buff  203  drivers/char/ftape/ftape-rw.c  buff->retry = 0;
buff  204  drivers/char/ftape/ftape-rw.c  buff->skip = 0;
buff  207  drivers/char/ftape/ftape-rw.c  result = setup_segment(buff, segment_id, offset, count, retry);
buff  214  drivers/char/ftape/ftape-rw.c  int calc_next_cluster(buffer_struct * buff)
buff  218  drivers/char/ftape/ftape-rw.c  while (buff->remaining > 0 && (buff->bad_sector_map & 1) != 0) {
buff  219  drivers/char/ftape/ftape-rw.c  buff->bad_sector_map >>= 1;
buff  220  drivers/char/ftape/ftape-rw.c  ++buff->sector_offset;
buff  221  drivers/char/ftape/ftape-rw.c  --buff->remaining;
buff  225  drivers/char/ftape/ftape-rw.c  if (buff->bad_sector_map == 0) {  /* speed up */
buff  226  drivers/char/ftape/ftape-rw.c  buff->sector_count = buff->remaining;
buff  228  drivers/char/ftape/ftape-rw.c  unsigned long map = buff->bad_sector_map;
buff  230  drivers/char/ftape/ftape-rw.c  buff->sector_count = 0;
buff  231  drivers/char/ftape/ftape-rw.c  while (buff->sector_count < buff->remaining && (map & 1) == 0) {
buff  232  drivers/char/ftape/ftape-rw.c  ++buff->sector_count;
buff  236  drivers/char/ftape/ftape-rw.c  return buff->sector_count;
buff  159  drivers/char/ftape/ftape-rw.h  extern int setup_new_segment(buffer_struct * buff, unsigned int segment_id,
buff  161  drivers/char/ftape/ftape-rw.h  extern int calc_next_cluster(buffer_struct * buff);
buff  171  drivers/char/ftape/ftape-rw.h  extern int setup_fdc_and_dma(buffer_struct * buff, byte operation);
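The calc_next_cluster() rows (ftape-rw.c lines 214-236) fit together into one small, complete algorithm: shift past any bad sectors at the current position, then count the run of good sectors a single transfer can cover. Reassembled below as a self-contained sketch; the struct is trimmed to the four fields those rows use, and the map >>= 1 in the second loop is implied by the fragments rather than shown in them.

    struct buffer_sketch {
        unsigned long bad_sector_map;   /* bit i set => sector i of the segment is bad */
        int sector_offset;              /* current sector within the segment */
        int remaining;                  /* sectors left in the segment */
        int sector_count;               /* size of the next good cluster */
    };

    int calc_next_cluster_sketch(struct buffer_sketch *buff)
    {
        /* Skip bad sectors at the current position. */
        while (buff->remaining > 0 && (buff->bad_sector_map & 1) != 0) {
            buff->bad_sector_map >>= 1;
            ++buff->sector_offset;
            --buff->remaining;
        }
        if (buff->bad_sector_map == 0) {
            buff->sector_count = buff->remaining;   /* no bad sectors ahead: take the rest */
        } else {
            unsigned long map = buff->bad_sector_map;
            buff->sector_count = 0;
            while (buff->sector_count < buff->remaining && (map & 1) == 0) {
                ++buff->sector_count;   /* count good sectors up to the next bad one */
                map >>= 1;
            }
        }
        return buff->sector_count;
    }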
buff  112  drivers/char/ftape/ftape-write.c  buffer_struct *buff = &buffer[head];
buff  113  drivers/char/ftape/ftape-write.c  int segment_id = buff->segment_id;
buff  115  drivers/char/ftape/ftape-write.c  if (ftape_state == writing && buff->status == waiting) {
buff  116  drivers/char/ftape/ftape-write.c  setup_new_segment(buff, segment_id, 1);
buff  120  drivers/char/ftape/ftape-write.c  calc_next_cluster(buff);  /* prepare */
buff  121  drivers/char/ftape/ftape-write.c  buff->status = writing;
buff  124  drivers/char/ftape/ftape-write.c  result = ftape_start_tape(segment_id, buff->sector_offset);
buff  130  drivers/char/ftape/ftape-write.c  result = setup_fdc_and_dma(buff, FDC_WRITE);  /* go */
buff  589  drivers/char/ftape/ftape-write.c  int _ftape_write(const char *buff, int req_len)
buff  635  drivers/char/ftape/ftape-write.c  result = verify_area(VERIFY_READ, buff, cnt);
buff  642  drivers/char/ftape/ftape-write.c  memcpy_fromfs(deblock_buffer + buf_pos_wr, buff, cnt);
buff  643  drivers/char/ftape/ftape-write.c  buff += cnt;
buff  39  drivers/char/ftape/ftape-write.h  extern int _ftape_write(const char *buff, int req_len);
buff  64  drivers/char/ftape/kernel-interface.c  static int ftape_read(struct inode *ino, struct file *fp, char *buff,
buff  66  drivers/char/ftape/kernel-interface.c  static int ftape_write(struct inode *ino, struct file *fp, const char *buff,
buff  313  drivers/char/ftape/kernel-interface.c  static int ftape_read(struct inode *ino, struct file *fp, char *buff, int req_len)
buff  327  drivers/char/ftape/kernel-interface.c  result = _ftape_read(buff, req_len);
buff  336  drivers/char/ftape/kernel-interface.c  static int ftape_write(struct inode *ino, struct file *fp, const char *buff, int req_len)
buff  350  drivers/char/ftape/kernel-interface.c  result = _ftape_write(buff, req_len);
buff  1140  drivers/isdn/isdn_net.c  isdn_net_rebuild_header(void *buff, struct device *dev, ulong dst,
buff  1147  drivers/isdn/isdn_net.c  struct ethhdr *eth = (struct ethhdr *)buff;
buff  49  drivers/net/apricot.c  #define kfree_skbmem(buff, size) kfree_s(buff,size)
buff  2650  drivers/net/arcnet.c  int arcnetA_rebuild_header(void *buff,struct device *dev,unsigned long dst,
buff  2653  drivers/net/arcnet.c  struct ClientData *head = (struct ClientData *)buff;
buff  3105  drivers/net/arcnet.c  int arcnetS_rebuild_header(void *buff,struct device *dev,unsigned long dst,
buff  3108  drivers/net/arcnet.c  struct S_ClientData *head = (struct S_ClientData *)buff;
buff  155  drivers/net/eql.c  static int eql_rebuild_header(void *buff, struct device *dev,
buff  416  drivers/net/eql.c  static int eql_rebuild_header(void *buff, struct device *dev,
buff  1084  drivers/net/pi2.c  static int pi_rebuild_header(void *buff, struct device *dev, unsigned long raddr,
buff  1087  drivers/net/pi2.c  return ax25_rebuild_header(buff, dev, raddr, skb);
buff  145  drivers/net/plip.c  static int plip_rebuild_header(void *buff, struct device *dev,
buff  857  drivers/net/plip.c  plip_rebuild_header(void *buff, struct device *dev, unsigned long dst,
buff  861  drivers/net/plip.c  struct ethhdr *eth = (struct ethhdr *)buff;
buff  865  drivers/net/plip.c  return nl->orig_rebuild_header(buff, dev, dst, skb);
buff  347  drivers/net/pt.c  static int pt_rebuild_header(void *buff, struct device *dev, unsigned long raddr,
buff  350  drivers/net/pt.c  return ax25_rebuild_header(buff, dev, raddr, skb);
buff  556  drivers/net/slip.c  sl_rebuild_header(void *buff, struct device *dev, unsigned long raddr,
buff  564  drivers/net/slip.c  return ax25_rebuild_header(buff, dev, raddr, skb);
buff  518  drivers/scsi/aha1542.c  void *buff = SCpnt->request_buffer;
buff  645  drivers/scsi/aha1542.c  if(((unsigned int) buff & 0xff000000)) goto baddma;
buff  646  drivers/scsi/aha1542.c  any2scsi(ccb[mbo].dataptr, buff);
buff  258  drivers/scsi/aha1740.c  void *buff = SCpnt->request_buffer;
buff  354  drivers/scsi/aha1740.c  ecb[ecbno].dataptr = (long) buff;
buff  79  drivers/scsi/aic7xxx_proc.c  static u8 buff[512];
buff  124  drivers/scsi/aic7xxx_proc.c  memset(buff, 0, sizeof(buff));
buff  695  drivers/scsi/eata_dma.c  static char *buff;
buff  704  drivers/scsi/eata_dma.c  buff = dma_scratch;
buff  708  drivers/scsi/eata_dma.c  memset(buff, 0, 256);
buff  716  drivers/scsi/eata_dma.c  cp->cp_dataDMA = htonl(virt_to_bus(buff));
buff  743  drivers/scsi/eata_dma.c  (u32) sp->scsi_stat, buff, sp));
buff  757  drivers/scsi/eata_dma.c  return (buff);
buff  851  drivers/scsi/eata_dma.c  char *buff = 0;
buff  905  drivers/scsi/eata_dma.c  buff = get_board_data(base, gc->IRQ, gc->scsi_id[3]);
buff  907  drivers/scsi/eata_dma.c  if (buff == NULL) {
buff  931  drivers/scsi/eata_dma.c  if (gc->DMA_support == FALSE && buff != NULL)
buff  933  drivers/scsi/eata_dma.c  &buff[16], base);
buff  977  drivers/scsi/eata_dma.c  strncpy(SD(sh)->vendor, &buff[8], 8);
buff  979  drivers/scsi/eata_dma.c  strncpy(SD(sh)->name, &buff[16], 17);
buff  981  drivers/scsi/eata_dma.c  SD(sh)->revision[0] = buff[32];
buff  982  drivers/scsi/eata_dma.c  SD(sh)->revision[1] = buff[33];
buff  983  drivers/scsi/eata_dma.c  SD(sh)->revision[2] = buff[34];
buff  985  drivers/scsi/eata_dma.c  SD(sh)->revision[4] = buff[35];
buff  1013  drivers/scsi/eata_dma.c  if (buff[21] == '4')
buff  1015  drivers/scsi/eata_dma.c  else if (buff[21] == '2')
buff  75  drivers/scsi/eata_dma_proc.c  static u8 buff[512];
buff  106  drivers/scsi/eata_dma_proc.c  memset(buff, 0, sizeof(buff));
buff  108  drivers/scsi/eata_dma_proc.c  cc = (coco *)     (buff + 0x148);
buff  109  drivers/scsi/eata_dma_proc.c  st = (scsitrans *)(buff + 0x164);
buff  110  drivers/scsi/eata_dma_proc.c  sm = (scsimod *)  (buff + 0x16c);
buff  111  drivers/scsi/eata_dma_proc.c  hb = (hobu *)     (buff + 0x172);
buff  112  drivers/scsi/eata_dma_proc.c  sb = (scbu *)     (buff + 0x178);
buff  113  drivers/scsi/eata_dma_proc.c  bt = (boty *)     (buff + 0x17e);
buff  114  drivers/scsi/eata_dma_proc.c  mc = (memco *)    (buff + 0x186);
buff  115  drivers/scsi/eata_dma_proc.c  fm = (firm *)     (buff + 0x18e);
buff  116  drivers/scsi/eata_dma_proc.c  si = (subinf *)   (buff + 0x196);
buff  117  drivers/scsi/eata_dma_proc.c  pi = (pcinf *)    (buff + 0x19c);
buff  118  drivers/scsi/eata_dma_proc.c  al = (arrlim *)   (buff + 0x1a2);
buff  191  drivers/scsi/eata_dma_proc.c  scsi_do_cmd (&scmd, cmnd, buff + 0x144, 0x66,
buff  539  drivers/scsi/eata_pio.c  static char buff[256];
buff  543  drivers/scsi/eata_pio.c  memset(buff, 0, sizeof(buff));
buff  575  drivers/scsi/eata_pio.c  insw(base+HA_RDATA, &buff, 127);
buff  577  drivers/scsi/eata_pio.c  return (buff);
buff  679  drivers/scsi/eata_pio.c  char *buff;
buff  693  drivers/scsi/eata_pio.c  if ((buff = get_pio_board_data((uint)base, gc->IRQ, gc->scsi_id[3],
buff  737  drivers/scsi/eata_pio.c  strncpy(SD(sh)->vendor, &buff[8], 8);
buff  739  drivers/scsi/eata_pio.c  strncpy(SD(sh)->name, &buff[16], 17);
buff  741  drivers/scsi/eata_pio.c  SD(sh)->revision[0] = buff[32];
buff  742  drivers/scsi/eata_pio.c  SD(sh)->revision[1] = buff[33];
buff  743  drivers/scsi/eata_pio.c  SD(sh)->revision[2] = buff[34];
buff  745  drivers/scsi/eata_pio.c  SD(sh)->revision[4] = buff[35];
buff  772  drivers/scsi/eata_pio.c  if (buff[21] == '4')
buff  774  drivers/scsi/eata_pio.c  else if (buff[21] == '2')
buff  46  drivers/scsi/eata_pio_proc.c  static u8 buff[512];
buff  63  drivers/scsi/eata_pio_proc.c  memset(buff, 0, sizeof(buff));
buff  423  drivers/scsi/in2000.c  void *buff = SCpnt->request_buffer;
buff  505  drivers/scsi/in2000.c  in2000_scatter = (struct scatterlist *) buff;
buff  511  drivers/scsi/in2000.c  in2000_dataptr = (unsigned short*) buff;
buff  179  drivers/scsi/scsi_debug.c  unsigned char * buff;
buff  192  drivers/scsi/scsi_debug.c  buff = (unsigned char *) SCpnt->request_buffer;
buff  215  drivers/scsi/scsi_debug.c  printk("scsi_debug: Requesting sense buffer (%x %x %x %d):", SCpnt, buff, done, bufflen);
buff  220  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  221  drivers/scsi/scsi_debug.c  memcpy(buff, sense_buffer, bufflen);
buff  232  drivers/scsi/scsi_debug.c  printk("Inquiry...(%x %d)\n", buff, bufflen);
buff  233  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  234  drivers/scsi/scsi_debug.c  buff[0] = TYPE_DISK;
buff  235  drivers/scsi/scsi_debug.c  buff[1] = 0x80;  /* Removable disk */
buff  236  drivers/scsi/scsi_debug.c  buff[2] = 1;
buff  237  drivers/scsi/scsi_debug.c  buff[4] = 33 - 5;
buff  238  drivers/scsi/scsi_debug.c  memcpy(&buff[8],"Foo Inc",7);
buff  239  drivers/scsi/scsi_debug.c  memcpy(&buff[16],"XYZZY",5);
buff  240  drivers/scsi/scsi_debug.c  memcpy(&buff[32],"1",1);
buff  244  drivers/scsi/scsi_debug.c  printk("Test unit ready(%x %d)\n", buff, bufflen);
buff  245  drivers/scsi/scsi_debug.c  if (buff)
buff  246  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  252  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  253  drivers/scsi/scsi_debug.c  buff[0] = (CAPACITY >> 24);
buff  254  drivers/scsi/scsi_debug.c  buff[1] = (CAPACITY >> 16) & 0xff;
buff  255  drivers/scsi/scsi_debug.c  buff[2] = (CAPACITY >> 8) & 0xff;
buff  256  drivers/scsi/scsi_debug.c  buff[3] = CAPACITY & 0xff;
buff  257  drivers/scsi/scsi_debug.c  buff[6] = 2; /* 512 byte sectors */
buff  288  drivers/scsi/scsi_debug.c  sgpnt = (struct scatterlist *) buff;
buff  289  drivers/scsi/scsi_debug.c  buff = sgpnt[sgcount].address;
buff  298  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  303  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  304  drivers/scsi/scsi_debug.c  *((unsigned short *) (buff+510)) = 0xAA55;
buff  305  drivers/scsi/scsi_debug.c  p = (struct partition* ) (buff + 0x1be);
buff  345  drivers/scsi/scsi_debug.c  memcpy(buff, &target, sizeof(target));
buff  346  drivers/scsi/scsi_debug.c  memcpy(buff+sizeof(target), cmd, 24);
buff  347  drivers/scsi/scsi_debug.c  memcpy(buff+60, &block, sizeof(block));
buff  348  drivers/scsi/scsi_debug.c  memcpy(buff+64, SCpnt, sizeof(Scsi_Cmnd));
buff  353  drivers/scsi/scsi_debug.c  memcpy(buff+128, bh, sizeof(struct buffer_head));
buff  360  drivers/scsi/scsi_debug.c  buff = sgpnt[sgcount].address;
buff  387  drivers/scsi/scsi_debug.c  sgpnt = (struct scatterlist *) buff;
buff  388  drivers/scsi/scsi_debug.c  buff = sgpnt[sgcount].address;
buff  391  drivers/scsi/scsi_debug.c  if (block != *((unsigned long *) (buff+60))) {
buff  392  drivers/scsi/scsi_debug.c  printk("%x %x :",block,  *((unsigned long *) (buff+60)));
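The scsi_debug.c rows at lines 253-257 hand-assemble a READ CAPACITY reply: the block number big-endian in bytes 0-3 and the block length big-endian in bytes 4-7, so buff[6] = 2 yields 0x000200 = 512-byte sectors. A sketch of the same assembly; CAPACITY is an assumed stand-in value here.

    #include <string.h>

    #define CAPACITY 0x3f000UL   /* assumed sector count, for illustration only */

    static void fill_read_capacity_sketch(unsigned char *buff, int bufflen)
    {
        memset(buff, 0, bufflen);
        buff[0] = (CAPACITY >> 24) & 0xff;   /* block number, big-endian */
        buff[1] = (CAPACITY >> 16) & 0xff;
        buff[2] = (CAPACITY >> 8) & 0xff;
        buff[3] = CAPACITY & 0xff;
        buff[6] = 2;                         /* block length 0x000200 = 512-byte sectors */
    }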
buff  513  drivers/scsi/sd.c  char * buff, *bounce_buffer;
buff  628  drivers/scsi/sd.c  buff = SCpnt->request.buffer;
buff  647  drivers/scsi/sd.c  buff = SCpnt->request.buffer;
buff  692  drivers/scsi/sd.c  buff = SCpnt->request.buffer;
buff  697  drivers/scsi/sd.c  buff = (char *) sgpnt;
buff  738  drivers/scsi/sd.c  buff = SCpnt->request.buffer;
buff  819  drivers/scsi/sd.c  if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
buff  822  drivers/scsi/sd.c  buff = bounce_buffer;
buff  824  drivers/scsi/sd.c  buff = (char *) scsi_malloc(this_count << 9);
buff  825  drivers/scsi/sd.c  if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
buff  827  drivers/scsi/sd.c  buff = (char *) scsi_malloc(this_count << 9);
buff  828  drivers/scsi/sd.c  if(!buff) panic("Ran out of DMA buffers.");
buff  831  drivers/scsi/sd.c  memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
buff  889  drivers/scsi/sd.c  scsi_do_cmd (SCpnt, (void *) cmd, buff,
buff  95  drivers/scsi/seagate.c  void *buff, int bufflen, int reselect);
buff  620  drivers/scsi/seagate.c  void *buff, int bufflen, int reselect) {
buff  56  drivers/scsi/sg.c  char *buff;   /* the buffer */
buff  64  drivers/scsi/sg.c  static void sg_free(char *buff,int size);
buff  136  drivers/scsi/sg.c  if (scsi_generics[dev].buff != NULL)
buff  137  drivers/scsi/sg.c  sg_free(scsi_generics[dev].buff,scsi_generics[dev].buff_len);
buff  138  drivers/scsi/sg.c  scsi_generics[dev].buff=NULL;
buff  181  drivers/scsi/sg.c  static void sg_free(char *buff,int size)
buff  184  drivers/scsi/sg.c  if (buff==big_buff)
buff  191  drivers/scsi/sg.c  scsi_free(buff,size);
buff  241  drivers/scsi/sg.c  memcpy_tofs(buf,device->buff,count-sizeof(struct sg_header));
buff  251  drivers/scsi/sg.c  sg_free(device->buff,device->buff_len);
buff  252  drivers/scsi/sg.c  device->buff = NULL;
buff  389  drivers/scsi/sg.c  if ((bsize<0) || !(device->buff=sg_malloc(device->buff_len=bsize)))
buff  408  drivers/scsi/sg.c  sg_free(device->buff,device->buff_len);
buff  409  drivers/scsi/sg.c  device->buff = NULL;
buff  432  drivers/scsi/sg.c  if (input_size > 0) memcpy_fromfs(device->buff, buf, input_size);
buff  449  drivers/scsi/sg.c  (void *) device->buff,amt,
buff  576  drivers/scsi/sg.c  scsi_generics[i].buff=NULL;
buff  178  drivers/sound/audio.c  translate_bytes (const unsigned char *table, unsigned char *buff, int n)
buff  186  drivers/sound/audio.c  buff[i] = table[buff[i]];
buff  191  drivers/sound/audio.c  translate_bytes (const void *table, void *buff, int n)
buff  200  drivers/sound/audio.c  :     "b" ((long) table), "c" (n), "D" ((long) buff), "S" ((long) buff)
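The two translate_bytes() rows above are the portable and x86 inline-assembly versions of the same one-liner used for audio format conversion: remap every byte in place through a 256-entry lookup table. The portable form, reconstructed around the line-186 fragment:

    static void translate_bytes_sketch(const unsigned char *table,
                                       unsigned char *buff, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            buff[i] = table[buff[i]];   /* replace each byte by its table entry */
    }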
buff  51  fs/ncpfs/sock.c  static int _sendto(struct socket *sock, const void *buff,
buff  59  fs/ncpfs/sock.c  iov.iov_base = (void *)buff;
buff  47  fs/smbfs/sock.c  static int _send(struct socket *sock, const void *buff, int len, int nonblock, unsigned flags) {
buff  51  fs/smbfs/sock.c  iov.iov_base = (void *)buff;
buff  33  include/asm-alpha/checksum.h  extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
buff  56  include/asm-alpha/checksum.h  extern unsigned short ip_compute_csum(unsigned char * buff, int len);
buff  28  include/asm-alpha/ipsum.h  extern inline unsigned short ip_compute_csum(unsigned char * buff, int len)
buff  39  include/asm-alpha/ipsum.h  static inline unsigned short ip_fast_csum(unsigned char * buff, int wlen)
buff  16  include/asm-i386/checksum.h  unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
buff  123  include/asm-i386/checksum.h  static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
buff  135  include/asm-i386/checksum.h  : "0" (csum_partial(buff, len, 0)));
buff  25  include/asm-mips/checksum.h  unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
buff  161  include/asm-mips/checksum.h  static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
buff  173  include/asm-mips/checksum.h  : "r" (csum_partial(buff, len, 0))
buff  33  include/asm-ppc/checksum.h  extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
buff  56  include/asm-ppc/checksum.h  extern unsigned short ip_compute_csum(unsigned char * buff, int len);
buff  25  include/asm-sparc/checksum.h  do_csum(unsigned char * buff, int len)
buff  32  include/asm-sparc/checksum.h  odd = 1 & (unsigned long) buff;
buff  34  include/asm-sparc/checksum.h  result = *buff << 8;
buff  36  include/asm-sparc/checksum.h  buff++;
buff  40  include/asm-sparc/checksum.h  if (2 & (unsigned long) buff) {
buff  41  include/asm-sparc/checksum.h  result += *(unsigned short *) buff;
buff  44  include/asm-sparc/checksum.h  buff += 2;
buff  50  include/asm-sparc/checksum.h  unsigned long w = *(unsigned long *) buff;
buff  52  include/asm-sparc/checksum.h  buff += 4;
buff  62  include/asm-sparc/checksum.h  result += *(unsigned short *) buff;
buff  63  include/asm-sparc/checksum.h  buff += 2;
buff  67  include/asm-sparc/checksum.h  result += (*buff) << 8;
buff  92  include/asm-sparc/checksum.h  extern inline unsigned int csum_partial(unsigned char * buff, int len, unsigned int sum)
buff  94  include/asm-sparc/checksum.h  unsigned long result = do_csum(buff, len);
buff  128  include/asm-sparc/checksum.h  extern inline unsigned short ip_compute_csum(unsigned char * buff, int len)
buff  130  include/asm-sparc/checksum.h  return ~from32to16(do_csum(buff,len));
buff  34  include/linux/etherdevice.h  extern int    eth_rebuild_header(void *buff, struct device *dev,
buff  34  include/linux/trdevice.h  extern int    tr_rebuild_header(void *buff, struct device *dev,
buff  35  include/net/protocol.h  void      (*err_handler)(int type, int code, unsigned char *buff,
buff  29  include/net/raw.h  extern int  raw_read(struct sock *sk, unsigned char *buff,
buff  351  include/net/sock.h  int      (*rcv)(struct sk_buff *buff, struct device *dev,
buff  41  include/net/udp.h  extern int  udp_read(struct sock *sk, unsigned char *buff,
buff  71  net/802/tr.c  int tr_rebuild_header(void *buff, struct device *dev, unsigned long dest,
buff  74  net/802/tr.c  struct trh_hdr *trh=(struct trh_hdr *)buff;
buff  75  net/802/tr.c  struct trllc *trllc=(struct trllc *)(buff+sizeof(struct trh_hdr));
buff  2517  net/ax25/af_ax25.c  unsigned char *buff = skb_push(skb, AX25_HEADER_LEN);
buff  2519  net/ax25/af_ax25.c  *buff++ = 0;  /* KISS DATA */
buff  2522  net/ax25/af_ax25.c  memcpy(buff, daddr, dev->addr_len);  /* Address specified */
buff  2524  net/ax25/af_ax25.c  buff[6] &= ~LAPB_C;
buff  2525  net/ax25/af_ax25.c  buff[6] &= ~LAPB_E;
buff  2526  net/ax25/af_ax25.c  buff[6] |= SSSID_SPARE;
buff  2527  net/ax25/af_ax25.c  buff += AX25_ADDR_LEN;
buff  2530  net/ax25/af_ax25.c  memcpy(buff, saddr, dev->addr_len);
buff  2532  net/ax25/af_ax25.c  memcpy(buff, dev->dev_addr, dev->addr_len);
buff  2534  net/ax25/af_ax25.c  buff[6] &= ~LAPB_C;
buff  2535  net/ax25/af_ax25.c  buff[6] |= LAPB_E;
buff  2536  net/ax25/af_ax25.c  buff[6] |= SSSID_SPARE;
buff  2537  net/ax25/af_ax25.c  buff   += AX25_ADDR_LEN;
buff  2539  net/ax25/af_ax25.c  *buff++ = LAPB_UI;  /* UI */
buff  2544  net/ax25/af_ax25.c  *buff++ = AX25_P_IP;
buff  2548  net/ax25/af_ax25.c  *buff++ = AX25_P_ARP;
buff  2552  net/ax25/af_ax25.c  *buff++ = 0;
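Read top to bottom, the af_ax25.c rows build a complete KISS/AX.25 header: a zero KISS type byte, the 7-byte destination and source callsign fields with the command and extension bits adjusted in byte 6 of each, then a UI control byte and a protocol ID. A sketch of the same construction into a plain array; the field layout follows the fragments, but the constant values are assumptions for illustration.

    #include <string.h>

    #define AX25_ADDR_LEN 7
    #define LAPB_C        0x80   /* command/response bit (assumed value) */
    #define LAPB_E        0x01   /* address-extension bit (assumed value) */
    #define SSSID_SPARE   0x60   /* spare SSID bits (assumed value) */
    #define LAPB_UI       0x03   /* UI frame control byte */
    #define AX25_P_IP     0xcc   /* PID for IP (assumed value) */

    /* dst and src are 7-byte shifted callsign fields; buff needs 17 bytes. */
    static size_t build_ax25_ip_header_sketch(unsigned char *buff,
                                              const unsigned char *dst,
                                              const unsigned char *src)
    {
        unsigned char *p = buff;

        *p++ = 0;                          /* KISS DATA type byte */

        memcpy(p, dst, AX25_ADDR_LEN);     /* destination address */
        p[6] &= ~LAPB_C;
        p[6] &= ~LAPB_E;
        p[6] |= SSSID_SPARE;
        p += AX25_ADDR_LEN;

        memcpy(p, src, AX25_ADDR_LEN);     /* source address, last in the chain */
        p[6] &= ~LAPB_C;
        p[6] |= LAPB_E;                    /* end-of-address marker */
        p[6] |= SSSID_SPARE;
        p += AX25_ADDR_LEN;

        *p++ = LAPB_UI;                    /* control: UI frame */
        *p++ = AX25_P_IP;                  /* PID: IP */
        return (size_t)(p - buff);         /* 17 bytes, cf. AX25_HEADER_LEN */
    }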
buff  140  net/ethernet/eth.c  int eth_rebuild_header(void *buff, struct device *dev, unsigned long dst,
buff  143  net/ethernet/eth.c  struct ethhdr *eth = (struct ethhdr *)buff;
buff  1218  net/ipv4/tcp.c  struct sk_buff *buff;
buff  1241  net/ipv4/tcp.c  buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
buff  1242  net/ipv4/tcp.c  if (buff == NULL)
buff  1249  net/ipv4/tcp.c  buff->sk = sk;
buff  1250  net/ipv4/tcp.c  buff->localroute = sk->localroute;
buff  1251  net/ipv4/tcp.c  buff->csum = 0;
buff  1257  net/ipv4/tcp.c  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
buff  1261  net/ipv4/tcp.c  buff->free = 1;
buff  1262  net/ipv4/tcp.c  sock_wfree(sk, buff);
buff  1266  net/ipv4/tcp.c  t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  1278  net/ipv4/tcp.c  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
buff  1279  net/ipv4/tcp.c  sk->prot->queue_xmit(sk, dev, buff, 1);
buff  1908  net/ipv4/tcp.c  struct sk_buff *buff;
buff  1955  net/ipv4/tcp.c  buff = sock_wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
buff  1956  net/ipv4/tcp.c  if (buff == NULL)
buff  1961  net/ipv4/tcp.c  buff->sk = sk;
buff  1962  net/ipv4/tcp.c  buff->free = 0;
buff  1963  net/ipv4/tcp.c  buff->localroute = sk->localroute;
buff  1970  net/ipv4/tcp.c  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
buff  1974  net/ipv4/tcp.c  sock_wfree(sk, buff);
buff  1982  net/ipv4/tcp.c  t1 = (struct tcphdr *) skb_put(buff,sizeof(struct tcphdr));
buff  1985  net/ipv4/tcp.c  buff->seq = sk->write_seq++;
buff  1986  net/ipv4/tcp.c  t1->seq = htonl(buff->seq);
buff  1988  net/ipv4/tcp.c  buff->end_seq = sk->write_seq;
buff  2035  net/ipv4/tcp.c  ptr = skb_put(buff,4);
buff  2040  net/ipv4/tcp.c  buff->csum = csum_partial(ptr, 4, 0);
buff  2042  net/ipv4/tcp.c  sizeof(struct tcphdr) + 4, buff);
buff  2060  net/ipv4/tcp.c  sk->prot->queue_xmit(sk, dev, buff, 0);
buff  496  net/ipv4/tcp_output.c  struct sk_buff *buff;
buff  513  net/ipv4/tcp_output.c  buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
buff  514  net/ipv4/tcp_output.c  if (buff == NULL)
buff  517  net/ipv4/tcp_output.c  buff->sk = NULL;
buff  518  net/ipv4/tcp_output.c  buff->dev = dev;
buff  519  net/ipv4/tcp_output.c  buff->localroute = 0;
buff  520  net/ipv4/tcp_output.c  buff->csum = 0;
buff  526  net/ipv4/tcp_output.c  tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
buff  530  net/ipv4/tcp_output.c  buff->free = 1;
buff  531  net/ipv4/tcp_output.c  sock_wfree(NULL, buff);
buff  535  net/ipv4/tcp_output.c  t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  560  net/ipv4/tcp_output.c  tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
buff  561  net/ipv4/tcp_output.c  prot->queue_xmit(NULL, ndev, buff, 1);
buff  574  net/ipv4/tcp_output.c  struct sk_buff *buff;
buff  578  net/ipv4/tcp_output.c  buff = sock_wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
buff  580  net/ipv4/tcp_output.c  if (buff == NULL)
buff  591  net/ipv4/tcp_output.c  buff->sk = sk;
buff  592  net/ipv4/tcp_output.c  buff->localroute = sk->localroute;
buff  593  net/ipv4/tcp_output.c  buff->csum = 0;
buff  599  net/ipv4/tcp_output.c  tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
buff  610  net/ipv4/tcp_output.c  buff->free = 1;
buff  611  net/ipv4/tcp_output.c  sock_wfree(sk,buff);
buff  626  net/ipv4/tcp_output.c  t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  627  net/ipv4/tcp_output.c  buff->dev = dev;
buff  629  net/ipv4/tcp_output.c  buff->seq = sk->write_seq;
buff  631  net/ipv4/tcp_output.c  buff->end_seq = sk->write_seq;
buff  632  net/ipv4/tcp_output.c  t1->seq = htonl(buff->seq);
buff  636  net/ipv4/tcp_output.c  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
buff  645  net/ipv4/tcp_output.c  buff->free = 0;
buff  646  net/ipv4/tcp_output.c  if (buff->next != NULL)
buff  649  net/ipv4/tcp_output.c  skb_unlink(buff);
buff  651  net/ipv4/tcp_output.c  skb_queue_tail(&sk->write_queue, buff);
buff  656  net/ipv4/tcp_output.c  sk->prot->queue_xmit(sk, dev, buff, 0);
buff  666  net/ipv4/tcp_output.c  struct sk_buff * buff;
buff  670  net/ipv4/tcp_output.c  buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
buff  671  net/ipv4/tcp_output.c  if (buff == NULL)
buff  680  net/ipv4/tcp_output.c  buff->sk = newsk;
buff  681  net/ipv4/tcp_output.c  buff->localroute = newsk->localroute;
buff  687  net/ipv4/tcp_output.c  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
buff  697  net/ipv4/tcp_output.c  buff->free = 1;
buff  698  net/ipv4/tcp_output.c  kfree_skb(buff,FREE_WRITE);
buff  706  net/ipv4/tcp_output.c  t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  709  net/ipv4/tcp_output.c  buff->seq = newsk->write_seq++;
buff  710  net/ipv4/tcp_output.c  buff->end_seq = newsk->write_seq;
buff  716  net/ipv4/tcp_output.c  t1->seq = ntohl(buff->seq);
buff  726  net/ipv4/tcp_output.c  ptr = skb_put(buff,4);
buff  731  net/ipv4/tcp_output.c  buff->csum = csum_partial(ptr, 4, 0);
buff  732  net/ipv4/tcp_output.c  tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, buff);
buff  733  net/ipv4/tcp_output.c  newsk->prot->queue_xmit(newsk, ndev, buff, 0);
buff  757  net/ipv4/tcp_output.c  struct sk_buff *buff;
buff  770  net/ipv4/tcp_output.c  buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
buff  771  net/ipv4/tcp_output.c  if (buff == NULL)
buff  792  net/ipv4/tcp_output.c  buff->sk = sk;
buff  793  net/ipv4/tcp_output.c  buff->localroute = sk->localroute;
buff  794  net/ipv4/tcp_output.c  buff->csum = 0;
buff  800  net/ipv4/tcp_output.c  tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
buff  804  net/ipv4/tcp_output.c  buff->free = 1;
buff  805  net/ipv4/tcp_output.c  sock_wfree(sk, buff);
buff  808  net/ipv4/tcp_output.c  t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  846  net/ipv4/tcp_output.c  tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), buff);
buff  849  net/ipv4/tcp_output.c  sk->prot->queue_xmit(sk, dev, buff, 1);
buff  860  net/ipv4/tcp_output.c  struct sk_buff *buff,*skb;
buff  917  net/ipv4/tcp_output.c  buff = sock_wmalloc(sk, win_size + th->doff * 4 +
buff  921  net/ipv4/tcp_output.c  if ( buff == NULL )
buff  929  net/ipv4/tcp_output.c  buff->free = /*0*/1;
buff  931  net/ipv4/tcp_output.c  buff->sk = sk;
buff  932  net/ipv4/tcp_output.c  buff->localroute = sk->localroute;
buff  938  net/ipv4/tcp_output.c  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
buff  939  net/ipv4/tcp_output.c  IPPROTO_TCP, sk->opt, buff->truesize,
buff  943  net/ipv4/tcp_output.c  sock_wfree(sk, buff);
buff  951  net/ipv4/tcp_output.c  buff->dev = dev;
buff  953  net/ipv4/tcp_output.c  nth = (struct tcphdr *) skb_put(buff,sizeof(*th));
buff  970  net/ipv4/tcp_output.c  buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff,win_size),
buff  977  net/ipv4/tcp_output.c  buff->end_seq = sk->sent_seq + win_size;
buff  978  net/ipv4/tcp_output.c  sk->sent_seq = buff->end_seq;    /* Hack */
buff  987  net/ipv4/tcp_output.c  nth->doff * 4 + win_size , buff);
buff  991  net/ipv4/tcp_output.c  buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
buff  992  net/ipv4/tcp_output.c  if (buff == NULL)
buff  995  net/ipv4/tcp_output.c  buff->free = 1;
buff  996  net/ipv4/tcp_output.c  buff->sk = sk;
buff  997  net/ipv4/tcp_output.c  buff->localroute = sk->localroute;
buff  998  net/ipv4/tcp_output.c  buff->csum = 0;
buff  1004  net/ipv4/tcp_output.c  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
buff  1008  net/ipv4/tcp_output.c  sock_wfree(sk, buff);
buff  1012  net/ipv4/tcp_output.c  t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  1024  net/ipv4/tcp_output.c  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
buff  1032  net/ipv4/tcp_output.c  sk->prot->queue_xmit(sk, dev, buff, 1);
buff  51  net/ipv4/utils.c  static char buff[18];
buff  55  net/ipv4/utils.c  sprintf(buff, "%d.%d.%d.%d",
buff  57  net/ipv4/utils.c  return(buff);
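The three net/ipv4/utils.c rows are essentially the whole of the classic in_ntoa()-style helper: format an IPv4 address into a static 18-byte buffer and return it, which makes the function non-reentrant by design. A sketch of that shape (name and signature assumed from the fragments):

    #include <stdio.h>
    #include <stdint.h>

    static char *in_ntoa_sketch(uint32_t addr)   /* addr in network byte order */
    {
        static char buff[18];                    /* shared buffer: not reentrant */
        const unsigned char *p = (const unsigned char *)&addr;

        sprintf(buff, "%d.%d.%d.%d", p[0], p[1], p[2], p[3]);
        return buff;
    }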
buff  78  net/netrom/nr_dev.c  unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
buff  80  net/netrom/nr_dev.c  memcpy(buff, (saddr != NULL) ? saddr : dev->dev_addr, dev->addr_len);
buff  81  net/netrom/nr_dev.c  buff[6] &= ~LAPB_C;
buff  82  net/netrom/nr_dev.c  buff[6] &= ~LAPB_E;
buff  83  net/netrom/nr_dev.c  buff[6] |= SSSID_SPARE;
buff  84  net/netrom/nr_dev.c  buff    += AX25_ADDR_LEN;
buff  87  net/netrom/nr_dev.c  memcpy(buff, daddr, dev->addr_len);
buff  88  net/netrom/nr_dev.c  buff[6] &= ~LAPB_C;
buff  89  net/netrom/nr_dev.c  buff[6] |= LAPB_E;
buff  90  net/netrom/nr_dev.c  buff[6] |= SSSID_SPARE;
buff  91  net/netrom/nr_dev.c  buff    += AX25_ADDR_LEN;
buff  93  net/netrom/nr_dev.c  *buff++ = nr_default.ttl;
buff  95  net/netrom/nr_dev.c  *buff++ = NR_PROTO_IP;
buff  96  net/netrom/nr_dev.c  *buff++ = NR_PROTO_IP;
buff  97  net/netrom/nr_dev.c  *buff++ = 0;
buff  98  net/netrom/nr_dev.c  *buff++ = 0;
buff  99  net/netrom/nr_dev.c  *buff++ = NR_PROTOEXT;
buff  107  net/netrom/nr_dev.c  static int nr_rebuild_header(void *buff, struct device *dev,
buff  111  net/netrom/nr_dev.c  unsigned char *bp = (unsigned char *)buff;
buff  894  net/socket.c  asmlinkage int sys_send(int fd, void * buff, int len, unsigned flags)
buff  909  net/socket.c  err=verify_area(VERIFY_READ, buff, len);
buff  913  net/socket.c  iov.iov_base=buff;
buff  928  net/socket.c  asmlinkage int sys_sendto(int fd, void * buff, int len, unsigned flags,
buff  945  net/socket.c  err=verify_area(VERIFY_READ,buff,len);
buff  952  net/socket.c  iov.iov_base=buff;