tag  line  file  source code
buff  47  arch/alpha/lib/checksum.c  static inline unsigned long do_csum(unsigned char * buff, int len)
buff  54  arch/alpha/lib/checksum.c  odd = 1 & (unsigned long) buff;
buff  56  arch/alpha/lib/checksum.c  result = *buff << 8;
buff  58  arch/alpha/lib/checksum.c  buff++;
buff  62  arch/alpha/lib/checksum.c  if (2 & (unsigned long) buff) {
buff  63  arch/alpha/lib/checksum.c  result += *(unsigned short *) buff;
buff  66  arch/alpha/lib/checksum.c  buff += 2;
buff  70  arch/alpha/lib/checksum.c  if (4 & (unsigned long) buff) {
buff  71  arch/alpha/lib/checksum.c  result += *(unsigned int *) buff;
buff  74  arch/alpha/lib/checksum.c  buff += 4;
buff  80  arch/alpha/lib/checksum.c  unsigned long w = *(unsigned long *) buff;
buff  82  arch/alpha/lib/checksum.c  buff += 8;
buff  91  arch/alpha/lib/checksum.c  result += *(unsigned int *) buff;
buff  92  arch/alpha/lib/checksum.c  buff += 4;
buff  96  arch/alpha/lib/checksum.c  result += *(unsigned short *) buff;
buff  97  arch/alpha/lib/checksum.c  buff += 2;
buff  101  arch/alpha/lib/checksum.c  result += *buff;
buff  130  arch/alpha/lib/checksum.c  unsigned int csum_partial(unsigned char * buff, int len, unsigned int sum)
buff  132  arch/alpha/lib/checksum.c  unsigned long result = do_csum(buff, len);
buff  167  arch/alpha/lib/checksum.c  unsigned short ip_compute_csum(unsigned char * buff, int len)
buff  169  arch/alpha/lib/checksum.c  return ~from64to16(do_csum(buff,len));
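
Note: the arch/alpha/lib/checksum.c rows above show the classic Internet-checksum inner loop: align buff to a 2-, then 4-, then 8-byte boundary, sum aligned words, and mop up the tail. The following is a minimal userspace sketch of the same technique, not the kernel code itself: it keeps only the 16-bit tier, ignores the little-endian specifics of the alpha version, and uses a hypothetical fold16() helper in place of the kernel's from64to16().

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit partial sum down to 16 bits, adding carries back in. */
    static uint16_t fold16(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* Simplified do_csum(): leading odd byte, aligned 16-bit words,
     * trailing byte -- the same shape as the alpha code, minus its
     * 32- and 64-bit alignment tiers. */
    static uint32_t do_csum_sketch(const unsigned char *buff, int len)
    {
        uint32_t result = 0;
        int odd = 1 & (uintptr_t)buff;

        if (odd && len > 0) {       /* leading unaligned byte */
            result = *buff << 8;    /* lands in the high half of the word */
            buff++;
            len--;
        }
        while (len >= 2) {          /* now 2-byte aligned */
            result += *(const uint16_t *)buff;
            buff += 2;
            len -= 2;
        }
        if (len)                    /* trailing byte */
            result += *buff;
        result = fold16(result);
        if (odd)                    /* undo the byte rotation the odd start caused */
            result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
        return result;
    }

    int main(void)
    {
        unsigned char data[] = { 0x45, 0x00, 0x00, 0x54 };
        printf("csum = 0x%04x\n", do_csum_sketch(data, sizeof data));
        return 0;
    }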
buff  26  arch/i386/lib/checksum.c  unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) {
buff  93  arch/i386/lib/checksum.c  : "0"(sum), "c"(len), "S"(buff)
buff  38  arch/m68k/lib/checksum.c  csum_partial (const unsigned char *buff, int len, unsigned int sum)
buff  117  arch/m68k/lib/checksum.c  : "=d" (sum), "=d" (len), "=a" (buff),
buff  119  arch/m68k/lib/checksum.c  : "0" (sum), "1" (len), "2" (buff)
buff  24  arch/mips/lib/checksum.c  unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
buff  128  arch/mips/lib/checksum.c  : "0"(sum), "r"(len), "r"(buff)
buff  107  arch/ppc/kernel/stubs.c  unsigned int csum_partial(unsigned char * buff, int len, unsigned int sum)
buff  594  drivers/cdrom/sonycd535.c  Byte **buff, int buf_size)
buff  628  drivers/cdrom/sonycd535.c  data_buff = buff[sector_count++];
buff  774  drivers/char/ftape/fdc-io.c  int setup_fdc_and_dma(buffer_struct * buff, unsigned char operation)
buff  811  drivers/char/ftape/fdc-io.c  TRACEx2(5, "xfer %d sectors to 0x%p", buff->sector_count, buff->ptr);
buff  840  drivers/char/ftape/fdc-io.c  TRACEx2(5, "xfer %d sectors from 0x%p", buff->sector_count, buff->ptr);
buff  853  drivers/char/ftape/fdc-io.c  set_dma_addr(fdc.dma, (unsigned) buff->ptr);
buff  854  drivers/char/ftape/fdc-io.c  set_dma_count(fdc.dma, SECTOR_SIZE * buff->sector_count);
buff  872  drivers/char/ftape/fdc-io.c  out[2] = buff->cyl;
buff  873  drivers/char/ftape/fdc-io.c  out[3] = buff->head;
buff  874  drivers/char/ftape/fdc-io.c  out[4] = buff->sect + buff->sector_offset;
buff  876  drivers/char/ftape/fdc-io.c  out[6] = out[4] + buff->sector_count - 1;  /* last sector */
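
Note: the fdc-io.c rows show setup_fdc_and_dma() programming the DMA controller from buff->ptr and buff->sector_count, then filling the FDC command block from the buffer's tape geometry. Below is a hypothetical, self-contained rendering of just that command-block arithmetic; the miniature buffer_struct, the SECTOR_SIZE value, and the out[5] placeholder are assumptions for illustration, only the field names and the out[2..4]/out[6] formulas come from the rows above.

    #include <stdio.h>

    #define SECTOR_SIZE 1024  /* assumption: ftape uses 1 KB sectors */

    /* Invented miniature of ftape's buffer_struct, just the fields used here. */
    struct buffer_struct {
        unsigned char cyl, head, sect;
        int sector_offset;
        int sector_count;
    };

    /* Mirror of the out[] setup in the excerpt: cylinder, head, first
     * sector (base + offset), and last sector = first + count - 1. */
    static void fill_fdc_command(const struct buffer_struct *buff,
                                 unsigned char out[7])
    {
        out[2] = buff->cyl;
        out[3] = buff->head;
        out[4] = buff->sect + buff->sector_offset;
        out[5] = 0;                                /* placeholder: sector-size code */
        out[6] = out[4] + buff->sector_count - 1;  /* last sector of the burst */
    }

    int main(void)
    {
        struct buffer_struct b = { .cyl = 3, .head = 1, .sect = 1,
                                   .sector_offset = 4, .sector_count = 8 };
        unsigned char out[7] = { 0 };
        fill_fdc_command(&b, out);
        printf("C=%u H=%u first=%u last=%u (%d bytes via DMA)\n",
               out[2], out[3], out[4], out[6], SECTOR_SIZE * b.sector_count);
        return 0;
    }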
buff  191  drivers/char/ftape/fdc-isr.c  static void skip_bad_sector(buffer_struct * buff)
buff  197  drivers/char/ftape/fdc-isr.c  if (buff->remaining > 0) {
buff  198  drivers/char/ftape/fdc-isr.c  ++buff->sector_offset;
buff  199  drivers/char/ftape/fdc-isr.c  ++buff->data_offset;
buff  200  drivers/char/ftape/fdc-isr.c  --buff->remaining;
buff  201  drivers/char/ftape/fdc-isr.c  buff->ptr += SECTOR_SIZE;
buff  202  drivers/char/ftape/fdc-isr.c  buff->bad_sector_map >>= 1;
buff  204  drivers/char/ftape/fdc-isr.c  ++buff->sector_offset;  /* hack for error maps */
buff  210  drivers/char/ftape/fdc-isr.c  static void update_error_maps(buffer_struct * buff, unsigned error_offset)
buff  217  drivers/char/ftape/fdc-isr.c  if (buff->retry < SOFT_RETRIES) {
buff  218  drivers/char/ftape/fdc-isr.c  buff->soft_error_map |= (1 << error_offset);
buff  220  drivers/char/ftape/fdc-isr.c  buff->hard_error_map |= (1 << error_offset);
buff  221  drivers/char/ftape/fdc-isr.c  buff->soft_error_map &= ~buff->hard_error_map;
buff  222  drivers/char/ftape/fdc-isr.c  buff->retry = -1;  /* will be set to 0 in setup_segment */
buff  228  drivers/char/ftape/fdc-isr.c  buff->hard_error_map, buff->soft_error_map);
buff  244  drivers/char/ftape/fdc-isr.c  static void determine_progress(buffer_struct * buff, error_cause cause, int mode)
buff  256  drivers/char/ftape/fdc-isr.c  nr_xferred = buff->sector_count * SECTOR_SIZE - dma_residue;
buff  266  drivers/char/ftape/fdc-isr.c  buff->sector_count -= nr_not_xferred;  /* adjust to actual value */
buff  270  drivers/char/ftape/fdc-isr.c  if (buff->sector_count > 0) {
buff  271  drivers/char/ftape/fdc-isr.c  buff->sector_offset += buff->sector_count;
buff  272  drivers/char/ftape/fdc-isr.c  buff->data_offset += buff->sector_count;
buff  273  drivers/char/ftape/fdc-isr.c  buff->ptr += buff->sector_count * SECTOR_SIZE;
buff  274  drivers/char/ftape/fdc-isr.c  buff->remaining -= buff->sector_count;
buff  275  drivers/char/ftape/fdc-isr.c  buff->bad_sector_map >>= buff->sector_count;
buff  278  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "%d Sector(s) transferred", buff->sector_count);
buff  280  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "Sector %d not found", SECTOR(buff->sector_offset));
buff  283  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "Error in sector %d", SECTOR(buff->sector_offset));
buff  289  drivers/char/ftape/fdc-isr.c  TRACEx1(4, "Unexpected error at sector %d", SECTOR(buff->sector_offset));
buff  299  drivers/char/ftape/fdc-isr.c  skip_bad_sector(buff);
buff  301  drivers/char/ftape/fdc-isr.c  update_error_maps(buff, buff->sector_offset - 1);
buff  379  drivers/char/ftape/fdc-isr.c  buffer_struct *buff = *p_buff;
buff  387  drivers/char/ftape/fdc-isr.c  (buff->status != (write ? writing : reading))) {
buff  389  drivers/char/ftape/fdc-isr.c  runner_status, buff->status);
buff  390  drivers/char/ftape/fdc-isr.c  buff->status = error;
buff  396  drivers/char/ftape/fdc-isr.c  if (buff->remaining > 0 && calc_next_cluster(&buffer[head]) > 0) {
buff  402  drivers/char/ftape/fdc-isr.c  buff->bytes = buff->ptr - buff->address;
buff  403  drivers/char/ftape/fdc-isr.c  buff->status = error;
buff  404  drivers/char/ftape/fdc-isr.c  buff = *p_buff = next_buffer(&head);  /* finish this buffer */
buff  411  drivers/char/ftape/fdc-isr.c  unsigned last_segment = buff->segment_id;
buff  413  drivers/char/ftape/fdc-isr.c  int next = buff->next_segment;  /* 0 means stop ! */
buff  415  drivers/char/ftape/fdc-isr.c  buff->bytes = buff->ptr - buff->address;
buff  416  drivers/char/ftape/fdc-isr.c  buff->status = done;
buff  417  drivers/char/ftape/fdc-isr.c  buff = *p_buff = next_buffer(&head);
buff  426  drivers/char/ftape/fdc-isr.c  if (buff->status == waiting) {
buff  427  drivers/char/ftape/fdc-isr.c  if (write && next != buff->segment_id) {
buff  434  drivers/char/ftape/fdc-isr.c  buff->next_segment = 0;
buff  443  drivers/char/ftape/fdc-isr.c  buff->status = (write) ? writing : reading;  /* keep on going */
buff  474  drivers/char/ftape/fdc-isr.c  buffer_struct *buff = *p_buff;
buff  478  drivers/char/ftape/fdc-isr.c  pause_tape(buff->segment_id, 1, fdc_mode);
buff  480  drivers/char/ftape/fdc-isr.c  buff->status = error;
buff  481  drivers/char/ftape/fdc-isr.c  buff->skip = skip;
buff  486  drivers/char/ftape/fdc-isr.c  find_resume_point(buffer_struct * buff)
buff  500  drivers/char/ftape/fdc-isr.c  if (buff->sector_offset < 1 || buff->sector_offset > 32) {
buff  501  drivers/char/ftape/fdc-isr.c  TRACEx1(1, "bug: sector_offset = %d", buff->sector_offset);
buff  503  drivers/char/ftape/fdc-isr.c  if (buff->sector_offset >= 32) {  /* C-limitation on shift ! */
buff  506  drivers/char/ftape/fdc-isr.c  mask = (1 << buff->sector_offset) - 1;
buff  508  drivers/char/ftape/fdc-isr.c  map = buff->soft_error_map & mask;
buff  515  drivers/char/ftape/fdc-isr.c  map = buff->hard_error_map & mask;
buff  516  drivers/char/ftape/fdc-isr.c  i = buff->sector_offset - 1;
buff  544  drivers/char/ftape/fdc-isr.c  buffer_struct *buff = &buffer[head];
buff  581  drivers/char/ftape/fdc-isr.c  TRACEi(5, "reading segment", buff->segment_id);
buff  583  drivers/char/ftape/fdc-isr.c  TRACEi(4, "error reading segment", buff->segment_id);
buff  589  drivers/char/ftape/fdc-isr.c  if (buff->retry > 0) {
buff  590  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "this is retry nr %d", buff->retry);
buff  592  drivers/char/ftape/fdc-isr.c  if (buff->bad_sector_map == FAKE_SEGMENT) {
buff  600  drivers/char/ftape/fdc-isr.c  buff->remaining = 0;  /* skip failing sector */
buff  601  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, no_error, fdc_mode, 1);  /* fake success */
buff  605  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_reading_data);
buff  611  drivers/char/ftape/fdc-isr.c  SECTOR(buff->sector_offset - 1));
buff  612  drivers/char/ftape/fdc-isr.c  buff->deleted = 1;
buff  613  drivers/char/ftape/fdc-isr.c  buff->remaining = 0;  /* abort transfer */
buff  614  drivers/char/ftape/fdc-isr.c  buff->soft_error_map |= (-1L << buff->sector_offset);
buff  615  drivers/char/ftape/fdc-isr.c  if (buff->segment_id == 0) {
buff  618  drivers/char/ftape/fdc-isr.c  buff->next_segment = buff->segment_id + 1;  /* force read-ahead */
buff  619  drivers/char/ftape/fdc-isr.c  skip = (SECTORS_PER_SEGMENT - buff->sector_offset);
buff  623  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, cause, fdc_mode, skip);
buff  637  drivers/char/ftape/fdc-isr.c  int first_error = (buff->soft_error_map == 0 &&
buff  638  drivers/char/ftape/fdc-isr.c  buff->hard_error_map == 0);
buff  641  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_reading_data);
buff  643  drivers/char/ftape/fdc-isr.c  skip = buff->sector_offset;
buff  645  drivers/char/ftape/fdc-isr.c  skip = find_resume_point(buff);
buff  653  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, cause, fdc_mode, skip);
buff  655  drivers/char/ftape/fdc-isr.c  retry_sector(&buff, cause, fdc_mode, skip);
buff  663  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_reading_data);
buff  664  drivers/char/ftape/fdc-isr.c  retry_sector(&buff, cause, fdc_mode, 0);
buff  693  drivers/char/ftape/fdc-isr.c  TRACEi(5, "writing segment", buff->segment_id);
buff  695  drivers/char/ftape/fdc-isr.c  TRACEi(4, "error writing segment", buff->segment_id);
buff  701  drivers/char/ftape/fdc-isr.c  if (buff->retry > 0) {
buff  702  drivers/char/ftape/fdc-isr.c  TRACEx1(5, "this is retry nr %d", buff->retry);
buff  704  drivers/char/ftape/fdc-isr.c  if (buff->bad_sector_map == FAKE_SEGMENT) {
buff  711  drivers/char/ftape/fdc-isr.c  buff->remaining = 0;  /* skip failing sector */
buff  712  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, no_error, fdc_mode, 1);  /* fake success */
buff  716  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_writing_data);
buff  717  drivers/char/ftape/fdc-isr.c  continue_xfer(&buff, cause, fdc_mode, 0);
buff  726  drivers/char/ftape/fdc-isr.c  determine_progress(buff, cause, fdc_writing_data);
buff  727  drivers/char/ftape/fdc-isr.c  skip = find_resume_point(buff);
buff  728  drivers/char/ftape/fdc-isr.c  retry_sector(&buff, cause, fdc_mode, skip);
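
Note: the find_resume_point() rows above contain a C detail worth spelling out: `(1 << sector_offset) - 1` is undefined when sector_offset equals the width of the type, which is why the code special-cases sector_offset >= 32 before building its error-map mask (the "C-limitation on shift" comment). A standalone illustration of that guard, with an invented error-map value:

    #include <stdio.h>
    #include <stdint.h>

    /* Build a mask covering sectors [0, sector_offset), avoiding the
     * undefined behaviour of shifting a 32-bit value by 32. */
    static uint32_t sectors_before(int sector_offset)
    {
        if (sector_offset >= 32)
            return 0xffffffffu;            /* whole segment */
        return (1u << sector_offset) - 1;
    }

    int main(void)
    {
        uint32_t soft_error_map = 0x00000014; /* invented: sectors 2 and 4 soft-failed */
        int sector_offset = 6;

        uint32_t mask = sectors_before(sector_offset);
        uint32_t map  = soft_error_map & mask; /* errors in the part already covered */
        printf("mask=%08x, soft errors before offset: %08x\n", mask, map);
        return 0;
    }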
buff  74  drivers/char/ftape/ftape-read.c  static unsigned long convert_sector_map(buffer_struct * buff)
buff  78  drivers/char/ftape/ftape-read.c  unsigned long bad_map = get_bad_sector_entry(buff->segment_id);
buff  79  drivers/char/ftape/ftape-read.c  unsigned long src_map = buff->soft_error_map | buff->hard_error_map;
buff  577  drivers/char/ftape/ftape-read.c  int _ftape_read(char *buff, int req_len)
buff  653  drivers/char/ftape/ftape-read.c  result = verify_area(VERIFY_WRITE, buff, cnt);
buff  659  drivers/char/ftape/ftape-read.c  memcpy_tofs(buff, deblock_buffer + buf_pos_rd, cnt);
buff  660  drivers/char/ftape/ftape-read.c  buff += cnt;
buff  39  drivers/char/ftape/ftape-read.h  extern int _ftape_read(char *buff, int req_len);
buff  105  drivers/char/ftape/ftape-rw.c  static int setup_segment(buffer_struct * buff, unsigned int segment_id,
buff  112  drivers/char/ftape/ftape-rw.c  buff->segment_id = segment_id;
buff  113  drivers/char/ftape/ftape-rw.c  buff->sector_offset = sector_offset;
buff  114  drivers/char/ftape/ftape-rw.c  buff->remaining = sector_count;
buff  115  drivers/char/ftape/ftape-rw.c  buff->head = segment_id / segments_per_head;
buff  116  drivers/char/ftape/ftape-rw.c  buff->cyl = (segment_id % segments_per_head) / segments_per_cylinder;
buff  117  drivers/char/ftape/ftape-rw.c  buff->sect = (segment_id % segments_per_cylinder) * SECTORS_PER_SEGMENT + 1;
buff  118  drivers/char/ftape/ftape-rw.c  buff->deleted = 0;
buff  119  drivers/char/ftape/ftape-rw.c  offset_mask = (1 << buff->sector_offset) - 1;
buff  127  drivers/char/ftape/ftape-rw.c  buff->data_offset = count_ones(offset_mask);  /* good sectors to skip */
buff  128  drivers/char/ftape/ftape-rw.c  buff->ptr = buff->address + buff->data_offset * SECTOR_SIZE;
buff  129  drivers/char/ftape/ftape-rw.c  TRACEx1(5, "data offset = %d sectors", buff->data_offset);
buff  131  drivers/char/ftape/ftape-rw.c  buff->soft_error_map &= offset_mask;  /* keep skipped part */
buff  133  drivers/char/ftape/ftape-rw.c  buff->hard_error_map = buff->soft_error_map = 0;
buff  135  drivers/char/ftape/ftape-rw.c  buff->bad_sector_map = get_bad_sector_entry(buff->segment_id);
buff  136  drivers/char/ftape/ftape-rw.c  if (buff->bad_sector_map != 0) {
buff  138  drivers/char/ftape/ftape-rw.c  buff->segment_id, buff->bad_sector_map);
buff  140  drivers/char/ftape/ftape-rw.c  TRACEx1(5, "segment: %d", buff->segment_id);
buff  142  drivers/char/ftape/ftape-rw.c  if (buff->sector_offset > 0) {
buff  143  drivers/char/ftape/ftape-rw.c  buff->bad_sector_map >>= buff->sector_offset;
buff  145  drivers/char/ftape/ftape-rw.c  if (buff->sector_offset != 0 || buff->remaining != SECTORS_PER_SEGMENT) {
buff  147  drivers/char/ftape/ftape-rw.c  buff->sector_offset, buff->remaining);
buff  163  drivers/char/ftape/ftape-rw.c  if (buff->bad_sector_map == EMPTY_SEGMENT) {
buff  165  drivers/char/ftape/ftape-rw.c  buff->bad_sector_map = FAKE_SEGMENT;
buff  168  drivers/char/ftape/ftape-rw.c  buff->next_segment = segment_id + 1;
buff  175  drivers/char/ftape/ftape-rw.c  int setup_new_segment(buffer_struct * buff, unsigned int segment_id, int skip)
buff  193  drivers/char/ftape/ftape-rw.c  ++buff->retry;
buff  195  drivers/char/ftape/ftape-rw.c  TRACEx1(5, "setting up for retry nr %d", buff->retry);
buff  197  drivers/char/ftape/ftape-rw.c  if (skip && buff->skip > 0) {  /* allow skip on retry */
buff  198  drivers/char/ftape/ftape-rw.c  offset = buff->skip;
buff  203  drivers/char/ftape/ftape-rw.c  buff->retry = 0;
buff  204  drivers/char/ftape/ftape-rw.c  buff->skip = 0;
buff  207  drivers/char/ftape/ftape-rw.c  result = setup_segment(buff, segment_id, offset, count, retry);
buff  214  drivers/char/ftape/ftape-rw.c  int calc_next_cluster(buffer_struct * buff)
buff  218  drivers/char/ftape/ftape-rw.c  while (buff->remaining > 0 && (buff->bad_sector_map & 1) != 0) {
buff  219  drivers/char/ftape/ftape-rw.c  buff->bad_sector_map >>= 1;
buff  220  drivers/char/ftape/ftape-rw.c  ++buff->sector_offset;
buff  221  drivers/char/ftape/ftape-rw.c  --buff->remaining;
buff  225  drivers/char/ftape/ftape-rw.c  if (buff->bad_sector_map == 0) {  /* speed up */
buff  226  drivers/char/ftape/ftape-rw.c  buff->sector_count = buff->remaining;
buff  228  drivers/char/ftape/ftape-rw.c  unsigned long map = buff->bad_sector_map;
buff  230  drivers/char/ftape/ftape-rw.c  buff->sector_count = 0;
buff  231  drivers/char/ftape/ftape-rw.c  while (buff->sector_count < buff->remaining && (map & 1) == 0) {
buff  232  drivers/char/ftape/ftape-rw.c  ++buff->sector_count;
buff  236  drivers/char/ftape/ftape-rw.c  return buff->sector_count;
buff  159  drivers/char/ftape/ftape-rw.h  extern int setup_new_segment(buffer_struct * buff, unsigned int segment_id,
buff  161  drivers/char/ftape/ftape-rw.h  extern int calc_next_cluster(buffer_struct * buff);
buff  171  drivers/char/ftape/ftape-rw.h  extern int setup_fdc_and_dma(buffer_struct * buff, byte operation);
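
Note: ftape-rw.c's calc_next_cluster() treats bad_sector_map as a bitmap over the segment, bit i marking sector i bad. It first consumes leading bad sectors (shifting the map, bumping sector_offset, decrementing remaining), then counts how many consecutive good sectors can go in one transfer burst. The sketch below reassembles the scattered rows above into a compilable whole; the miniature struct and the sample map are the only inventions.

    #include <stdio.h>
    #include <stdint.h>

    /* Invented miniature of the buffer_struct fields this routine touches. */
    struct buffer_struct {
        uint32_t bad_sector_map;   /* bit i set => sector i of the segment is bad */
        int sector_offset;
        int remaining;
        int sector_count;
    };

    /* Skip leading bad sectors, then size the next contiguous good cluster,
     * following the control flow of the calc_next_cluster() rows above. */
    static int calc_next_cluster(struct buffer_struct *buff)
    {
        while (buff->remaining > 0 && (buff->bad_sector_map & 1) != 0) {
            buff->bad_sector_map >>= 1;
            ++buff->sector_offset;
            --buff->remaining;
        }
        if (buff->bad_sector_map == 0) {   /* fast path: the rest is clean */
            buff->sector_count = buff->remaining;
        } else {
            uint32_t map = buff->bad_sector_map;
            buff->sector_count = 0;
            while (buff->sector_count < buff->remaining && (map & 1) == 0) {
                ++buff->sector_count;
                map >>= 1;
            }
        }
        return buff->sector_count;
    }

    int main(void)
    {
        /* invented map 0x43: sectors 0, 1 and 6 bad, the rest good */
        struct buffer_struct b = { 0x43, 0, 32, 0 };
        printf("cluster of %d sectors at offset %d\n",
               calc_next_cluster(&b), b.sector_offset);
        return 0;
    }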
buff  112  drivers/char/ftape/ftape-write.c  buffer_struct *buff = &buffer[head];
buff  113  drivers/char/ftape/ftape-write.c  int segment_id = buff->segment_id;
buff  115  drivers/char/ftape/ftape-write.c  if (ftape_state == writing && buff->status == waiting) {
buff  116  drivers/char/ftape/ftape-write.c  setup_new_segment(buff, segment_id, 1);
buff  120  drivers/char/ftape/ftape-write.c  calc_next_cluster(buff);  /* prepare */
buff  121  drivers/char/ftape/ftape-write.c  buff->status = writing;
buff  124  drivers/char/ftape/ftape-write.c  result = ftape_start_tape(segment_id, buff->sector_offset);
buff  130  drivers/char/ftape/ftape-write.c  result = setup_fdc_and_dma(buff, FDC_WRITE);  /* go */
buff  589  drivers/char/ftape/ftape-write.c  int _ftape_write(const char *buff, int req_len)
buff  635  drivers/char/ftape/ftape-write.c  result = verify_area(VERIFY_READ, buff, cnt);
buff  642  drivers/char/ftape/ftape-write.c  memcpy_fromfs(deblock_buffer + buf_pos_wr, buff, cnt);
buff  643  drivers/char/ftape/ftape-write.c  buff += cnt;
buff  39  drivers/char/ftape/ftape-write.h  extern int _ftape_write(const char *buff, int req_len);
buff  64  drivers/char/ftape/kernel-interface.c  static int ftape_read(struct inode *ino, struct file *fp, char *buff,
buff  66  drivers/char/ftape/kernel-interface.c  static int ftape_write(struct inode *ino, struct file *fp, const char *buff,
buff  313  drivers/char/ftape/kernel-interface.c  static int ftape_read(struct inode *ino, struct file *fp, char *buff, int req_len)
buff  327  drivers/char/ftape/kernel-interface.c  result = _ftape_read(buff, req_len);
buff  336  drivers/char/ftape/kernel-interface.c  static int ftape_write(struct inode *ino, struct file *fp, const char *buff, int req_len)
buff  350  drivers/char/ftape/kernel-interface.c  result = _ftape_write(buff, req_len);
buff  1182  drivers/isdn/isdn_net.c  isdn_net_rebuild_header(void *buff, struct device *dev, ulong dst,
buff  1189  drivers/isdn/isdn_net.c  struct ethhdr *eth = (struct ethhdr *)buff;
buff  49  drivers/net/apricot.c  #define kfree_skbmem(buff, size) kfree_s(buff,size)
buff  2658  drivers/net/arcnet.c  int arcnetA_rebuild_header(void *buff,struct device *dev,unsigned long dst,
buff  2661  drivers/net/arcnet.c  struct ClientData *head = (struct ClientData *)buff;
buff  3113  drivers/net/arcnet.c  int arcnetS_rebuild_header(void *buff,struct device *dev,unsigned long dst,
buff  3116  drivers/net/arcnet.c  struct S_ClientData *head = (struct S_ClientData *)buff;
buff  161  drivers/net/eql.c  static int eql_rebuild_header(void *buff, struct device *dev,
buff  424  drivers/net/eql.c  static int eql_rebuild_header(void *buff, struct device *dev,
buff  1084  drivers/net/pi2.c  static int pi_rebuild_header(void *buff, struct device *dev, unsigned long raddr,
buff  1087  drivers/net/pi2.c  return ax25_rebuild_header(buff, dev, raddr, skb);
buff  146  drivers/net/plip.c  static int plip_rebuild_header(void *buff, struct device *dev,
buff  859  drivers/net/plip.c  plip_rebuild_header(void *buff, struct device *dev, unsigned long dst,
buff  863  drivers/net/plip.c  struct ethhdr *eth = (struct ethhdr *)buff;
buff  867  drivers/net/plip.c  return nl->orig_rebuild_header(buff, dev, dst, skb);
buff  347  drivers/net/pt.c  static int pt_rebuild_header(void *buff, struct device *dev, unsigned long raddr,
buff  350  drivers/net/pt.c  return ax25_rebuild_header(buff, dev, raddr, skb);
buff  556  drivers/net/slip.c  sl_rebuild_header(void *buff, struct device *dev, unsigned long raddr,
buff  564  drivers/net/slip.c  return ax25_rebuild_header(buff, dev, raddr, skb);
buff  990  drivers/net/strip.c  static int strip_rebuild_header(void *buff, struct device *dev,
buff  1138  drivers/net/tulip.c  char *buff;
buff  1150  drivers/net/tulip.c  buff = (char *)kmalloc(alloc_size, GFP_KERNEL);
buff  1151  drivers/net/tulip.c  dev = (struct device *)buff;
buff  1156  drivers/net/tulip.c  tp = (struct tulip_private *)(buff + sizeof(struct device));
buff  1157  drivers/net/tulip.c  memset(buff, 0, alloc_size);
buff  1159  drivers/net/tulip.c  dev->name = (char *)(buff + sizeof(struct device)
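
Note: the tulip.c rows show an old driver trick: a single kmalloc() serves as the struct device, the device-private area, and the name string, carved apart by pointer arithmetic. Below is a userspace sketch of the same single-allocation layout; the two struct definitions are invented stand-ins, and malloc() substitutes for kmalloc(..., GFP_KERNEL).

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Invented stand-ins for struct device / struct tulip_private. */
    struct device        { char *name; int irq; };
    struct tulip_private { int csr0; char rx_ring[64]; };

    int main(void)
    {
        /* One allocation holds the device, its private data, and the name. */
        size_t alloc_size = sizeof(struct device)
                          + sizeof(struct tulip_private)
                          + 8;                       /* room for an "eth%d"-style name */
        char *buff = malloc(alloc_size);
        if (!buff)
            return 1;
        memset(buff, 0, alloc_size);

        struct device *dev = (struct device *)buff;
        struct tulip_private *tp =
            (struct tulip_private *)(buff + sizeof(struct device));
        dev->name = buff + sizeof(struct device) + sizeof(struct tulip_private);
        strcpy(dev->name, "eth0");

        printf("dev=%p tp=%p name=%s\n", (void *)dev, (void *)tp, dev->name);
        free(buff);
        return 0;
    }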
buff  79  drivers/net/wic.c  int wic_rebuild_header(void *buff, struct device *dev,
buff  919  drivers/net/wic.c  wic_rebuild_header(void *buff, struct device *dev, unsigned long dst,
buff  923  drivers/net/wic.c  struct ethhdr *eth = (struct ethhdr *)buff;
buff  927  drivers/net/wic.c  return nl->orig_rebuild_header(buff, dev, dst, skb);
buff  518  drivers/scsi/aha1542.c  void *buff = SCpnt->request_buffer;
buff  645  drivers/scsi/aha1542.c  if(((unsigned int) buff & 0xff000000)) goto baddma;
buff  646  drivers/scsi/aha1542.c  any2scsi(ccb[mbo].dataptr, buff);
buff  258  drivers/scsi/aha1740.c  void *buff = SCpnt->request_buffer;
buff  354  drivers/scsi/aha1740.c  ecb[ecbno].dataptr = (long) buff;
buff  79  drivers/scsi/aic7xxx_proc.c  static u8 buff[512];
buff  125  drivers/scsi/aic7xxx_proc.c  memset(buff, 0, sizeof(buff));
buff  849  drivers/scsi/eata_dma.c  static char *buff;
buff  858  drivers/scsi/eata_dma.c  buff = dma_scratch;
buff  862  drivers/scsi/eata_dma.c  memset(buff, 0, 256);
buff  870  drivers/scsi/eata_dma.c  cp->cp_dataDMA = htonl(virt_to_bus(buff));
buff  897  drivers/scsi/eata_dma.c  (u32) sp->scsi_stat, buff, sp));
buff  911  drivers/scsi/eata_dma.c  return (buff);
buff  989  drivers/scsi/eata_dma.c  char *buff = 0;
buff  1062  drivers/scsi/eata_dma.c  buff = get_board_data(base, gc->IRQ, gc->scsi_id[3]);
buff  1064  drivers/scsi/eata_dma.c  if (buff == NULL) {
buff  1085  drivers/scsi/eata_dma.c  if (gc->DMA_support == FALSE && buff != NULL)
buff  1087  drivers/scsi/eata_dma.c  "flag correctly.\n", &buff[16], base);
buff  1168  drivers/scsi/eata_dma.c  strncpy(hd->vendor, &buff[8], 8);
buff  1170  drivers/scsi/eata_dma.c  strncpy(hd->name, &buff[16], 17);
buff  1172  drivers/scsi/eata_dma.c  hd->revision[0] = buff[32];
buff  1173  drivers/scsi/eata_dma.c  hd->revision[1] = buff[33];
buff  1174  drivers/scsi/eata_dma.c  hd->revision[2] = buff[34];
buff  1176  drivers/scsi/eata_dma.c  hd->revision[4] = buff[35];
buff  1178  drivers/scsi/eata_dma.c  hd->firmware_revision = (buff[32] << 24) + (buff[33] << 16)
buff  1179  drivers/scsi/eata_dma.c  + (buff[34] << 8) + buff[35];
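
Note: eata_dma.c unpacks the board's identify data straight out of a raw byte buffer: vendor string at offset 8, model name at 16, and a four-byte firmware revision at 32..35 assembled big-endian. A freestanding sketch of that unpacking; the struct, the NUL-termination, and the sample bytes are additions for illustration.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct host_data {               /* invented subset of eata_dma's hostdata */
        char vendor[9];
        char name[18];
        uint32_t firmware_revision;
    };

    static void parse_board_data(struct host_data *hd, const unsigned char *buff)
    {
        strncpy(hd->vendor, (const char *)&buff[8], 8);
        hd->vendor[8] = '\0';
        strncpy(hd->name, (const char *)&buff[16], 17);
        hd->name[17] = '\0';
        /* Big-endian assembly of the revision bytes, as in the excerpt. */
        hd->firmware_revision = ((uint32_t)buff[32] << 24) | ((uint32_t)buff[33] << 16)
                              | ((uint32_t)buff[34] << 8)  |  (uint32_t)buff[35];
    }

    int main(void)
    {
        unsigned char buff[64] = { 0 };
        memcpy(&buff[8],  "ACME    ", 8);     /* fabricated sample data */
        memcpy(&buff[16], "BOARD-2021      ", 16);
        memcpy(&buff[32], "07A0", 4);

        struct host_data hd;
        parse_board_data(&hd, buff);
        printf("vendor=%s name=%s rev=0x%08x\n",
               hd.vendor, hd.name, hd.firmware_revision);
        return 0;
    }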
buff  75  drivers/scsi/eata_dma_proc.c  static u8 buff[512];
buff  106  drivers/scsi/eata_dma_proc.c  memset(buff, 0, sizeof(buff));
buff  108  drivers/scsi/eata_dma_proc.c  cc = (coco *)     (buff + 0x148);
buff  109  drivers/scsi/eata_dma_proc.c  st = (scsitrans *)(buff + 0x164);
buff  110  drivers/scsi/eata_dma_proc.c  sm = (scsimod *)  (buff + 0x16c);
buff  111  drivers/scsi/eata_dma_proc.c  hb = (hobu *)     (buff + 0x172);
buff  112  drivers/scsi/eata_dma_proc.c  sb = (scbu *)     (buff + 0x178);
buff  113  drivers/scsi/eata_dma_proc.c  bt = (boty *)     (buff + 0x17e);
buff  114  drivers/scsi/eata_dma_proc.c  mc = (memco *)    (buff + 0x186);
buff  115  drivers/scsi/eata_dma_proc.c  fm = (firm *)     (buff + 0x18e);
buff  116  drivers/scsi/eata_dma_proc.c  si = (subinf *)   (buff + 0x196);
buff  117  drivers/scsi/eata_dma_proc.c  pi = (pcinf *)    (buff + 0x19c);
buff  118  drivers/scsi/eata_dma_proc.c  al = (arrlim *)   (buff + 0x1a2);
buff  191  drivers/scsi/eata_dma_proc.c  scsi_do_cmd (&scmd, cmnd, buff + 0x144, 0x66,
buff  545  drivers/scsi/eata_pio.c  static char buff[256];
buff  549  drivers/scsi/eata_pio.c  memset(buff, 0, sizeof(buff));
buff  581  drivers/scsi/eata_pio.c  insw(base+HA_RDATA, &buff, 127);
buff  583  drivers/scsi/eata_pio.c  return (buff);
buff  685  drivers/scsi/eata_pio.c  char *buff;
buff  699  drivers/scsi/eata_pio.c  if ((buff = get_pio_board_data((uint)base, gc->IRQ, gc->scsi_id[3],
buff  743  drivers/scsi/eata_pio.c  strncpy(SD(sh)->vendor, &buff[8], 8);
buff  745  drivers/scsi/eata_pio.c  strncpy(SD(sh)->name, &buff[16], 17);
buff  747  drivers/scsi/eata_pio.c  SD(sh)->revision[0] = buff[32];
buff  748  drivers/scsi/eata_pio.c  SD(sh)->revision[1] = buff[33];
buff  749  drivers/scsi/eata_pio.c  SD(sh)->revision[2] = buff[34];
buff  751  drivers/scsi/eata_pio.c  SD(sh)->revision[4] = buff[35];
buff  778  drivers/scsi/eata_pio.c  if (buff[21] == '4')
buff  780  drivers/scsi/eata_pio.c  else if (buff[21] == '2')
buff  46  drivers/scsi/eata_pio_proc.c  static u8 buff[512];
buff  63  drivers/scsi/eata_pio_proc.c  memset(buff, 0, sizeof(buff));
buff  179  drivers/scsi/scsi_debug.c  unsigned char * buff;
buff  192  drivers/scsi/scsi_debug.c  buff = (unsigned char *) SCpnt->request_buffer;
buff  215  drivers/scsi/scsi_debug.c  printk("scsi_debug: Requesting sense buffer (%x %x %x %d):", SCpnt, buff, done, bufflen);
buff  220  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  221  drivers/scsi/scsi_debug.c  memcpy(buff, sense_buffer, bufflen);
buff  232  drivers/scsi/scsi_debug.c  printk("Inquiry...(%x %d)\n", buff, bufflen);
buff  233  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  234  drivers/scsi/scsi_debug.c  buff[0] = TYPE_DISK;
buff  235  drivers/scsi/scsi_debug.c  buff[1] = 0x80;  /* Removable disk */
buff  236  drivers/scsi/scsi_debug.c  buff[2] = 1;
buff  237  drivers/scsi/scsi_debug.c  buff[4] = 33 - 5;
buff  238  drivers/scsi/scsi_debug.c  memcpy(&buff[8],"Foo Inc",7);
buff  239  drivers/scsi/scsi_debug.c  memcpy(&buff[16],"XYZZY",5);
buff  240  drivers/scsi/scsi_debug.c  memcpy(&buff[32],"1",1);
buff  244  drivers/scsi/scsi_debug.c  printk("Test unit ready(%x %d)\n", buff, bufflen);
buff  245  drivers/scsi/scsi_debug.c  if (buff)
buff  246  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  252  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  253  drivers/scsi/scsi_debug.c  buff[0] = (CAPACITY >> 24);
buff  254  drivers/scsi/scsi_debug.c  buff[1] = (CAPACITY >> 16) & 0xff;
buff  255  drivers/scsi/scsi_debug.c  buff[2] = (CAPACITY >> 8) & 0xff;
buff  256  drivers/scsi/scsi_debug.c  buff[3] = CAPACITY & 0xff;
buff  257  drivers/scsi/scsi_debug.c  buff[6] = 2; /* 512 byte sectors */
buff  288  drivers/scsi/scsi_debug.c  sgpnt = (struct scatterlist *) buff;
buff  289  drivers/scsi/scsi_debug.c  buff = sgpnt[sgcount].address;
buff  298  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  303  drivers/scsi/scsi_debug.c  memset(buff, 0, bufflen);
buff  304  drivers/scsi/scsi_debug.c  *((unsigned short *) (buff+510)) = 0xAA55;
buff  305  drivers/scsi/scsi_debug.c  p = (struct partition* ) (buff + 0x1be);
buff  345  drivers/scsi/scsi_debug.c  memcpy(buff, &target, sizeof(target));
buff  346  drivers/scsi/scsi_debug.c  memcpy(buff+sizeof(target), cmd, 24);
buff  347  drivers/scsi/scsi_debug.c  memcpy(buff+60, &block, sizeof(block));
buff  348  drivers/scsi/scsi_debug.c  memcpy(buff+64, SCpnt, sizeof(Scsi_Cmnd));
buff  353  drivers/scsi/scsi_debug.c  memcpy(buff+128, bh, sizeof(struct buffer_head));
buff  360  drivers/scsi/scsi_debug.c  buff = sgpnt[sgcount].address;
buff  387  drivers/scsi/scsi_debug.c  sgpnt = (struct scatterlist *) buff;
buff  388  drivers/scsi/scsi_debug.c  buff = sgpnt[sgcount].address;
buff  391  drivers/scsi/scsi_debug.c  if (block != *((unsigned long *) (buff+60))) {
buff  392  drivers/scsi/scsi_debug.c  printk("%x %x :",block,  *((unsigned long *) (buff+60)));
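
Note: among the scsi_debug.c rows, the CAPACITY block fakes a READ CAPACITY response: the block count goes big-endian into bytes 0..3, and byte 6 = 2 encodes a 512-byte block length in the big-endian length field at bytes 4..7 (0x00000200 = 512). A compilable sketch of that response construction, with CAPACITY as an arbitrary sample value rather than scsi_debug's own:

    #include <stdio.h>
    #include <string.h>

    #define CAPACITY 0x4000u   /* sample value; scsi_debug defines its own */

    /* Build the 8-byte READ CAPACITY payload the way the excerpt does. */
    static void fake_read_capacity(unsigned char *buff, int bufflen)
    {
        memset(buff, 0, bufflen);
        buff[0] = (CAPACITY >> 24) & 0xff;   /* block count, big-endian */
        buff[1] = (CAPACITY >> 16) & 0xff;
        buff[2] = (CAPACITY >> 8) & 0xff;
        buff[3] = CAPACITY & 0xff;
        buff[6] = 2;                         /* 512-byte sectors */
    }

    int main(void)
    {
        unsigned char buff[8];
        fake_read_capacity(buff, sizeof buff);
        for (int i = 0; i < 8; i++)
            printf("%02x ", buff[i]);
        printf("\n");
        return 0;
    }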
buff  513  drivers/scsi/sd.c  char * buff, *bounce_buffer;
buff  628  drivers/scsi/sd.c  buff = SCpnt->request.buffer;
buff  647  drivers/scsi/sd.c  buff = SCpnt->request.buffer;
buff  692  drivers/scsi/sd.c  buff = SCpnt->request.buffer;
buff  697  drivers/scsi/sd.c  buff = (char *) sgpnt;
buff  738  drivers/scsi/sd.c  buff = SCpnt->request.buffer;
buff  818  drivers/scsi/sd.c  if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
buff  821  drivers/scsi/sd.c  buff = bounce_buffer;
buff  823  drivers/scsi/sd.c  buff = (char *) scsi_malloc(this_count << 9);
buff  824  drivers/scsi/sd.c  if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
buff  826  drivers/scsi/sd.c  buff = (char *) scsi_malloc(this_count << 9);
buff  827  drivers/scsi/sd.c  if(!buff) panic("Ran out of DMA buffers.");
buff  830  drivers/scsi/sd.c  memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
buff  888  drivers/scsi/sd.c  scsi_do_cmd (SCpnt, (void *) cmd, buff,
buff  95  drivers/scsi/seagate.c  void *buff, int bufflen, int reselect);
buff  620  drivers/scsi/seagate.c  void *buff, int bufflen, int reselect) {
buff  56  drivers/scsi/sg.c  char *buff;   /* the buffer */
buff  64  drivers/scsi/sg.c  static void sg_free(char *buff,int size);
buff  136  drivers/scsi/sg.c  if (scsi_generics[dev].buff != NULL)
buff  137  drivers/scsi/sg.c  sg_free(scsi_generics[dev].buff,scsi_generics[dev].buff_len);
buff  138  drivers/scsi/sg.c  scsi_generics[dev].buff=NULL;
buff  181  drivers/scsi/sg.c  static void sg_free(char *buff,int size)
buff  184  drivers/scsi/sg.c  if (buff==big_buff)
buff  191  drivers/scsi/sg.c  scsi_free(buff,size);
buff  241  drivers/scsi/sg.c  memcpy_tofs(buf,device->buff,count-sizeof(struct sg_header));
buff  251  drivers/scsi/sg.c  sg_free(device->buff,device->buff_len);
buff  252  drivers/scsi/sg.c  device->buff = NULL;
buff  389  drivers/scsi/sg.c  if ((bsize<0) || !(device->buff=sg_malloc(device->buff_len=bsize)))
buff  408  drivers/scsi/sg.c  sg_free(device->buff,device->buff_len);
buff  409  drivers/scsi/sg.c  device->buff = NULL;
buff  432  drivers/scsi/sg.c  if (input_size > 0) memcpy_fromfs(device->buff, buf, input_size);
buff  449  drivers/scsi/sg.c  (void *) device->buff,amt,
buff  576  drivers/scsi/sg.c  scsi_generics[i].buff=NULL;
buff  178  drivers/sound/audio.c  translate_bytes (const unsigned char *table, unsigned char *buff, int n)
buff  186  drivers/sound/audio.c  buff[i] = table[buff[i]];
buff  191  drivers/sound/audio.c  translate_bytes (const void *table, void *buff, int n)
buff  200  drivers/sound/audio.c  :     "b" ((long) table), "c" (n), "D" ((long) buff), "S" ((long) buff)
buff  51  fs/ncpfs/sock.c  static int _sendto(struct socket *sock, const void *buff,
buff  59  fs/ncpfs/sock.c  iov.iov_base = (void *)buff;
buff  47  fs/smbfs/sock.c  static int _send(struct socket *sock, const void *buff, int len, int nonblock, unsigned flags) {
buff  51  fs/smbfs/sock.c  iov.iov_base = (void *)buff;
buff  33  include/asm-alpha/checksum.h  extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
buff  56  include/asm-alpha/checksum.h  extern unsigned short ip_compute_csum(unsigned char * buff, int len);
buff  28  include/asm-alpha/ipsum.h  extern inline unsigned short ip_compute_csum(unsigned char * buff, int len)
buff  39  include/asm-alpha/ipsum.h  static inline unsigned short ip_fast_csum(unsigned char * buff, int wlen)
buff  16  include/asm-i386/checksum.h  unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
buff  117  include/asm-i386/checksum.h  static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
buff  118  include/asm-i386/checksum.h  return csum_fold (csum_partial(buff, len, 0));
buff  16  include/asm-m68k/checksum.h  unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
buff  112  include/asm-m68k/checksum.h  ip_compute_csum(unsigned char * buff, int len)
buff  123  include/asm-m68k/checksum.h  : "0" (csum_partial(buff, len, 0)));
buff  25  include/asm-mips/checksum.h  unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
buff  161  include/asm-mips/checksum.h  static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
buff  173  include/asm-mips/checksum.h  : "r" (csum_partial(buff, len, 0))
buff  33  include/asm-ppc/checksum.h  extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
buff  56  include/asm-ppc/checksum.h  extern unsigned short ip_compute_csum(unsigned char * buff, int len);
buff  69  include/asm-sparc/checksum.h  extern inline unsigned long do_csum(unsigned char * buff, int len)
buff  76  include/asm-sparc/checksum.h  odd = 1 & (unsigned long) buff;
buff  78  include/asm-sparc/checksum.h  result = *buff;
buff  80  include/asm-sparc/checksum.h  buff++;
buff  84  include/asm-sparc/checksum.h  if (2 & (unsigned long) buff) {
buff  85  include/asm-sparc/checksum.h  result += *(unsigned short *) buff;
buff  88  include/asm-sparc/checksum.h  buff += 2;
buff  94  include/asm-sparc/checksum.h  unsigned long w = *(unsigned long *) buff;
buff  96  include/asm-sparc/checksum.h  buff += 4;
buff  105  include/asm-sparc/checksum.h  result += *(unsigned short *) buff;
buff  106  include/asm-sparc/checksum.h  buff += 2;
buff  110  include/asm-sparc/checksum.h  result += (*buff << 8);
buff  169  include/asm-sparc/checksum.h  extern inline unsigned int csum_partial(unsigned char * buff, int len, unsigned int sum)
buff  261  include/asm-sparc/checksum.h  "=&r" (buff), "=&r" (len), "=&r" (sum) :
buff  262  include/asm-sparc/checksum.h  "0" (buff), "1" (len), "2" (sum) :
buff  293  include/asm-sparc/checksum.h  extern inline unsigned short ip_compute_csum(unsigned char * buff, int len)
buff  295  include/asm-sparc/checksum.h  return ~from32to16(do_csum(buff,len));
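
Note: both the alpha and sparc listings end by folding a wide partial sum back to 16 bits (~from64to16(...), ~from32to16(...)). Neither helper's body appears in the rows above; the sketch below is a plausible reading consistent with how ones-complement sums fold, not a transcription of the kernel source.

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit ones-complement partial sum to 16 bits.
     * Sketch of what from32to16() in the sparc header presumably does. */
    static uint16_t from32to16(uint32_t x)
    {
        x = (x & 0xffff) + (x >> 16);   /* add the high half into the low half */
        x = (x & 0xffff) + (x >> 16);   /* absorb the possible carry */
        return (uint16_t)x;
    }

    /* Same idea one level up, for the alpha's 64-bit accumulator. */
    static uint16_t from64to16(uint64_t x)
    {
        x = (x & 0xffffffffu) + (x >> 32);
        x = (x & 0xffffffffu) + (x >> 32);
        return from32to16((uint32_t)x);
    }

    int main(void)
    {
        printf("0x%04x 0x%04x\n",
               from32to16(0x0001fffeu),            /* folds to 0xffff */
               from64to16(0x00000001fffffffeull));
        return 0;
    }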
buff  34  include/linux/etherdevice.h  extern int    eth_rebuild_header(void *buff, struct device *dev,
buff  34  include/linux/trdevice.h  extern int    tr_rebuild_header(void *buff, struct device *dev,
buff  35  include/net/protocol.h  void      (*err_handler)(int type, int code, unsigned char *buff,
buff  29  include/net/raw.h  extern int  raw_read(struct sock *sk, unsigned char *buff,
buff  352  include/net/sock.h  int      (*rcv)(struct sk_buff *buff, struct device *dev,
buff  41  include/net/udp.h  extern int  udp_read(struct sock *sk, unsigned char *buff,
buff  71  net/802/tr.c  int tr_rebuild_header(void *buff, struct device *dev, unsigned long dest,
buff  74  net/802/tr.c  struct trh_hdr *trh=(struct trh_hdr *)buff;
buff  75  net/802/tr.c  struct trllc *trllc=(struct trllc *)(buff+sizeof(struct trh_hdr));
buff  2491  net/ax25/af_ax25.c  unsigned char *buff = skb_push(skb, AX25_HEADER_LEN);
buff  2493  net/ax25/af_ax25.c  *buff++ = 0;  /* KISS DATA */
buff  2496  net/ax25/af_ax25.c  memcpy(buff, daddr, dev->addr_len);  /* Address specified */
buff  2498  net/ax25/af_ax25.c  buff[6] &= ~LAPB_C;
buff  2499  net/ax25/af_ax25.c  buff[6] &= ~LAPB_E;
buff  2500  net/ax25/af_ax25.c  buff[6] |= SSSID_SPARE;
buff  2501  net/ax25/af_ax25.c  buff += AX25_ADDR_LEN;
buff  2504  net/ax25/af_ax25.c  memcpy(buff, saddr, dev->addr_len);
buff  2506  net/ax25/af_ax25.c  memcpy(buff, dev->dev_addr, dev->addr_len);
buff  2508  net/ax25/af_ax25.c  buff[6] &= ~LAPB_C;
buff  2509  net/ax25/af_ax25.c  buff[6] |= LAPB_E;
buff  2510  net/ax25/af_ax25.c  buff[6] |= SSSID_SPARE;
buff  2511  net/ax25/af_ax25.c  buff   += AX25_ADDR_LEN;
buff  2513  net/ax25/af_ax25.c  *buff++ = LAPB_UI;  /* UI */
buff  2518  net/ax25/af_ax25.c  *buff++ = AX25_P_IP;
buff  2522  net/ax25/af_ax25.c  *buff++ = AX25_P_ARP;
buff  2526  net/ax25/af_ax25.c  *buff++ = 0;
buff  140  net/ethernet/eth.c  int eth_rebuild_header(void *buff, struct device *dev, unsigned long dst,
buff  143  net/ethernet/eth.c  struct ethhdr *eth = (struct ethhdr *)buff;
buff  1851  net/ipv4/tcp.c  struct sk_buff *buff;
buff  1899  net/ipv4/tcp.c  buff = sock_wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
buff  1900  net/ipv4/tcp.c  if (buff == NULL)
buff  1905  net/ipv4/tcp.c  buff->sk = sk;
buff  1906  net/ipv4/tcp.c  buff->free = 0;
buff  1907  net/ipv4/tcp.c  buff->localroute = sk->localroute;
buff  1914  net/ipv4/tcp.c  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
buff  1918  net/ipv4/tcp.c  sock_wfree(sk, buff);
buff  1926  net/ipv4/tcp.c  t1 = (struct tcphdr *) skb_put(buff,sizeof(struct tcphdr));
buff  1929  net/ipv4/tcp.c  buff->seq = sk->write_seq++;
buff  1930  net/ipv4/tcp.c  t1->seq = htonl(buff->seq);
buff  1932  net/ipv4/tcp.c  buff->end_seq = sk->write_seq;
buff  1979  net/ipv4/tcp.c  ptr = skb_put(buff,4);
buff  1984  net/ipv4/tcp.c  buff->csum = csum_partial(ptr, 4, 0);
buff  1986  net/ipv4/tcp.c  sizeof(struct tcphdr) + 4, buff);
buff  2006  net/ipv4/tcp.c  sk->prot->queue_xmit(sk, dev, buff, 0);
buff  564  net/ipv4/tcp_output.c  struct sk_buff *buff;
buff  581  net/ipv4/tcp_output.c  buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
buff  582  net/ipv4/tcp_output.c  if (buff == NULL)
buff  585  net/ipv4/tcp_output.c  buff->sk = NULL;
buff  586  net/ipv4/tcp_output.c  buff->dev = dev;
buff  587  net/ipv4/tcp_output.c  buff->localroute = 0;
buff  588  net/ipv4/tcp_output.c  buff->csum = 0;
buff  594  net/ipv4/tcp_output.c  tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
buff  598  net/ipv4/tcp_output.c  buff->free = 1;
buff  599  net/ipv4/tcp_output.c  sock_wfree(NULL, buff);
buff  603  net/ipv4/tcp_output.c  t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  628  net/ipv4/tcp_output.c  tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
buff  629  net/ipv4/tcp_output.c  prot->queue_xmit(NULL, ndev, buff, 1);
buff  642  net/ipv4/tcp_output.c  struct sk_buff *buff;
buff  646  net/ipv4/tcp_output.c  buff = sock_wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
buff  648  net/ipv4/tcp_output.c  if (buff == NULL)
buff  659  net/ipv4/tcp_output.c  buff->sk = sk;
buff  660  net/ipv4/tcp_output.c  buff->localroute = sk->localroute;
buff  661  net/ipv4/tcp_output.c  buff->csum = 0;
buff  667  net/ipv4/tcp_output.c  tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
buff  678  net/ipv4/tcp_output.c  buff->free = 1;
buff  679  net/ipv4/tcp_output.c  sock_wfree(sk,buff);
buff  694  net/ipv4/tcp_output.c  t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  695  net/ipv4/tcp_output.c  buff->dev = dev;
buff  697  net/ipv4/tcp_output.c  buff->seq = sk->write_seq;
buff  699  net/ipv4/tcp_output.c  buff->end_seq = sk->write_seq;
buff  700  net/ipv4/tcp_output.c  t1->seq = htonl(buff->seq);
buff  704  net/ipv4/tcp_output.c  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
buff  713  net/ipv4/tcp_output.c  buff->free = 0;
buff  714  net/ipv4/tcp_output.c  if (buff->next != NULL)
buff  717  net/ipv4/tcp_output.c  skb_unlink(buff);
buff  719  net/ipv4/tcp_output.c  skb_queue_tail(&sk->write_queue, buff);
buff  724  net/ipv4/tcp_output.c  sk->prot->queue_xmit(sk, dev, buff, 0);
buff  734  net/ipv4/tcp_output.c  struct sk_buff * buff;
buff  738  net/ipv4/tcp_output.c  buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
buff  739  net/ipv4/tcp_output.c  if (buff == NULL)
buff  748  net/ipv4/tcp_output.c  buff->sk = newsk;
buff  749  net/ipv4/tcp_output.c  buff->localroute = newsk->localroute;
buff  755  net/ipv4/tcp_output.c  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
buff  765  net/ipv4/tcp_output.c  buff->free = 1;
buff  766  net/ipv4/tcp_output.c  kfree_skb(buff,FREE_WRITE);
buff  774  net/ipv4/tcp_output.c  t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  777  net/ipv4/tcp_output.c  buff->seq = newsk->write_seq++;
buff  778  net/ipv4/tcp_output.c  buff->end_seq = newsk->write_seq;
buff  784  net/ipv4/tcp_output.c  t1->seq = ntohl(buff->seq);
buff  794  net/ipv4/tcp_output.c  ptr = skb_put(buff,4);
buff  799  net/ipv4/tcp_output.c  buff->csum = csum_partial(ptr, 4, 0);
buff  800  net/ipv4/tcp_output.c  tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, buff);
buff  801  net/ipv4/tcp_output.c  newsk->prot->queue_xmit(newsk, ndev, buff, 0);
buff  856  net/ipv4/tcp_output.c  struct sk_buff *buff;
buff  887  net/ipv4/tcp_output.c  buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
buff  888  net/ipv4/tcp_output.c  if (buff == NULL)
buff  905  net/ipv4/tcp_output.c  buff->sk = sk;
buff  906  net/ipv4/tcp_output.c  buff->localroute = sk->localroute;
buff  907  net/ipv4/tcp_output.c  buff->csum = 0;
buff  913  net/ipv4/tcp_output.c  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
buff  917  net/ipv4/tcp_output.c  buff->free = 1;
buff  918  net/ipv4/tcp_output.c  sock_wfree(sk, buff);
buff  921  net/ipv4/tcp_output.c  t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  932  net/ipv4/tcp_output.c  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
buff  935  net/ipv4/tcp_output.c  sk->prot->queue_xmit(sk, dev, buff, 1);
buff  946  net/ipv4/tcp_output.c  struct sk_buff *buff,*skb;
buff  1003  net/ipv4/tcp_output.c  buff = sock_wmalloc(sk, win_size + th->doff * 4 +
buff  1007  net/ipv4/tcp_output.c  if ( buff == NULL )
buff  1015  net/ipv4/tcp_output.c  buff->free = /*0*/1;
buff  1017  net/ipv4/tcp_output.c  buff->sk = sk;
buff  1018  net/ipv4/tcp_output.c  buff->localroute = sk->localroute;
buff  1024  net/ipv4/tcp_output.c  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
buff  1025  net/ipv4/tcp_output.c  IPPROTO_TCP, sk->opt, buff->truesize,
buff  1029  net/ipv4/tcp_output.c  sock_wfree(sk, buff);
buff  1037  net/ipv4/tcp_output.c  buff->dev = dev;
buff  1039  net/ipv4/tcp_output.c  nth = (struct tcphdr *) skb_put(buff,sizeof(*th));
buff  1056  net/ipv4/tcp_output.c  buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff,win_size),
buff  1063  net/ipv4/tcp_output.c  buff->end_seq = sk->sent_seq + win_size;
buff  1064  net/ipv4/tcp_output.c  sk->sent_seq = buff->end_seq;    /* Hack */
buff  1073  net/ipv4/tcp_output.c  nth->doff * 4 + win_size , buff);
buff  1077  net/ipv4/tcp_output.c  buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
buff  1078  net/ipv4/tcp_output.c  if (buff == NULL)
buff  1081  net/ipv4/tcp_output.c  buff->free = 1;
buff  1082  net/ipv4/tcp_output.c  buff->sk = sk;
buff  1083  net/ipv4/tcp_output.c  buff->localroute = sk->localroute;
buff  1084  net/ipv4/tcp_output.c  buff->csum = 0;
buff  1090  net/ipv4/tcp_output.c  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
buff  1094  net/ipv4/tcp_output.c  sock_wfree(sk, buff);
buff  1098  net/ipv4/tcp_output.c  t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
buff  1110  net/ipv4/tcp_output.c  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
buff  1118  net/ipv4/tcp_output.c  sk->prot->queue_xmit(sk, dev, buff, 1);
buff  51  net/ipv4/utils.c  static char buff[18];
buff  55  net/ipv4/utils.c  sprintf(buff, "%d.%d.%d.%d",
buff  57  net/ipv4/utils.c  return(buff);
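
Note: the net/ipv4/utils.c rows show an address formatter (presumably in_ntoa(), going by kernels of this vintage) returning a pointer to a static 18-byte buffer: cheap, but non-reentrant, since a second call overwrites the first result. A compilable rendition of the same pattern; the signature and the main() example are assumptions.

    #include <stdio.h>
    #include <stdint.h>

    /* Static result buffer, as in the rows above: 18 bytes covers
     * "255.255.255.255" plus the terminator. Not reentrant. */
    static char *in_ntoa(uint32_t in)   /* address in network byte order */
    {
        static char buff[18];
        const unsigned char *p = (const unsigned char *)&in;
        sprintf(buff, "%d.%d.%d.%d", p[0], p[1], p[2], p[3]);
        return buff;
    }

    int main(void)
    {
        uint32_t addr = 0x0100007f;     /* 127.0.0.1 on a little-endian host */
        printf("%s\n", in_ntoa(addr));
        return 0;
    }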
buff  80  net/netrom/nr_dev.c  unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
buff  82  net/netrom/nr_dev.c  memcpy(buff, (saddr != NULL) ? saddr : dev->dev_addr, dev->addr_len);
buff  83  net/netrom/nr_dev.c  buff[6] &= ~LAPB_C;
buff  84  net/netrom/nr_dev.c  buff[6] &= ~LAPB_E;
buff  85  net/netrom/nr_dev.c  buff[6] |= SSSID_SPARE;
buff  86  net/netrom/nr_dev.c  buff    += AX25_ADDR_LEN;
buff  89  net/netrom/nr_dev.c  memcpy(buff, daddr, dev->addr_len);
buff  90  net/netrom/nr_dev.c  buff[6] &= ~LAPB_C;
buff  91  net/netrom/nr_dev.c  buff[6] |= LAPB_E;
buff  92  net/netrom/nr_dev.c  buff[6] |= SSSID_SPARE;
buff  93  net/netrom/nr_dev.c  buff    += AX25_ADDR_LEN;
buff  95  net/netrom/nr_dev.c  *buff++ = nr_default.ttl;
buff  97  net/netrom/nr_dev.c  *buff++ = NR_PROTO_IP;
buff  98  net/netrom/nr_dev.c  *buff++ = NR_PROTO_IP;
buff  99  net/netrom/nr_dev.c  *buff++ = 0;
buff  100  net/netrom/nr_dev.c  *buff++ = 0;
buff  101  net/netrom/nr_dev.c  *buff++ = NR_PROTOEXT;
buff  109  net/netrom/nr_dev.c  static int nr_rebuild_header(void *buff, struct device *dev,
buff  113  net/netrom/nr_dev.c  unsigned char *bp = (unsigned char *)buff;
buff  898  net/socket.c  asmlinkage int sys_send(int fd, void * buff, int len, unsigned flags)
buff  913  net/socket.c  err=verify_area(VERIFY_READ, buff, len);
buff  917  net/socket.c  iov.iov_base=buff;
buff  932  net/socket.c  asmlinkage int sys_sendto(int fd, void * buff, int len, unsigned flags,
buff  949  net/socket.c  err=verify_area(VERIFY_READ,buff,len);
buff  956  net/socket.c  iov.iov_base=buff;