From 118af321b24529d546cad1c4b6fccf02cd838384 Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Fri, 23 Mar 2007 11:27:01 -0400 Subject: [MTD] Delete unused header file linux/mtd/iflash.h. Delete the unreferenced header file include/linux/mtd/iflash.h. Signed-off-by: Robert P. J. Day Signed-off-by: David Woodhouse --- include/linux/mtd/iflash.h | 98 ---------------------------------------------- 1 file changed, 98 deletions(-) delete mode 100644 include/linux/mtd/iflash.h (limited to 'include') diff --git a/include/linux/mtd/iflash.h b/include/linux/mtd/iflash.h deleted file mode 100644 index 9aa5b4f02666..000000000000 --- a/include/linux/mtd/iflash.h +++ /dev/null @@ -1,98 +0,0 @@ -/* $Id: iflash.h,v 1.2 2000/11/13 18:01:54 dwmw2 Exp $ */ - -#ifndef __MTD_IFLASH_H__ -#define __MTD_IFLASH_H__ - -/* Extended CIS registers for Series 2 and 2+ cards */ -/* The registers are all offsets from 0x4000 */ -#define CISREG_CSR 0x0100 -#define CISREG_WP 0x0104 -#define CISREG_RDYBSY 0x0140 - -/* Extended CIS registers for Series 2 cards */ -#define CISREG_SLEEP 0x0118 -#define CISREG_RDY_MASK 0x0120 -#define CISREG_RDY_STATUS 0x0130 - -/* Extended CIS registers for Series 2+ cards */ -#define CISREG_VCR 0x010c - -/* Card Status Register */ -#define CSR_SRESET 0x20 /* Soft reset */ -#define CSR_CMWP 0x10 /* Common memory write protect */ -#define CSR_PWRDOWN 0x08 /* Power down status */ -#define CSR_CISWP 0x04 /* Common memory CIS WP */ -#define CSR_WP 0x02 /* Mechanical write protect */ -#define CSR_READY 0x01 /* Ready/busy status */ - -/* Write Protection Register */ -#define WP_BLKEN 0x04 /* Enable block locking */ -#define WP_CMWP 0x02 /* Common memory write protect */ -#define WP_CISWP 0x01 /* Common memory CIS WP */ - -/* Voltage Control Register */ -#define VCR_VCC_LEVEL 0x80 /* 0 = 5V, 1 = 3.3V */ -#define VCR_VPP_VALID 0x02 /* Vpp Valid */ -#define VCR_VPP_GEN 0x01 /* Integrated Vpp generator */ - -/* Ready/Busy Mode Register */ -#define RDYBSY_RACK 0x02 /* Ready acknowledge */ -#define RDYBSY_MODE 0x01 /* 1 = high performance */ - -#define LOW(x) ((x) & 0xff) - -/* 28F008SA-Compatible Command Set */ -#define IF_READ_ARRAY 0xffff -#define IF_INTEL_ID 0x9090 -#define IF_READ_CSR 0x7070 -#define IF_CLEAR_CSR 0x5050 -#define IF_WRITE 0x4040 -#define IF_BLOCK_ERASE 0x2020 -#define IF_ERASE_SUSPEND 0xb0b0 -#define IF_CONFIRM 0xd0d0 - -/* 28F016SA Performance Enhancement Commands */ -#define IF_READ_PAGE 0x7575 -#define IF_PAGE_SWAP 0x7272 -#define IF_SINGLE_LOAD 0x7474 -#define IF_SEQ_LOAD 0xe0e0 -#define IF_PAGE_WRITE 0x0c0c -#define IF_RDY_MODE 0x9696 -#define IF_RDY_LEVEL 0x0101 -#define IF_RDY_PULSE_WRITE 0x0202 -#define IF_RDY_PULSE_ERASE 0x0303 -#define IF_RDY_DISABLE 0x0404 -#define IF_LOCK_BLOCK 0x7777 -#define IF_UPLOAD_STATUS 0x9797 -#define IF_READ_ESR 0x7171 -#define IF_ERASE_UNLOCKED 0xa7a7 -#define IF_SLEEP 0xf0f0 -#define IF_ABORT 0x8080 -#define IF_UPLOAD_DEVINFO 0x9999 - -/* Definitions for Compatible Status Register */ -#define CSR_WR_READY 0x8080 /* Write state machine status */ -#define CSR_ERA_SUSPEND 0x4040 /* Erase suspend status */ -#define CSR_ERA_ERR 0x2020 /* Erase status */ -#define CSR_WR_ERR 0x1010 /* Data write status */ -#define CSR_VPP_LOW 0x0808 /* Vpp status */ - -/* Definitions for Global Status Register */ -#define GSR_WR_READY 0x8080 /* Write state machine status */ -#define GSR_OP_SUSPEND 0x4040 /* Operation suspend status */ -#define GSR_OP_ERR 0x2020 /* Device operation status */ -#define GSR_SLEEP 0x1010 /* Device sleep status */ -#define 
GSR_QUEUE_FULL 0x0808 /* Queue status */ -#define GSR_PAGE_AVAIL 0x0404 /* Page buffer available status */ -#define GSR_PAGE_READY 0x0202 /* Page buffer status */ -#define GSR_PAGE_SELECT 0x0101 /* Page buffer select status */ - -/* Definitions for Block Status Register */ -#define BSR_READY 0x8080 /* Block status */ -#define BSR_UNLOCK 0x4040 /* Block lock status */ -#define BSR_FAILED 0x2020 /* Block operation status */ -#define BSR_ABORTED 0x1010 /* Operation abort status */ -#define BSR_QUEUE_FULL 0x0808 /* Queue status */ -#define BSR_VPP_LOW 0x0404 /* Vpp status */ - -#endif /* __MTD_IFLASH_H__ */ -- cgit v1.2.3-59-g8ed1b From 0ecbc81adfcb9f15f86b05ff576b342ce81bbef8 Mon Sep 17 00:00:00 2001 From: Rodolfo Giometti Date: Mon, 26 Mar 2007 21:45:43 -0800 Subject: [MTD] [NOR] Support for auto locking flash on power up Auto unlock sectors on resume for auto locking flash on power up. Signed-off-by: Rodolfo Giometti Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse --- drivers/mtd/chips/cfi_cmdset_0001.c | 93 +++++++++++++++++++++++++++++++++++-- include/linux/mtd/mtd.h | 1 + 2 files changed, 91 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index f334959a335b..2f19fa78d24a 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -15,6 +15,8 @@ * - optimized write buffer method * 02/05/2002 Christopher Hoover / * - reworked lock/unlock/erase support for var size flash + * 21/03/2007 Rodolfo Giometti + * - auto unlock sectors on resume for auto locking flash on power up */ #include @@ -30,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -220,6 +223,15 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) } } +/* + * Some chips power-up with all sectors locked by default. 
+ */ +static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param) +{ + printk(KERN_INFO "Using auto-unlock on power-up/resume\n" ); + mtd->flags |= MTD_STUPID_LOCK; +} + static struct cfi_fixup cfi_fixup_table[] = { #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, @@ -232,6 +244,7 @@ static struct cfi_fixup cfi_fixup_table[] = { #endif { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, + { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, }, { 0, 0, NULL, NULL } }; @@ -460,6 +473,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; + mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL); } offset += (ersize * ernum); } @@ -1825,8 +1839,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd) } } -#ifdef DEBUG_LOCK_BITS -static int __xipram do_printlockstatus_oneblock(struct map_info *map, +static int __xipram do_getlockstatus_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) @@ -1840,8 +1853,17 @@ static int __xipram do_printlockstatus_oneblock(struct map_info *map, chip->state = FL_JEDEC_QUERY; status = cfi_read_query(map, adr+(2*ofs_factor)); xip_enable(map, chip, 0); + return status; +} + +#ifdef DEBUG_LOCK_BITS +static int __xipram do_printlockstatus_oneblock(struct map_info *map, + struct flchip *chip, + unsigned long adr, + int len, void *thunk) +{ printk(KERN_DEBUG "block status register for 0x%08lx is %x\n", - adr, status); + adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk)); return 0; } #endif @@ -2216,14 +2238,45 @@ static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, #endif +static void cfi_intelext_save_locks(struct mtd_info *mtd) +{ + struct mtd_erase_region_info *region; + int block, status, i; + unsigned long adr; + size_t len; + + for (i = 0; i < mtd->numeraseregions; i++) { + region = &mtd->eraseregions[i]; + if (!region->lockmap) + continue; + + for (block = 0; block < region->numblocks; block++){ + len = region->erasesize; + adr = region->offset + block * len; + + status = cfi_varsize_frob(mtd, + do_getlockstatus_oneblock, adr, len, 0); + if (status) + set_bit(block, region->lockmap); + else + clear_bit(block, region->lockmap); + } + } +} + static int cfi_intelext_suspend(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *extp = cfi->cmdset_priv; int i; struct flchip *chip; int ret = 0; + if ((mtd->flags & MTD_STUPID_LOCK) + && extp && (extp->FeatureSupport & (1 << 5))) + cfi_intelext_save_locks(mtd); + for (i=0; !ret && inumchips; i++) { chip = &cfi->chips[i]; @@ -2285,10 +2338,33 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) return ret; } +static void cfi_intelext_restore_locks(struct mtd_info *mtd) +{ + struct mtd_erase_region_info *region; + int block, i; + unsigned long adr; + size_t len; + + for (i = 0; i < mtd->numeraseregions; i++) { + region = &mtd->eraseregions[i]; + if (!region->lockmap) + continue; + + for (block = 0; block < region->numblocks; block++) { + len = region->erasesize; + adr = region->offset + block * len; + + if (!test_bit(block, region->lockmap)) + 
cfi_intelext_unlock(mtd, adr, len); + } + } +} + static void cfi_intelext_resume(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *extp = cfi->cmdset_priv; int i; struct flchip *chip; @@ -2307,6 +2383,10 @@ static void cfi_intelext_resume(struct mtd_info *mtd) spin_unlock(chip->mutex); } + + if ((mtd->flags & MTD_STUPID_LOCK) + && extp && (extp->FeatureSupport & (1 << 5))) + cfi_intelext_restore_locks(mtd); } static int cfi_intelext_reset(struct mtd_info *mtd) @@ -2347,12 +2427,19 @@ static void cfi_intelext_destroy(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; + struct mtd_erase_region_info *region; + int i; cfi_intelext_reset(mtd); unregister_reboot_notifier(&mtd->reboot_notifier); kfree(cfi->cmdset_priv); kfree(cfi->cfiq); kfree(cfi->chips[0].priv); kfree(cfi); + for (i = 0; i < mtd->numeraseregions; i++) { + region = &mtd->eraseregions[i]; + if (region->lockmap) + kfree(region->lockmap); + } kfree(mtd->eraseregions); } diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 3d956c3abb31..45d482ce8397 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -53,6 +53,7 @@ struct mtd_erase_region_info { u_int32_t offset; /* At which this region starts, from the beginning of the MTD */ u_int32_t erasesize; /* For this region */ u_int32_t numblocks; /* Number of blocks of erasesize in this region */ + unsigned long *lockmap; /* If keeping bitmap of locks */ }; /* -- cgit v1.2.3-59-g8ed1b From 5a6d41b32a17ca902ef50fdfa170d7f23264bad5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 14 Apr 2007 19:10:12 -0400 Subject: NFS: Ensure PG_writeback is cleared when writeback fails If the writebacks are cancelled via nfs_cancel_dirty_list, or due to the memory allocation failing in nfs_flush_one/nfs_flush_multi, then we must ensure that the PG_writeback flag is cleared. Also ensure that we actually own the PG_writeback flag whenever we schedule a new writeback by making nfs_set_page_writeback() return the value of test_set_page_writeback(). The PG_writeback page flag ends up replacing the functionality of the PG_FLUSHING nfs_page flag, so we rip that out too. 
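The ownership rule this change enforces can be modeled outside the kernel: only the caller that saw the writeback bit clear when it set it owns the flag, and every completion or failure path under its control must clear it exactly once. A minimal userspace sketch of that pattern — the names below are stand-ins, not the NFS or page-flag API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Bit 0 of this word stands in for PG_writeback on one page. */
static atomic_uint page_flags;

/* Returns the previous flag value, like test_set_page_writeback(). */
static bool set_writeback(void)
{
	return atomic_fetch_or(&page_flags, 1u) & 1u;
}

/* Clears the flag, like end_page_writeback(). */
static void end_writeback(void)
{
	atomic_fetch_and(&page_flags, ~1u);
}

int main(void)
{
	if (set_writeback() == 0) {
		/* We own the flag: every path from here, including the
		 * cancel and allocation-failure paths patched below,
		 * must clear it exactly once. */
		printf("write scheduled\n");
		end_writeback();
	} else {
		printf("page already under writeback, nothing to do\n");
	}
	return 0;
}
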
Signed-off-by: Trond Myklebust Cc: Peter Zijlstra Signed-off-by: Linus Torvalds --- fs/nfs/write.c | 22 +++++++++++++++------- include/linux/nfs_page.h | 1 - 2 files changed, 15 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 2867e6b7096f..e5d7cac569aa 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -218,9 +218,11 @@ int nfs_congestion_kb; #define NFS_CONGESTION_OFF_THRESH \ (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2)) -static void nfs_set_page_writeback(struct page *page) +static int nfs_set_page_writeback(struct page *page) { - if (!test_set_page_writeback(page)) { + int ret = test_set_page_writeback(page); + + if (!ret) { struct inode *inode = page->mapping->host; struct nfs_server *nfss = NFS_SERVER(inode); @@ -228,6 +230,7 @@ static void nfs_set_page_writeback(struct page *page) NFS_CONGESTION_ON_THRESH) set_bdi_congested(&nfss->backing_dev_info, WRITE); } + return ret; } static void nfs_end_page_writeback(struct page *page) @@ -277,10 +280,8 @@ static int nfs_page_mark_flush(struct page *page) spin_lock(req_lock); } spin_unlock(req_lock); - if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) { + if (nfs_set_page_writeback(page) == 0) nfs_mark_request_dirty(req); - nfs_set_page_writeback(page); - } ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); nfs_unlock_request(req); return ret; @@ -424,7 +425,6 @@ nfs_mark_request_dirty(struct nfs_page *req) static void nfs_redirty_request(struct nfs_page *req) { - clear_bit(PG_FLUSHING, &req->wb_flags); __set_page_dirty_nobuffers(req->wb_page); } @@ -434,7 +434,11 @@ nfs_redirty_request(struct nfs_page *req) static inline int nfs_dirty_request(struct nfs_page *req) { - return test_bit(PG_FLUSHING, &req->wb_flags) == 0; + struct page *page = req->wb_page; + + if (page == NULL) + return 0; + return !PageWriteback(req->wb_page); } #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) @@ -500,6 +504,7 @@ static void nfs_cancel_dirty_list(struct list_head *head) while(!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); + nfs_end_page_writeback(req->wb_page); nfs_inode_remove_request(req); nfs_clear_page_writeback(req); } @@ -890,6 +895,7 @@ out_bad: list_del(&data->pages); nfs_writedata_release(data); } + nfs_end_page_writeback(req->wb_page); nfs_redirty_request(req); nfs_clear_page_writeback(req); return -ENOMEM; @@ -935,6 +941,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) while (!list_empty(head)) { struct nfs_page *req = nfs_list_entry(head->next); nfs_list_remove_request(req); + nfs_end_page_writeback(req->wb_page); nfs_redirty_request(req); nfs_clear_page_writeback(req); } @@ -970,6 +977,7 @@ out_err: while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); + nfs_end_page_writeback(req->wb_page); nfs_redirty_request(req); nfs_clear_page_writeback(req); } diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 2e555d49c9b7..d111be639140 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -31,7 +31,6 @@ #define PG_NEED_COMMIT 1 #define PG_NEED_RESCHED 2 #define PG_NEED_FLUSH 3 -#define PG_FLUSHING 4 struct nfs_inode; struct nfs_page { -- cgit v1.2.3-59-g8ed1b From 8c60e5475d8ca614d712cd3e2fe7330480709e02 Mon Sep 17 00:00:00 2001 From: "sshahrom@micron.com" Date: Wed, 21 Mar 2007 18:48:02 -0700 Subject: [MTD][NAND] Add Micron Manufacturer ID Add Micron Manufacturer ID. 
Signed-off-by: Shahrom Sharif Signed-off-by: Thomas Gleixner Signed-off-by: David Woodhouse --- drivers/mtd/nand/nand_ids.c | 1 + include/linux/mtd/nand.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 2e2cdf2fc91d..900207769167 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -137,6 +137,7 @@ struct nand_manufacturers nand_manuf_ids[] = { {NAND_MFR_RENESAS, "Renesas"}, {NAND_MFR_STMICRO, "ST Micro"}, {NAND_MFR_HYNIX, "Hynix"}, + {NAND_MFR_MICRON, "Micron"}, {0x0, "Unknown"} }; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 97523887fe5d..cf197ad62da6 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -431,6 +431,7 @@ struct nand_chip { #define NAND_MFR_RENESAS 0x07 #define NAND_MFR_STMICRO 0x20 #define NAND_MFR_HYNIX 0xad +#define NAND_MFR_MICRON 0x2c /** * struct nand_flash_dev - NAND Flash Device ID Structure -- cgit v1.2.3-59-g8ed1b From b4dfa0b1fb39c7ffe74741d60668825de6a47b69 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 17 Apr 2007 12:28:27 -0700 Subject: [NET]: Get rid of alloc_skb_from_cache Since this was added originally for Xen, and Xen has recently (~2.6.18) stopped using this function, we can safely get rid of it. Good timing too since this function has started to bit rot. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- arch/x86_64/kernel/functionlist | 1 - include/linux/skbuff.h | 3 --- net/core/skbuff.c | 55 ----------------------------------------- 3 files changed, 59 deletions(-) (limited to 'include') diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist index 01fa23580c85..7ae18ec12454 100644 --- a/arch/x86_64/kernel/functionlist +++ b/arch/x86_64/kernel/functionlist @@ -514,7 +514,6 @@ *(.text.dentry_open) *(.text.dentry_iput) *(.text.bio_alloc) -*(.text.alloc_skb_from_cache) *(.text.wait_on_page_bit) *(.text.vfs_readdir) *(.text.vfs_lstat) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 82f43ad478c7..0e86b6007a0a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -346,9 +346,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, return __alloc_skb(size, priority, 1, -1); } -extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, - unsigned int size, - gfp_t priority); extern void kfree_skbmem(struct sk_buff *skb); extern struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 87573ae35b02..336958fbbcb2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -196,61 +196,6 @@ nodata: goto out; } -/** - * alloc_skb_from_cache - allocate a network buffer - * @cp: kmem_cache from which to allocate the data area - * (object size must be big enough for @size bytes + skb overheads) - * @size: size to allocate - * @gfp_mask: allocation mask - * - * Allocate a new &sk_buff. The returned buffer has no headroom and - * tail room of size bytes. The object has a reference count of one. - * The return is the buffer. On a failure the return is %NULL. - * - * Buffers may only be allocated from interrupts using a @gfp_mask of - * %GFP_ATOMIC. - */ -struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, - unsigned int size, - gfp_t gfp_mask) -{ - struct sk_buff *skb; - u8 *data; - - /* Get the HEAD */ - skb = kmem_cache_alloc(skbuff_head_cache, - gfp_mask & ~__GFP_DMA); - if (!skb) - goto out; - - /* Get the DATA. 
*/ - size = SKB_DATA_ALIGN(size); - data = kmem_cache_alloc(cp, gfp_mask); - if (!data) - goto nodata; - - memset(skb, 0, offsetof(struct sk_buff, truesize)); - skb->truesize = size + sizeof(struct sk_buff); - atomic_set(&skb->users, 1); - skb->head = data; - skb->data = data; - skb->tail = data; - skb->end = data + size; - - atomic_set(&(skb_shinfo(skb)->dataref), 1); - skb_shinfo(skb)->nr_frags = 0; - skb_shinfo(skb)->gso_size = 0; - skb_shinfo(skb)->gso_segs = 0; - skb_shinfo(skb)->gso_type = 0; - skb_shinfo(skb)->frag_list = NULL; -out: - return skb; -nodata: - kmem_cache_free(skbuff_head_cache, skb); - skb = NULL; - goto out; -} - /** * __netdev_alloc_skb - allocate an skbuff for rx on a specific device * @dev: network device to receive on -- cgit v1.2.3-59-g8ed1b From c2ecba71717c4f60671175fd26083c35a4b9ad58 Mon Sep 17 00:00:00 2001 From: Pavel Emelianov Date: Tue, 17 Apr 2007 12:45:31 -0700 Subject: [NET]: Set a separate lockdep class for neighbour table's proxy_queue Otherwise the following calltrace will lead to a wrong lockdep warning: neigh_proxy_process() `- lock(neigh_table->proxy_queue.lock); arp_redo /* via tbl->proxy_redo */ arp_process neigh_event_ns neigh_update skb_queue_purge `- lock(neighbor->arp_queue.lock); This is not a deadlock actually, as neighbor table's proxy_queue and the neighbor's arp_queue are different queues. Lockdep thinks there is a deadlock as both queues are initialized with skb_queue_head_init() and thus have a common class. Signed-off-by: David S. Miller --- include/linux/skbuff.h | 7 +++++++ net/core/neighbour.c | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0e86b6007a0a..5992f65b4184 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -619,6 +619,13 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) list->qlen = 0; } +static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) +{ + skb_queue_head_init(list); + lockdep_set_class(&list->lock, class); +} + /* * Insert an sk_buff at the start of a list. * diff --git a/net/core/neighbour.c b/net/core/neighbour.c index cfc60019cf92..841e3f32cab1 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1331,6 +1331,8 @@ void neigh_parms_destroy(struct neigh_parms *parms) kfree(parms); } +static struct lock_class_key neigh_table_proxy_queue_class; + void neigh_table_init_no_netlink(struct neigh_table *tbl) { unsigned long now = jiffies; @@ -1379,7 +1381,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) init_timer(&tbl->proxy_timer); tbl->proxy_timer.data = (unsigned long)tbl; tbl->proxy_timer.function = neigh_proxy_process; - skb_queue_head_init(&tbl->proxy_queue); + skb_queue_head_init_class(&tbl->proxy_queue, + &neigh_table_proxy_queue_class); tbl->last_flush = now; tbl->last_rand = now + tbl->parms.reachable_time * 20; -- cgit v1.2.3-59-g8ed1b From c4b7e8754e3198eb5392568e523da6440143c2cd Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Mon, 16 Apr 2007 22:53:12 -0700 Subject: allow vmsplice to work in 32-bit mode on ppc64 Trivial change to pass vmsplice arguments through the compat layer on pp64. 
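The reason a plain syscall-table entry is not enough here: vmsplice takes a pointer to an array of struct iovec, and that structure is half the size in a 32-bit process, so a 64-bit kernel must translate the array rather than read it directly. A small sketch of the two layouts (the 32-bit mirror below is illustrative, not the kernel's compat_iovec definition):

#include <stdio.h>
#include <stdint.h>
#include <sys/uio.h>

/* Hypothetical mirror of the layout a 32-bit caller passes for each
 * vmsplice segment; pointer and length are 4 bytes each there. */
struct iovec32 {
	uint32_t iov_base;
	uint32_t iov_len;
};

int main(void)
{
	/* On a 64-bit build this prints 16 versus 8 bytes per segment. */
	printf("64-bit struct iovec: %zu bytes\n", sizeof(struct iovec));
	printf("32-bit struct iovec: %zu bytes\n", sizeof(struct iovec32));
	return 0;
}
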
Signed-off-by: Don Zickus Acked-by: Stephen Rothwell Acked-by: Paul Mackerras Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-powerpc/systbl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h index 8d853c554631..0b00068313f9 100644 --- a/include/asm-powerpc/systbl.h +++ b/include/asm-powerpc/systbl.h @@ -288,7 +288,7 @@ COMPAT_SYS(ppoll) SYSCALL_SPU(unshare) SYSCALL_SPU(splice) SYSCALL_SPU(tee) -SYSCALL_SPU(vmsplice) +COMPAT_SYS_SPU(vmsplice) COMPAT_SYS_SPU(openat) SYSCALL_SPU(mkdirat) SYSCALL_SPU(mknodat) -- cgit v1.2.3-59-g8ed1b From 112654208bd6f092e064973b8fa680e37ffa74a6 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 16 Apr 2007 22:53:15 -0700 Subject: kernel-doc: fix plist.h comments Make kernel-doc comments match macro names. Correct parameter names in a few places. Remove '#' from beginning of kernel-doc comment macro names. Remove extra (erroneous) blank lines in kernel-doc. Warning(plist.h:100): Cannot understand * #PLIST_HEAD_INIT - static struct plist_head initializer on line 100 - I thought it was a doc line Warning(plist.h:112): Cannot understand * #PLIST_NODE_INIT - static struct plist_node initializer on line 112 - I thought it was a doc line Warning(plist.h:103): No description found for parameter '_lock' Warning(plist.h:129): No description found for parameter 'lock' Warning(plist.h:158): No description found for parameter 'pos' Warning(plist.h:169): No description found for parameter 'pos' Warning(plist.h:169): No description found for parameter 'n' Warning(plist.h:179): No description found for parameter 'mem' This still leaves one warning & one error that need attention: Error(plist.h:219): cannot understand prototype: '(' Warning(plist.h): no structured comments found Acked-by: Inaky Perez-Gonzalez Cc: Daniel Walker Cc: Thomas Gleixner Cc: Oleg Nesterov Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/plist.h | 54 ++++++++++++++++++++++----------------------------- 1 file changed, 23 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/include/linux/plist.h b/include/linux/plist.h index b95818a037ad..85de2f055874 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -97,9 +97,9 @@ struct plist_node { #endif /** - * #PLIST_HEAD_INIT - static struct plist_head initializer - * + * PLIST_HEAD_INIT - static struct plist_head initializer * @head: struct plist_head variable name + * @_lock: lock to initialize for this list */ #define PLIST_HEAD_INIT(head, _lock) \ { \ @@ -109,8 +109,7 @@ struct plist_node { } /** - * #PLIST_NODE_INIT - static struct plist_node initializer - * + * PLIST_NODE_INIT - static struct plist_node initializer * @node: struct plist_node variable name * @__prio: initial node priority */ @@ -122,8 +121,8 @@ struct plist_node { /** * plist_head_init - dynamic struct plist_head initializer - * * @head: &struct plist_head pointer + * @lock: list spinlock, remembered for debugging */ static inline void plist_head_init(struct plist_head *head, spinlock_t *lock) @@ -137,7 +136,6 @@ plist_head_init(struct plist_head *head, spinlock_t *lock) /** * plist_node_init - Dynamic struct plist_node initializer - * * @node: &struct plist_node pointer * @prio: initial node priority */ @@ -152,49 +150,46 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); /** * plist_for_each - iterate over the plist - * - * 
@pos1: the type * to use as a loop counter. - * @head: the head for your list. + * @pos: the type * to use as a loop counter + * @head: the head for your list */ #define plist_for_each(pos, head) \ list_for_each_entry(pos, &(head)->node_list, plist.node_list) /** - * plist_for_each_entry_safe - iterate over a plist of given type safe - * against removal of list entry + * plist_for_each_safe - iterate safely over a plist of given type + * @pos: the type * to use as a loop counter + * @n: another type * to use as temporary storage + * @head: the head for your list * - * @pos1: the type * to use as a loop counter. - * @n1: another type * to use as temporary storage - * @head: the head for your list. + * Iterate over a plist of given type, safe against removal of list entry. */ #define plist_for_each_safe(pos, n, head) \ list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list) /** * plist_for_each_entry - iterate over list of given type - * - * @pos: the type * to use as a loop counter. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @pos: the type * to use as a loop counter + * @head: the head for your list + * @mem: the name of the list_struct within the struct */ #define plist_for_each_entry(pos, head, mem) \ list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list) /** - * plist_for_each_entry_safe - iterate over list of given type safe against - * removal of list entry - * - * @pos: the type * to use as a loop counter. + * plist_for_each_entry_safe - iterate safely over list of given type + * @pos: the type * to use as a loop counter * @n: another type * to use as temporary storage - * @head: the head for your list. - * @m: the name of the list_struct within the struct. + * @head: the head for your list + * @m: the name of the list_struct within the struct + * + * Iterate over list of given type, safe against removal of list entry. */ #define plist_for_each_entry_safe(pos, n, head, m) \ list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list) /** * plist_head_empty - return !0 if a plist_head is empty - * * @head: &struct plist_head pointer */ static inline int plist_head_empty(const struct plist_head *head) @@ -204,7 +199,6 @@ static inline int plist_head_empty(const struct plist_head *head) /** * plist_node_empty - return !0 if plist_node is not on a list - * * @node: &struct plist_node pointer */ static inline int plist_node_empty(const struct plist_node *node) @@ -216,10 +210,9 @@ static inline int plist_node_empty(const struct plist_node *node) /** * plist_first_entry - get the struct for the first entry - * - * @ptr: the &struct plist_head pointer. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @head: the &struct plist_head pointer + * @type: the type of the struct this is embedded in + * @member: the name of the list_struct within the struct */ #ifdef CONFIG_DEBUG_PI_LIST # define plist_first_entry(head, type, member) \ @@ -234,7 +227,6 @@ static inline int plist_node_empty(const struct plist_node *node) /** * plist_first - return the first node (and thus, highest priority) - * * @head: the &struct plist_head pointer * * Assumes the plist is _not_ empty. 
-- cgit v1.2.3-59-g8ed1b From 1b75b05b73cdefd1d10074e9dad60812f9731a5e Mon Sep 17 00:00:00 2001 From: Ivan Kokshaysky Date: Mon, 16 Apr 2007 22:53:17 -0700 Subject: alpha: fixes for specific machine types Files: arch/alpha/kernel/core_mcpcia.c arch/alpha/kernel/sys_rawhide.c include/asm-alpha/core_mcpcia.h Determine correct hose configuration; RAWHIDE family can have 2 or 4 hoses, so make sure non-existent hoses are ignored. arch/alpha/kernel/err_titan.c Supply a needed #include arch/alpha/kernel/module.c Add some useful output to the relocation overflow messages. arch/alpha/kernel/sys_noritake.c Supply necessary noritake_end_irq() to correct interrupt handling. This fixes a problem first noted by hangs during boot probing with a DE500-BA TULIP NIC present. arch/alpha/kernel/sys_sio.c Correct saving of original PIRQ register (PCI IRQ routing); change default PIRQ setting to leave PCI IRQs 9 and 14 free to be used for sound (Multia) and IDE (any), respectively. include/asm-alpha/io.h Supply the "isa_virt_to_bus" routine. Signed-off-by: Jay Estabrook Signed-off-by: Ivan Kokshaysky Cc: Richard Henderson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/core_mcpcia.c | 2 -- arch/alpha/kernel/err_titan.c | 1 + arch/alpha/kernel/module.c | 8 ++++---- arch/alpha/kernel/sys_noritake.c | 9 ++++++++- arch/alpha/kernel/sys_rawhide.c | 15 +++++++++++++++ arch/alpha/kernel/sys_sio.c | 14 +++++++++----- include/asm-alpha/core_mcpcia.h | 2 ++ include/asm-alpha/io.h | 1 + 8 files changed, 40 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c index 8d019071190a..381fec0af52e 100644 --- a/arch/alpha/kernel/core_mcpcia.c +++ b/arch/alpha/kernel/core_mcpcia.c @@ -40,8 +40,6 @@ # define DBG_CFG(args) #endif -#define MCPCIA_MAX_HOSES 4 - /* * Given a bus, device, and function number, compute resulting * configuration space address and setup the MCPCIA_HAXR2 register diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c index febe71c6869f..543d96d7fa2b 100644 --- a/arch/alpha/kernel/err_titan.c +++ b/arch/alpha/kernel/err_titan.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "err_impl.h" #include "proto.h" diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index aac6d4b22f7a..bd03dc94c72b 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -285,12 +285,12 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, reloc_overflow: if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION) printk(KERN_ERR - "module %s: Relocation overflow vs section %d\n", - me->name, sym->st_shndx); + "module %s: Relocation (type %lu) overflow vs section %d\n", + me->name, r_type, sym->st_shndx); else printk(KERN_ERR - "module %s: Relocation overflow vs %s\n", - me->name, strtab + sym->st_name); + "module %s: Relocation (type %lu) overflow vs %s\n", + me->name, r_type, strtab + sym->st_name); return -ENOEXEC; } } diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c index de6ba3432e8a..eb2a1d63f484 100644 --- a/arch/alpha/kernel/sys_noritake.c +++ b/arch/alpha/kernel/sys_noritake.c @@ -66,6 +66,13 @@ noritake_startup_irq(unsigned int irq) return 0; } +static void +noritake_end_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + noritake_enable_irq(irq); +} + static struct hw_interrupt_type noritake_irq_type = { .typename = "NORITAKE", .startup = noritake_startup_irq, @@ -73,7 +80,7 
@@ static struct hw_interrupt_type noritake_irq_type = { .enable = noritake_enable_irq, .disable = noritake_disable_irq, .ack = noritake_disable_irq, - .end = noritake_enable_irq, + .end = noritake_end_irq, }; static void diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c index 581d08c70b92..672cb2df53df 100644 --- a/arch/alpha/kernel/sys_rawhide.c +++ b/arch/alpha/kernel/sys_rawhide.c @@ -52,6 +52,9 @@ rawhide_update_irq_hw(int hose, int mask) *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)); } +#define hose_exists(h) \ + (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) + static inline void rawhide_enable_irq(unsigned int irq) { @@ -59,6 +62,9 @@ rawhide_enable_irq(unsigned int irq) irq -= 16; hose = irq / 24; + if (!hose_exists(hose)) /* if hose non-existent, exit */ + return; + irq -= hose * 24; mask = 1 << irq; @@ -76,6 +82,9 @@ rawhide_disable_irq(unsigned int irq) irq -= 16; hose = irq / 24; + if (!hose_exists(hose)) /* if hose non-existent, exit */ + return; + irq -= hose * 24; mask = ~(1 << irq) | hose_irq_masks[hose]; @@ -93,6 +102,9 @@ rawhide_mask_and_ack_irq(unsigned int irq) irq -= 16; hose = irq / 24; + if (!hose_exists(hose)) /* if hose non-existent, exit */ + return; + irq -= hose * 24; mask1 = 1 << irq; mask = ~mask1 | hose_irq_masks[hose]; @@ -169,6 +181,9 @@ rawhide_init_irq(void) mcpcia_init_hoses(); + /* Clear them all; only hoses that exist will be non-zero. */ + for (i = 0; i < MCPCIA_MAX_HOSES; i++) cached_irq_masks[i] = 0; + for (hose = hose_head; hose; hose = hose->next) { unsigned int h = hose->index; unsigned int mask = hose_irq_masks[h]; diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c index a654014d202a..14b5a753aba5 100644 --- a/arch/alpha/kernel/sys_sio.c +++ b/arch/alpha/kernel/sys_sio.c @@ -84,12 +84,16 @@ alphabook1_init_arch(void) static void __init sio_pci_route(void) { -#if defined(ALPHA_RESTORE_SRM_SETUP) - /* First, read and save the original setting. */ + unsigned int orig_route_tab; + + /* First, ALWAYS read and print the original setting. */ pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60, - &saved_config.orig_route_tab); + &orig_route_tab); printk("%s: PIRQ original 0x%x new 0x%x\n", __FUNCTION__, - saved_config.orig_route_tab, alpha_mv.sys.sio.route_tab); + orig_route_tab, alpha_mv.sys.sio.route_tab); + +#if defined(ALPHA_RESTORE_SRM_SETUP) + saved_config.orig_route_tab = orig_route_tab; #endif /* Now override with desired setting. */ @@ -334,7 +338,7 @@ struct alpha_machine_vector avanti_mv __initmv = { .pci_swizzle = common_swizzle, .sys = { .sio = { - .route_tab = 0x0b0a0e0f, + .route_tab = 0x0b0a050f, /* leave 14 for IDE, 9 for SND */ }} }; ALIAS_MV(avanti) diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h index 980a3c51b18e..525b4f6a7ace 100644 --- a/include/asm-alpha/core_mcpcia.h +++ b/include/asm-alpha/core_mcpcia.h @@ -72,6 +72,8 @@ * */ +#define MCPCIA_MAX_HOSES 4 + #define MCPCIA_MID(m) ((unsigned long)(m) << 33) /* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively. diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h index 24bdcc8b63aa..21a86f1a05b3 100644 --- a/include/asm-alpha/io.h +++ b/include/asm-alpha/io.h @@ -113,6 +113,7 @@ static inline unsigned long virt_to_bus(void *address) unsigned long bus = phys + __direct_map_base; return phys <= __direct_map_size ? 
bus : 0; } +#define isa_virt_to_bus virt_to_bus static inline void *bus_to_virt(unsigned long address) { -- cgit v1.2.3-59-g8ed1b From 88ed39b064575b4f42ecb737ec2daa66717de173 Mon Sep 17 00:00:00 2001 From: Ivan Kokshaysky Date: Mon, 16 Apr 2007 22:53:21 -0700 Subject: alpha: build fixes - force architecture Override compiler .arch directive for generic kernel build. Signed-off-by: Ivan Kokshaysky Signed-off-by: Richard Henderson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/sys_titan.c | 3 +-- include/asm-alpha/compiler.h | 47 ++++++++++++++++++++++++++++++++----------- 2 files changed, 36 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 29ab7db81c30..f009b7bc0943 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -257,8 +257,7 @@ titan_dispatch_irqs(u64 mask) */ while (mask) { /* convert to SRM vector... priority is <63> -> <0> */ - __asm__("ctlz %1, %0" : "=r"(vector) : "r"(mask)); - vector = 63 - vector; + vector = 63 - __kernel_ctlz(mask); mask &= ~(1UL << vector); /* clear it out */ vector = 0x900 + (vector << 4); /* convert to SRM vector */ diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h index d2768cc3d7a4..da6bb199839c 100644 --- a/include/asm-alpha/compiler.h +++ b/include/asm-alpha/compiler.h @@ -17,9 +17,6 @@ # define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift) # define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift) # define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b) -# define __kernel_cttz(x) __builtin_ctzl(x) -# define __kernel_ctlz(x) __builtin_clzl(x) -# define __kernel_ctpop(x) __builtin_popcountl(x) #else # define __kernel_insbl(val, shift) \ ({ unsigned long __kir; \ @@ -49,17 +46,39 @@ ({ unsigned long __kir; \ __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \ __kir; }) +#endif + +#ifdef __alpha_cix__ +# if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3 +# define __kernel_cttz(x) __builtin_ctzl(x) +# define __kernel_ctlz(x) __builtin_clzl(x) +# define __kernel_ctpop(x) __builtin_popcountl(x) +# else +# define __kernel_cttz(x) \ + ({ unsigned long __kir; \ + __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \ + __kir; }) +# define __kernel_ctlz(x) \ + ({ unsigned long __kir; \ + __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ + __kir; }) +# define __kernel_ctpop(x) \ + ({ unsigned long __kir; \ + __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ + __kir; }) +# endif +#else # define __kernel_cttz(x) \ ({ unsigned long __kir; \ - __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \ + __asm__(".arch ev67; cttz %1,%0" : "=r"(__kir) : "r"(x)); \ __kir; }) # define __kernel_ctlz(x) \ ({ unsigned long __kir; \ - __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ + __asm__(".arch ev67; ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ __kir; }) # define __kernel_ctpop(x) \ ({ unsigned long __kir; \ - __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ + __asm__(".arch ev67; ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ __kir; }) #endif @@ -78,16 +97,20 @@ #else #define __kernel_ldbu(mem) \ ({ unsigned char __kir; \ - __asm__("ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \ + __asm__(".arch ev56; \ + ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \ __kir; }) #define __kernel_ldwu(mem) \ ({ unsigned short __kir; \ - __asm__("ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \ + __asm__(".arch ev56; \ + ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \ __kir; }) -#define __kernel_stb(val,mem) \ - __asm__("stb %1,%0" : 
"=m"(mem) : "r"(val)) -#define __kernel_stw(val,mem) \ - __asm__("stw %1,%0" : "=m"(mem) : "r"(val)) +#define __kernel_stb(val,mem) \ + __asm__(".arch ev56; \ + stb %1,%0" : "=m"(mem) : "r"(val)) +#define __kernel_stw(val,mem) \ + __asm__(".arch ev56; \ + stw %1,%0" : "=m"(mem) : "r"(val)) #endif #ifdef __KERNEL__ -- cgit v1.2.3-59-g8ed1b From 93da28790c17345f4db10358dfb19b4c241d8ba3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 17 Apr 2007 00:32:26 -0700 Subject: Provide dummy devm_ioport_* if !HAS_IOPORT Provide an dummy implementation of devm_ioport_map() and devm_ioport_unmap() to allow drivers (eg, pata_platform) to build for platforms where CONFIG_NO_IOPORT is selected. Signed-off-by: Russell King Cc: Alan Cox Cc: Jeff Garzik Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/io.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include') diff --git a/include/linux/io.h b/include/linux/io.h index c244a0cc9319..09d351236379 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -33,9 +33,22 @@ int ioremap_page_range(unsigned long addr, unsigned long end, /* * Managed iomap interface */ +#ifdef CONFIG_HAS_IOPORT void __iomem * devm_ioport_map(struct device *dev, unsigned long port, unsigned int nr); void devm_ioport_unmap(struct device *dev, void __iomem *addr); +#else +static inline void __iomem *devm_ioport_map(struct device *dev, + unsigned long port, + unsigned int nr) +{ + return NULL; +} + +static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) +{ +} +#endif void __iomem * devm_ioremap(struct device *dev, unsigned long offset, unsigned long size); -- cgit v1.2.3-59-g8ed1b From 9a9943575ade643368849e2c963094ac637867e0 Mon Sep 17 00:00:00 2001 From: Mark Mason Date: Fri, 13 Apr 2007 10:32:25 -0700 Subject: [MIPS] Add missing silicon revisions for BCM112x Recent versions of the BCM112X processors aren't recognized by Linux (preventing Linux from booting on those processors). This patch adds support for those that are missing. Signed-off-by: Mark Mason Signed-off-by: Ralf Baechle --- arch/mips/sibyte/sb1250/setup.c | 12 ++++++++++++ include/asm-mips/sibyte/sb1250_scd.h | 1 + 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c index 87188f0f6fbe..f4a6169aa0a4 100644 --- a/arch/mips/sibyte/sb1250/setup.c +++ b/arch/mips/sibyte/sb1250/setup.c @@ -141,6 +141,18 @@ static int __init setup_bcm112x(void) periph_rev = 3; pass_str = "A2"; break; + case K_SYS_REVISION_BCM112x_A3: + periph_rev = 3; + pass_str = "A3"; + break; + case K_SYS_REVISION_BCM112x_A4: + periph_rev = 3; + pass_str = "A4"; + break; + case K_SYS_REVISION_BCM112x_B0: + periph_rev = 3; + pass_str = "B0"; + break; default: printk("Unknown %s rev %x\n", soc_str, soc_pass); ret = 1; diff --git a/include/asm-mips/sibyte/sb1250_scd.h b/include/asm-mips/sibyte/sb1250_scd.h index 7ed0bb611e56..b6a7d8f6ced5 100644 --- a/include/asm-mips/sibyte/sb1250_scd.h +++ b/include/asm-mips/sibyte/sb1250_scd.h @@ -84,6 +84,7 @@ #define K_SYS_REVISION_BCM112x_A2 0x21 #define K_SYS_REVISION_BCM112x_A3 0x22 #define K_SYS_REVISION_BCM112x_A4 0x23 +#define K_SYS_REVISION_BCM112x_B0 0x30 #define K_SYS_REVISION_BCM1480_S0 0x01 #define K_SYS_REVISION_BCM1480_A1 0x02 -- cgit v1.2.3-59-g8ed1b From 5323180db75d562a287cb2020b07c9422df13df6 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Sat, 14 Apr 2007 02:37:26 +0900 Subject: [MIPS] Disallow CpU exception in kernel again. 
The commit 4d40bff7110e9e1a97ff8c01bdd6350e9867cc10 ("Allow CpU exception in kernel partially") was broken. The commit was to fix theoretical problem but broke usual case. Revert it for now. Signed-off-by: Atsushi Nemoto Signed-off-by: Ralf Baechle --- arch/mips/kernel/r2300_switch.S | 10 ++++------ arch/mips/kernel/r4k_switch.S | 10 ++++------ arch/mips/kernel/signal.c | 10 +++++----- arch/mips/kernel/signal32.c | 10 +++++----- arch/mips/kernel/traps.c | 21 +++------------------ include/asm-mips/fpu.h | 16 ---------------- include/asm-mips/thread_info.h | 1 - 7 files changed, 21 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 28c2e2e6af73..656bde2e11b1 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -49,7 +49,8 @@ LEAF(resume) #ifndef CONFIG_CPU_HAS_LLSC sw zero, ll_bit #endif - mfc0 t2, CP0_STATUS + mfc0 t1, CP0_STATUS + sw t1, THREAD_STATUS(a0) cpu_save_nonscratch a0 sw ra, THREAD_REG31(a0) @@ -59,8 +60,8 @@ LEAF(resume) lw t3, TASK_THREAD_INFO(a0) lw t0, TI_FLAGS(t3) li t1, _TIF_USEDFPU - and t1, t0 - beqz t1, 1f + and t2, t0, t1 + beqz t2, 1f nor t1, zero, t1 and t0, t0, t1 @@ -73,13 +74,10 @@ LEAF(resume) li t1, ~ST0_CU1 and t0, t0, t1 sw t0, ST_OFF(t3) - /* clear thread_struct CU1 bit */ - and t2, t1 fpu_save_single a0, t0 # clobbers t0 1: - sw t2, THREAD_STATUS(a0) /* * The order of restoring the registers takes care of the race * updating $28, $29 and kernelsp without disabling ints. diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index c7698fd9955c..cc566cf12246 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -48,7 +48,8 @@ #ifndef CONFIG_CPU_HAS_LLSC sw zero, ll_bit #endif - mfc0 t2, CP0_STATUS + mfc0 t1, CP0_STATUS + LONG_S t1, THREAD_STATUS(a0) cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) @@ -58,8 +59,8 @@ PTR_L t3, TASK_THREAD_INFO(a0) LONG_L t0, TI_FLAGS(t3) li t1, _TIF_USEDFPU - and t1, t0 - beqz t1, 1f + and t2, t0, t1 + beqz t2, 1f nor t1, zero, t1 and t0, t0, t1 @@ -72,13 +73,10 @@ li t1, ~ST0_CU1 and t0, t0, t1 LONG_S t0, ST_OFF(t3) - /* clear thread_struct CU1 bit */ - and t2, t1 fpu_save_double a0 t0 t1 # c0_status passed in t0 # clobbers t1 1: - LONG_S t2, THREAD_STATUS(a0) /* * The order of restoring the registers takes care of the race diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 8c3c5a5789b0..fa581192de21 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -113,10 +113,10 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) * Save FPU state to signal context. Signal handler * will "inherit" current FPU state. */ + preempt_disable(); own_fpu(1); - enable_fp_in_kernel(); err |= save_fp_context(sc); - disable_fp_in_kernel(); + preempt_enable(); } return err; } @@ -148,7 +148,10 @@ check_and_restore_fp_context(struct sigcontext __user *sc) err = sig = fpcsr_pending(&sc->sc_fpc_csr); if (err > 0) err = 0; + preempt_disable(); + own_fpu(0); err |= restore_fp_context(sc); + preempt_enable(); return err ?: sig; } @@ -187,11 +190,8 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) if (used_math) { /* restore fpu context if we have used it before */ - own_fpu(0); - enable_fp_in_kernel(); if (!err) err = check_and_restore_fp_context(sc); - disable_fp_in_kernel(); } else { /* signal handler may have used FPU. Give it up. 
*/ lose_fpu(0); diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 151fd2f0893a..53a337cfeb66 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -209,10 +209,10 @@ static int setup_sigcontext32(struct pt_regs *regs, * Save FPU state to signal context. Signal handler * will "inherit" current FPU state. */ + preempt_disable(); own_fpu(1); - enable_fp_in_kernel(); err |= save_fp_context32(sc); - disable_fp_in_kernel(); + preempt_enable(); } return err; } @@ -225,7 +225,10 @@ check_and_restore_fp_context32(struct sigcontext32 __user *sc) err = sig = fpcsr_pending(&sc->sc_fpc_csr); if (err > 0) err = 0; + preempt_disable(); + own_fpu(0); err |= restore_fp_context32(sc); + preempt_enable(); return err ?: sig; } @@ -261,11 +264,8 @@ static int restore_sigcontext32(struct pt_regs *regs, if (used_math) { /* restore fpu context if we have used it before */ - own_fpu(0); - enable_fp_in_kernel(); if (!err) err = check_and_restore_fp_context32(sc); - disable_fp_in_kernel(); } else { /* signal handler may have used FPU. Give it up. */ lose_fpu(0); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 7d76a85422b2..6b356f62289e 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -757,11 +757,12 @@ asmlinkage void do_cpu(struct pt_regs *regs) { unsigned int cpid; + die_if_kernel("do_cpu invoked from kernel context!", regs); + cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; switch (cpid) { case 0: - die_if_kernel("do_cpu invoked from kernel context!", regs); if (!cpu_has_llsc) if (!simulate_llsc(regs)) return; @@ -772,9 +773,6 @@ asmlinkage void do_cpu(struct pt_regs *regs) break; case 1: - if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) - die_if_kernel("do_cpu invoked from kernel context!", - regs); if (used_math()) /* Using the FPU again. */ own_fpu(1); else { /* First time FPU user. */ @@ -782,19 +780,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) set_used_math(); } - if (raw_cpu_has_fpu) { - if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) { - local_irq_disable(); - if (cpu_has_fpu) - regs->cp0_status |= ST0_CU1; - /* - * We must return without enabling - * interrupts to ensure keep FPU - * ownership until resume. 
- */ - return; - } - } else { + if (!raw_cpu_has_fpu) { int sig; sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0); @@ -836,7 +822,6 @@ asmlinkage void do_cpu(struct pt_regs *regs) case 2: case 3: - die_if_kernel("do_cpu invoked from kernel context!", regs); break; } diff --git a/include/asm-mips/fpu.h b/include/asm-mips/fpu.h index 4e12d1f9534f..71436f90203f 100644 --- a/include/asm-mips/fpu.h +++ b/include/asm-mips/fpu.h @@ -68,8 +68,6 @@ do { \ /* We don't care about the c0 hazard here */ \ } while (0) -#define __fpu_enabled() (read_c0_status() & ST0_CU1) - #define enable_fpu() \ do { \ if (cpu_has_fpu) \ @@ -162,18 +160,4 @@ static inline fpureg_t *get_fpu_regs(struct task_struct *tsk) return tsk->thread.fpu.fpr; } -static inline void enable_fp_in_kernel(void) -{ - set_thread_flag(TIF_ALLOW_FP_IN_KERNEL); - /* make sure CU1 and FPU ownership are consistent */ - if (!__is_fpu_owner() && __fpu_enabled()) - __disable_fpu(); -} - -static inline void disable_fp_in_kernel(void) -{ - BUG_ON(!__is_fpu_owner() && __fpu_enabled()); - clear_thread_flag(TIF_ALLOW_FP_IN_KERNEL); -} - #endif /* _ASM_FPU_H */ diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h index 6cf05f4a4e7e..fbcda8204473 100644 --- a/include/asm-mips/thread_info.h +++ b/include/asm-mips/thread_info.h @@ -119,7 +119,6 @@ register struct thread_info *__current_thread_info __asm__("$28"); #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 #define TIF_FREEZE 19 -#define TIF_ALLOW_FP_IN_KERNEL 20 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */ #define _TIF_SYSCALL_TRACE (1< Date: Mon, 16 Apr 2007 23:19:44 +0900 Subject: [MIPS] Retry {save,restore}_fp_context if failed in atomic context. The save_fp_context()/restore_fp_context() might sleep on accessing user stack and therefore might lose FPU ownership in middle of them. If these function failed due to "in_atomic" test in do_page_fault, touch the sigcontext area in non-atomic context and retry these save/restore operation. This is a replacement of a (broken) fix which was titled "Allow CpU exception in kernel partially". 
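The pattern used here is: attempt the copy with page faults disabled; if that fails, fall back to a context where faulting is allowed, touch the target words so the page is resident, and retry. A userspace model of that loop with stand-in names (the real code uses lock_fpu_owner(), own_fpu_inatomic() and save_fp_context() as in the diff below):

#include <stdio.h>

static int page_resident;		/* pretend state of the user page */

/* Stand-in for save_fp_context() run with page faults disabled:
 * it must bail out instead of faulting if the page is not resident. */
static int save_context_atomic(int *dst)
{
	if (!page_resident)
		return -1;
	*dst = 42;			/* the "FPU register" contents */
	return 0;
}

/* Stand-in for the __put_user() calls that touch the sigcontext. */
static void touch(int *dst)
{
	*dst = 0;			/* fault the page in */
	page_resident = 1;
}

int main(void)
{
	int sigctx_word = 0;

	while (save_context_atomic(&sigctx_word) != 0)
		touch(&sigctx_word);	/* faulting is allowed here */

	printf("saved %d\n", sigctx_word);
	return 0;
}
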
Signed-off-by: Atsushi Nemoto Signed-off-by: Ralf Baechle --- arch/mips/kernel/signal-common.h | 9 +++++++ arch/mips/kernel/signal.c | 52 +++++++++++++++++++++++++++++++++------- arch/mips/kernel/signal32.c | 52 +++++++++++++++++++++++++++++++++------- include/asm-mips/fpu.h | 9 +++++-- 4 files changed, 102 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index 297dfcb97524..c0faabd52010 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h @@ -34,4 +34,13 @@ extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall); /* Check and clear pending FPU exceptions in saved CSR */ extern int fpcsr_pending(unsigned int __user *fpcsr); +/* Make sure we will not lose FPU ownership */ +#ifdef CONFIG_PREEMPT +#define lock_fpu_owner() preempt_disable() +#define unlock_fpu_owner() preempt_enable() +#else +#define lock_fpu_owner() pagefault_disable() +#define unlock_fpu_owner() pagefault_enable() +#endif + #endif /* __SIGNAL_COMMON_H */ diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index fa581192de21..07d67309451a 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -27,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -78,6 +78,46 @@ struct rt_sigframe { /* * Helper routines */ +static int protected_save_fp_context(struct sigcontext __user *sc) +{ + int err; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(1); + err = save_fp_context(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __put_user(0, &sc->sc_fpregs[0]) | + __put_user(0, &sc->sc_fpregs[31]) | + __put_user(0, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + +static int protected_restore_fp_context(struct sigcontext __user *sc) +{ + int err, tmp; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(0); + err = restore_fp_context(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __get_user(tmp, &sc->sc_fpregs[0]) | + __get_user(tmp, &sc->sc_fpregs[31]) | + __get_user(tmp, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; @@ -113,10 +153,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) * Save FPU state to signal context. Signal handler * will "inherit" current FPU state. 
*/ - preempt_disable(); - own_fpu(1); - err |= save_fp_context(sc); - preempt_enable(); + err |= protected_save_fp_context(sc); } return err; } @@ -148,10 +185,7 @@ check_and_restore_fp_context(struct sigcontext __user *sc) err = sig = fpcsr_pending(&sc->sc_fpc_csr); if (err > 0) err = 0; - preempt_disable(); - own_fpu(0); - err |= restore_fp_context(sc); - preempt_enable(); + err |= protected_restore_fp_context(sc); return err ?: sig; } diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 53a337cfeb66..b9a014411f83 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -29,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -176,6 +176,46 @@ struct rt_sigframe32 { /* * sigcontext handlers */ +static int protected_save_fp_context32(struct sigcontext32 __user *sc) +{ + int err; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(1); + err = save_fp_context32(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __put_user(0, &sc->sc_fpregs[0]) | + __put_user(0, &sc->sc_fpregs[31]) | + __put_user(0, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + +static int protected_restore_fp_context32(struct sigcontext32 __user *sc) +{ + int err, tmp; + while (1) { + lock_fpu_owner(); + own_fpu_inatomic(0); + err = restore_fp_context32(sc); /* this might fail */ + unlock_fpu_owner(); + if (likely(!err)) + break; + /* touch the sigcontext and try again */ + err = __get_user(tmp, &sc->sc_fpregs[0]) | + __get_user(tmp, &sc->sc_fpregs[31]) | + __get_user(tmp, &sc->sc_fpc_csr); + if (err) + break; /* really bad sigcontext */ + } + return err; +} + static int setup_sigcontext32(struct pt_regs *regs, struct sigcontext32 __user *sc) { @@ -209,10 +249,7 @@ static int setup_sigcontext32(struct pt_regs *regs, * Save FPU state to signal context. Signal handler * will "inherit" current FPU state. */ - preempt_disable(); - own_fpu(1); - err |= save_fp_context32(sc); - preempt_enable(); + err |= protected_save_fp_context32(sc); } return err; } @@ -225,10 +262,7 @@ check_and_restore_fp_context32(struct sigcontext32 __user *sc) err = sig = fpcsr_pending(&sc->sc_fpc_csr); if (err > 0) err = 0; - preempt_disable(); - own_fpu(0); - err |= restore_fp_context32(sc); - preempt_enable(); + err |= protected_restore_fp_context32(sc); return err ?: sig; } diff --git a/include/asm-mips/fpu.h b/include/asm-mips/fpu.h index 71436f90203f..b414a7d9db43 100644 --- a/include/asm-mips/fpu.h +++ b/include/asm-mips/fpu.h @@ -100,14 +100,19 @@ static inline void __own_fpu(void) set_thread_flag(TIF_USEDFPU); } -static inline void own_fpu(int restore) +static inline void own_fpu_inatomic(int restore) { - preempt_disable(); if (cpu_has_fpu && !__is_fpu_owner()) { __own_fpu(); if (restore) _restore_fp(current); } +} + +static inline void own_fpu(int restore) +{ + preempt_disable(); + own_fpu_inatomic(restore); preempt_enable(); } -- cgit v1.2.3-59-g8ed1b From ba755f8ec80fdbf2b5212622eabf7355464c6327 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Thu, 12 Apr 2007 20:02:54 +0900 Subject: [MIPS] Fix BUG(), BUG_ON() handling With commit 63dc68a8cf60cb110b147dab1704d990808b39e2, kernel can not handle BUG() and BUG_ON() properly since get_user() returns false for kernel code. Use __get_user() to skip unnecessary access_ok(). 
This patch also make BRK_BUG code encoded in the TNE instruction. Signed-off-by: Atsushi Nemoto Signed-off-by: Ralf Baechle --- arch/mips/kernel/traps.c | 4 ++-- include/asm-mips/bug.h | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 6b356f62289e..493cb29b8a42 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -650,7 +650,7 @@ asmlinkage void do_bp(struct pt_regs *regs) unsigned int opcode, bcode; siginfo_t info; - if (get_user(opcode, (unsigned int __user *) exception_epc(regs))) + if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) goto out_sigsegv; /* @@ -700,7 +700,7 @@ asmlinkage void do_tr(struct pt_regs *regs) unsigned int opcode, tcode = 0; siginfo_t info; - if (get_user(opcode, (unsigned int __user *) exception_epc(regs))) + if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) goto out_sigsegv; /* Immediate versions don't provide a code. */ diff --git a/include/asm-mips/bug.h b/include/asm-mips/bug.h index 4d560a533940..7eb63de808bc 100644 --- a/include/asm-mips/bug.h +++ b/include/asm-mips/bug.h @@ -18,7 +18,8 @@ do { \ #define BUG_ON(condition) \ do { \ - __asm__ __volatile__("tne $0, %0" : : "r" (condition)); \ + __asm__ __volatile__("tne $0, %0, %1" \ + : : "r" (condition), "i" (BRK_BUG)); \ } while (0) #define HAVE_ARCH_BUG_ON -- cgit v1.2.3-59-g8ed1b From 1d464c26b5625215c4b35fb336c8f3c57d248c2e Mon Sep 17 00:00:00 2001 From: Dave Johnson Date: Wed, 18 Apr 2007 10:39:41 -0400 Subject: [MIPS] Fix wrong checksum for split TCP packets on 64-bit MIPS I've traced down an off-by-one TCP checksum calculation error under the following conditions: 1) The TCP code needs to split a full-sized packet due to a reduced MSS (typically due to the addition of TCP options mid-stream like SACK). _AND_ 2) The checksum of the 2nd fragment is larger than the checksum of the original packet. After subtraction this results in a checksum for the 1st fragment with bits 16..31 set to 1. (this is ok) _AND_ 3) The checksum of the 1st fragment's TCP header plus the previously 32bit checksum of the 1st fragment DOES NOT cause a 32bit overflow when added together. This results in a checksum of the TCP header plus TCP data that still has the upper 16 bits as 1's. _THEN_ 4) The TCP+data checksum is added to the checksum of the pseudo IP header with csum_tcpudp_nofold() incorrectly (the bug). The problem is the checksum of the TCP+data is passed to csum_tcpudp_nofold() as an 32bit unsigned value, however the assembly code acts on it as if it is a 64bit unsigned value. This causes an incorrect 32->64bit extension if the sum has bit 31 set. The resulting checksum is off by one. This problems is data and TCP header dependent due to #2 and #3 above so it doesn't occur on every TCP packet split. 
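The arithmetic can be reproduced in plain C: folding a ones'-complement sum out of a 64-bit accumulator comes out one too small when a 32-bit partial sum with bit 31 set is sign-extended (upper 32 bits all ones) instead of zero-extended, because the end-around carry is lost at the 64-bit overflow. That is what the (__force unsigned long) cast in the fix prevents. An illustrative program, not the MIPS assembler itself:

#include <stdint.h>
#include <stdio.h>

/* Fold a 64-bit accumulator down to a 16-bit ones'-complement sum. */
static uint16_t csum_fold64(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* A partial checksum with bits 16..31 set, as in condition 2. */
	uint32_t partial = 0xffff0001;
	/* Arbitrary pseudo-header contribution. */
	uint64_t rest = 0x00012345;

	/* Correct: zero-extend the 32-bit partial sum into 64 bits. */
	uint64_t good = rest + (uint64_t)partial;

	/* Buggy: sign-extend it, as happens when the value is treated
	 * as a signed 32-bit quantity in a 64-bit register. */
	uint64_t bad = rest + (uint64_t)(int64_t)(int32_t)partial;

	printf("zero-extended fold: 0x%04x\n", (unsigned)csum_fold64(good));
	printf("sign-extended fold: 0x%04x\n", (unsigned)csum_fold64(bad));
	return 0;
}
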
Signed-off-by: Dave Johnson Signed-off-by: Ralf Baechle --- include/asm-mips/checksum.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-mips/checksum.h b/include/asm-mips/checksum.h index 20a81e1548f5..290485ac5407 100644 --- a/include/asm-mips/checksum.h +++ b/include/asm-mips/checksum.h @@ -166,7 +166,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, #else "r" (proto + len), #endif - "r" (sum)); + "r" ((__force unsigned long)sum)); return sum; } -- cgit v1.2.3-59-g8ed1b From 8e821cad12e80cd1a8a3fbadf91f62f17f32549e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 20 Apr 2007 16:12:34 -0400 Subject: NFS: clean up the unstable write code Get rid of the inlined #ifdefs. Signed-off-by: Trond Myklebust Signed-off-by: Linus Torvalds --- fs/nfs/write.c | 117 ++++++++++++++++++++++++++++------------------- include/linux/nfs_page.h | 30 ------------ 2 files changed, 71 insertions(+), 76 deletions(-) (limited to 'include') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ad2e91b4904f..3ed4feb8c856 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -460,6 +460,43 @@ nfs_mark_request_commit(struct nfs_page *req) inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); __mark_inode_dirty(inode, I_DIRTY_DATASYNC); } + +static inline +int nfs_write_need_commit(struct nfs_write_data *data) +{ + return data->verf.committed != NFS_FILE_SYNC; +} + +static inline +int nfs_reschedule_unstable_write(struct nfs_page *req) +{ + if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) { + nfs_mark_request_commit(req); + return 1; + } + if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) { + nfs_redirty_request(req); + return 1; + } + return 0; +} +#else +static inline void +nfs_mark_request_commit(struct nfs_page *req) +{ +} + +static inline +int nfs_write_need_commit(struct nfs_write_data *data) +{ + return 0; +} + +static inline +int nfs_reschedule_unstable_write(struct nfs_page *req) +{ + return 0; +} #endif /* @@ -746,26 +783,12 @@ int nfs_updatepage(struct file *file, struct page *page, static void nfs_writepage_release(struct nfs_page *req) { - nfs_end_page_writeback(req->wb_page); -#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) - if (!PageError(req->wb_page)) { - if (NFS_NEED_RESCHED(req)) { - nfs_redirty_request(req); - goto out; - } else if (NFS_NEED_COMMIT(req)) { - nfs_mark_request_commit(req); - goto out; - } - } - nfs_inode_remove_request(req); - -out: - nfs_clear_commit(req); - nfs_clear_reschedule(req); -#else - nfs_inode_remove_request(req); -#endif + if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { + nfs_end_page_writeback(req->wb_page); + nfs_inode_remove_request(req); + } else + nfs_end_page_writeback(req->wb_page); nfs_clear_page_writeback(req); } @@ -1008,22 +1031,28 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata) nfs_set_pageerror(page); req->wb_context->error = task->tk_status; dprintk(", error = %d\n", task->tk_status); - } else { -#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) - if (data->verf.committed < NFS_FILE_SYNC) { - if (!NFS_NEED_COMMIT(req)) { - nfs_defer_commit(req); - memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); - dprintk(" defer commit\n"); - } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) { - nfs_defer_reschedule(req); - dprintk(" server reboot detected\n"); - } - } else -#endif - dprintk(" OK\n"); + goto out; } + if (nfs_write_need_commit(data)) { + spinlock_t *req_lock = 
&NFS_I(page->mapping->host)->req_lock; + + spin_lock(req_lock); + if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) { + /* Do nothing we need to resend the writes */ + } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) { + memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); + dprintk(" defer commit\n"); + } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) { + set_bit(PG_NEED_RESCHED, &req->wb_flags); + clear_bit(PG_NEED_COMMIT, &req->wb_flags); + dprintk(" server reboot detected\n"); + } + spin_unlock(req_lock); + } else + dprintk(" OK\n"); + +out: if (atomic_dec_and_test(&req->wb_complete)) nfs_writepage_release(req); } @@ -1064,25 +1093,21 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata) if (task->tk_status < 0) { nfs_set_pageerror(page); req->wb_context->error = task->tk_status; - nfs_end_page_writeback(page); - nfs_inode_remove_request(req); dprintk(", error = %d\n", task->tk_status); - goto next; + goto remove_request; } - nfs_end_page_writeback(page); -#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) - if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) { - nfs_inode_remove_request(req); - dprintk(" OK\n"); + if (nfs_write_need_commit(data)) { + memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); + nfs_mark_request_commit(req); + nfs_end_page_writeback(page); + dprintk(" marked for commit\n"); goto next; } - memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); - nfs_mark_request_commit(req); - dprintk(" marked for commit\n"); -#else + dprintk(" OK\n"); +remove_request: + nfs_end_page_writeback(page); nfs_inode_remove_request(req); -#endif next: nfs_clear_page_writeback(req); } diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index d111be639140..16b0266b14fd 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -49,8 +49,6 @@ struct nfs_page { }; #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) -#define NFS_NEED_COMMIT(req) (test_bit(PG_NEED_COMMIT,&(req)->wb_flags)) -#define NFS_NEED_RESCHED(req) (test_bit(PG_NEED_RESCHED,&(req)->wb_flags)) extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, struct inode *inode, @@ -121,34 +119,6 @@ nfs_list_remove_request(struct nfs_page *req) req->wb_list_head = NULL; } -static inline int -nfs_defer_commit(struct nfs_page *req) -{ - return !test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags); -} - -static inline void -nfs_clear_commit(struct nfs_page *req) -{ - smp_mb__before_clear_bit(); - clear_bit(PG_NEED_COMMIT, &req->wb_flags); - smp_mb__after_clear_bit(); -} - -static inline int -nfs_defer_reschedule(struct nfs_page *req) -{ - return !test_and_set_bit(PG_NEED_RESCHED, &req->wb_flags); -} - -static inline void -nfs_clear_reschedule(struct nfs_page *req) -{ - smp_mb__before_clear_bit(); - clear_bit(PG_NEED_RESCHED, &req->wb_flags); - smp_mb__after_clear_bit(); -} - static inline struct nfs_page * nfs_list_entry(struct list_head *head) { -- cgit v1.2.3-59-g8ed1b From 7e40f2ab0a7e36706ee78b78b3792f08f208cd44 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Mon, 23 Apr 2007 14:41:05 -0700 Subject: Taskstats fix the structure members alignment issue We broke the the alignment of members of taskstats to the 8 byte boundary with the CSA patches. In the current kernel, the taskstats structure is not suitable for use by 32 bit applications in a 64 bit kernel. 
On x86_64 Offsets of taskstats' members (64 bit kernel, 64 bit application) @taskstats'offsetof[@taskstats'indices] = ( 0, # version 4, # ac_exitcode 8, # ac_flag 9, # ac_nice 16, # cpu_count 24, # cpu_delay_total 32, # blkio_count 40, # blkio_delay_total 48, # swapin_count 56, # swapin_delay_total 64, # cpu_run_real_total 72, # cpu_run_virtual_total 80, # ac_comm 112, # ac_sched 113, # ac_pad 116, # ac_uid 120, # ac_gid 124, # ac_pid 128, # ac_ppid 132, # ac_btime 136, # ac_etime 144, # ac_utime 152, # ac_stime 160, # ac_minflt 168, # ac_majflt 176, # coremem 184, # virtmem 192, # hiwater_rss 200, # hiwater_vm 208, # read_char 216, # write_char 224, # read_syscalls 232, # write_syscalls 240, # read_bytes 248, # write_bytes 256, # cancelled_write_bytes ); Offsets of taskstats' members (64 bit kernel, 32 bit application) @taskstats'offsetof[@taskstats'indices] = ( 0, # version 4, # ac_exitcode 8, # ac_flag 9, # ac_nice 12, # cpu_count 20, # cpu_delay_total 28, # blkio_count 36, # blkio_delay_total 44, # swapin_count 52, # swapin_delay_total 60, # cpu_run_real_total 68, # cpu_run_virtual_total 76, # ac_comm 108, # ac_sched 109, # ac_pad 112, # ac_uid 116, # ac_gid 120, # ac_pid 124, # ac_ppid 128, # ac_btime 132, # ac_etime 140, # ac_utime 148, # ac_stime 156, # ac_minflt 164, # ac_majflt 172, # coremem 180, # virtmem 188, # hiwater_rss 196, # hiwater_vm 204, # read_char 212, # write_char 220, # read_syscalls 228, # write_syscalls 236, # read_bytes 244, # write_bytes 252, # cancelled_write_bytes ); This is one way to solve the problem without re-arranging structure members is to pack the structure. The patch adds an __attribute__((aligned(8))) to the taskstats structure members so that 32 bit applications using taskstats can work with a 64 bit kernel. Using __attribute__((packed)) would break the 64 bit alignment of members. The fix was tested on x86_64. 
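As a minimal illustration of the layout difference the attribute removes (a hypothetical two-member struct, not the real taskstats definition), a 64-bit member that follows a 32-bit one is placed at offset 4 by the i386 ABI but at offset 8 by the x86_64 ABI, and an explicit 8-byte alignment attribute makes both builds agree:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct demo_unaligned {
        uint32_t version;
        uint64_t counter;                               /* offset 4 with -m32, 8 with -m64 */
};

struct demo_aligned {
        uint32_t version;
        uint64_t counter __attribute__((aligned(8)));   /* offset 8 in both builds */
};

int main(void)
{
        printf("unaligned: %zu  aligned: %zu\n",
               offsetof(struct demo_unaligned, counter),
               offsetof(struct demo_aligned, counter));
        return 0;
}

Compiling this once with -m32 and once with -m64 on a GCC-compatible compiler shows the unaligned member moving between offsets 4 and 8 while the aligned member stays at 8, which is the property the patch restores for the userspace-visible taskstats fields.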
After the fix, we got Offsets of taskstats' members (64 bit kernel, 64 bit application) @taskstats'offsetof[@taskstats'indices] = ( 0, # version 4, # ac_exitcode 8, # ac_flag 9, # ac_nice 16, # cpu_count 24, # cpu_delay_total 32, # blkio_count 40, # blkio_delay_total 48, # swapin_count 56, # swapin_delay_total 64, # cpu_run_real_total 72, # cpu_run_virtual_total 80, # ac_comm 112, # ac_sched 113, # ac_pad 120, # ac_uid 124, # ac_gid 128, # ac_pid 132, # ac_ppid 136, # ac_btime 144, # ac_etime 152, # ac_utime 160, # ac_stime 168, # ac_minflt 176, # ac_majflt 184, # coremem 192, # virtmem 200, # hiwater_rss 208, # hiwater_vm 216, # read_char 224, # write_char 232, # read_syscalls 240, # write_syscalls 248, # read_bytes 256, # write_bytes 264, # cancelled_write_bytes ); Offsets of taskstats' members (64 bit kernel, 32 bit application) @taskstats'offsetof[@taskstats'indices] = ( 0, # version 4, # ac_exitcode 8, # ac_flag 9, # ac_nice 16, # cpu_count 24, # cpu_delay_total 32, # blkio_count 40, # blkio_delay_total 48, # swapin_count 56, # swapin_delay_total 64, # cpu_run_real_total 72, # cpu_run_virtual_total 80, # ac_comm 112, # ac_sched 113, # ac_pad 120, # ac_uid 124, # ac_gid 128, # ac_pid 132, # ac_ppid 136, # ac_btime 144, # ac_etime 152, # ac_utime 160, # ac_stime 168, # ac_minflt 176, # ac_majflt 184, # coremem 192, # virtmem 200, # hiwater_rss 208, # hiwater_vm 216, # read_char 224, # write_char 232, # read_syscalls 240, # write_syscalls 248, # read_bytes 256, # write_bytes 264, # cancelled_write_bytes ); Signed-off-by: Balbir Singh Cc: Jay Lan Cc: Shailabh Nagar Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/taskstats.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h index 3fced4798255..a46104a28f66 100644 --- a/include/linux/taskstats.h +++ b/include/linux/taskstats.h @@ -31,7 +31,7 @@ */ -#define TASKSTATS_VERSION 3 +#define TASKSTATS_VERSION 4 #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN * in linux/sched.h */ @@ -66,7 +66,7 @@ struct taskstats { /* Delay waiting for cpu, while runnable * count, delay_total NOT updated atomically */ - __u64 cpu_count; + __u64 cpu_count __attribute__((aligned(8))); __u64 cpu_delay_total; /* Following four fields atomically updated using task->delays->lock */ @@ -101,14 +101,17 @@ struct taskstats { /* Basic Accounting Fields start */ char ac_comm[TS_COMM_LEN]; /* Command name */ - __u8 ac_sched; /* Scheduling discipline */ + __u8 ac_sched __attribute__((aligned(8))); + /* Scheduling discipline */ __u8 ac_pad[3]; - __u32 ac_uid; /* User ID */ + __u32 ac_uid __attribute__((aligned(8))); + /* User ID */ __u32 ac_gid; /* Group ID */ __u32 ac_pid; /* Process ID */ __u32 ac_ppid; /* Parent process ID */ __u32 ac_btime; /* Begin time [sec since 1970] */ - __u64 ac_etime; /* Elapsed time [usec] */ + __u64 ac_etime __attribute__((aligned(8))); + /* Elapsed time [usec] */ __u64 ac_utime; /* User CPU time [usec] */ __u64 ac_stime; /* SYstem CPU time [usec] */ __u64 ac_minflt; /* Minor Page Fault Count */ -- cgit v1.2.3-59-g8ed1b From 0bcbc92629044b5403719f77fb015e9005b1f504 Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki Date: Tue, 24 Apr 2007 14:58:30 -0700 Subject: [IPV6]: Disallow RH0 by default. A security issue is emerging. Disallow Routing Header Type 0 by default as we have been doing for IPv4. Note: We allow RH2 by default because it is harmless. Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. 
Miller --- Documentation/networking/ip-sysctl.txt | 9 ++++++++ include/linux/ipv6.h | 3 +++ include/linux/sysctl.h | 1 + net/ipv6/addrconf.c | 11 ++++++++++ net/ipv6/exthdrs.c | 40 +++++++++++++++++++++++++++++----- 5 files changed, 58 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index d3aae1f9b4c1..702d1d8dd04a 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -851,6 +851,15 @@ accept_redirects - BOOLEAN Functional default: enabled if local forwarding is disabled. disabled if local forwarding is enabled. +accept_source_route - INTEGER + Accept source routing (routing extension header). + + > 0: Accept routing header. + = 0: Accept only routing header type 2. + < 0: Do not accept routing header. + + Default: 0 + autoconf - BOOLEAN Autoconfigure addresses using Prefix Information in Router Advertisements. diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index f8241130f5ea..713eb5eaa81f 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -177,6 +177,7 @@ struct ipv6_devconf { #endif #endif __s32 proxy_ndp; + __s32 accept_source_route; void *sysctl; }; @@ -205,6 +206,8 @@ enum { DEVCONF_RTR_PROBE_INTERVAL, DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN, DEVCONF_PROXY_NDP, + __DEVCONF_OPTIMISTIC_DAD, + DEVCONF_ACCEPT_SOURCE_ROUTE, DEVCONF_MAX }; diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 2c5fb38d9392..9a8970bf99a6 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -580,6 +580,7 @@ enum { NET_IPV6_RTR_PROBE_INTERVAL=21, NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22, NET_IPV6_PROXY_NDP=23, + NET_IPV6_ACCEPT_SOURCE_ROUTE=25, __NET_IPV6_MAX }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 7552663aa125..452a82ce4796 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -172,6 +172,7 @@ struct ipv6_devconf ipv6_devconf __read_mostly = { #endif #endif .proxy_ndp = 0, + .accept_source_route = 0, /* we do not accept RH0 by default. */ }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -203,6 +204,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { #endif #endif .proxy_ndp = 0, + .accept_source_route = 0, /* we do not accept RH0 by default. 
*/ }; /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ @@ -3356,6 +3358,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, #endif #endif array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp; + array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route; } static inline size_t inet6_if_nlmsg_size(void) @@ -3883,6 +3886,14 @@ static struct addrconf_sysctl_table .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = NET_IPV6_ACCEPT_SOURCE_ROUTE, + .procname = "accept_source_route", + .data = &ipv6_devconf.accept_source_route, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, { .ctl_name = 0, /* sentinel */ } diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 28e0c6568272..6ed6a8cd6a68 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -362,10 +362,27 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp) struct inet6_skb_parm *opt = IP6CB(skb); struct in6_addr *addr = NULL; struct in6_addr daddr; + struct inet6_dev *idev; int n, i; - struct ipv6_rt_hdr *hdr; struct rt0_hdr *rthdr; + int accept_source_route = ipv6_devconf.accept_source_route; + + if (accept_source_route < 0 || + ((idev = in6_dev_get(skb->dev)) == NULL)) { + kfree_skb(skb); + return -1; + } + if (idev->cnf.accept_source_route < 0) { + in6_dev_put(idev); + kfree_skb(skb); + return -1; + } + + if (accept_source_route > idev->cnf.accept_source_route) + accept_source_route = idev->cnf.accept_source_route; + + in6_dev_put(idev); if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) || !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) { @@ -377,6 +394,22 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp) hdr = (struct ipv6_rt_hdr *) skb->h.raw; + switch (hdr->type) { +#ifdef CONFIG_IPV6_MIP6 + break; +#endif + case IPV6_SRCRT_TYPE_0: + if (accept_source_route <= 0) + break; + kfree_skb(skb); + return -1; + default: + IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), + IPSTATS_MIB_INHDRERRORS); + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw); + return -1; + } + if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) || skb->pkt_type != PACKET_HOST) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), @@ -434,11 +467,6 @@ looped_back: } break; #endif - default: - IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), - IPSTATS_MIB_INHDRERRORS); - icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw); - return -1; } /* -- cgit v1.2.3-59-g8ed1b From bdaae17da81db79b9aa4dfbf43305cfeef64f6a8 Mon Sep 17 00:00:00 2001 From: Ilpo Järvinen Date: Wed, 21 Feb 2007 22:59:58 -0800 Subject: [TCP] FRTO: Moved tcp_use_frto from tcp.h to tcp_input.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In addition, removed inline. Signed-off-by: Ilpo Järvinen Signed-off-by: David S. 
Miller --- include/net/tcp.h | 14 +------------- net/ipv4/tcp_input.c | 13 +++++++++++++ 2 files changed, 14 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 5c472f255b77..572a77bb6907 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -341,6 +341,7 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, extern int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); +extern int tcp_use_frto(const struct sock *sk); extern void tcp_enter_frto(struct sock *sk); extern void tcp_enter_loss(struct sock *sk, int how); extern void tcp_clear_retrans(struct tcp_sock *tp); @@ -1033,19 +1034,6 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int #define TCP_CHECK_TIMER(sk) do { } while (0) -static inline int tcp_use_frto(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - - /* F-RTO must be activated in sysctl and there must be some - * unsent new data, and the advertised window should allow - * sending it. - */ - return (sysctl_tcp_frto && sk->sk_send_head && - !after(TCP_SKB_CB(sk->sk_send_head)->end_seq, - tp->snd_una + tp->snd_wnd)); -} - static inline void tcp_mib_init(void) { /* See RFC 2012 */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c5be3d0465f5..fe96e176d85a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1236,6 +1236,19 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ return flag; } +int tcp_use_frto(const struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + + /* F-RTO must be activated in sysctl and there must be some + * unsent new data, and the advertised window should allow + * sending it. + */ + return (sysctl_tcp_frto && sk->sk_send_head && + !after(TCP_SKB_CB(sk->sk_send_head)->end_seq, + tp->snd_una + tp->snd_wnd)); +} + /* RTO occurred, but do not yet enter loss state. Instead, transmit two new * segments to see from the next ACKs whether any data was really missing. * If the RTO was spurious, new ACKs should arrive. -- cgit v1.2.3-59-g8ed1b From 46d0de4ed92650b95f27acae09914996bbe624e7 Mon Sep 17 00:00:00 2001 From: Ilpo Järvinen Date: Wed, 21 Feb 2007 23:10:39 -0800 Subject: [TCP] FRTO: Entry is allowed only during (New)Reno like recovery MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This interpretation comes from RFC4138: "If the sender implements some loss recovery algorithm other than Reno or NewReno [FHG04], the F-RTO algorithm SHOULD NOT be entered when earlier fast recovery is underway." I think the RFC means to say (especially in the light of Appendix B) that ...recovery is underway (not just fast recovery) or was underway when it was interrupted by an earlier (F-)RTO that hasn't yet been resolved (snd_una has not advanced enough). Thus, my interpretation is that whenever TCP has ever retransmitted other than head, basic version cannot be used because then the order assumptions which are used as FRTO basis do not hold. NewReno has only the head segment retransmitted at a time. Therefore, walk up to the segment that has not been SACKed, if that segment is not retransmitted nor anything before it, we know for sure, that nothing after the non-SACKed segment should be either. This assumption is valid because TCPCB_EVER_RETRANS does not leave holes but each non-SACKed segment is rexmitted in-order. 
Check for retrans_out > 1 avoids more expensive walk through the skb list, as we can know the result beforehand: F-RTO will not be allowed. SACKed skb can turn into non-SACked only in the extremely rare case of SACK reneging, in this case we might fail to detect retransmissions if there were them for any other than head. To get rid of that feature, whole rexmit queue would have to be walked (always) or FRTO should be prevented when SACK reneging happens. Of course RTO should still trigger after reneging which makes this issue even less likely to show up. And as long as the response is as conservative as it's now, nothing bad happens even then. Signed-off-by: Ilpo Järvinen Signed-off-by: David S. Miller --- include/net/tcp.h | 2 +- net/ipv4/tcp_input.c | 25 +++++++++++++++++++++---- 2 files changed, 22 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 572a77bb6907..7fd6b77519c3 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -341,7 +341,7 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, extern int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); -extern int tcp_use_frto(const struct sock *sk); +extern int tcp_use_frto(struct sock *sk); extern void tcp_enter_frto(struct sock *sk); extern void tcp_enter_loss(struct sock *sk, int how); extern void tcp_clear_retrans(struct tcp_sock *tp); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 723cee63791f..a283fc12186e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1239,14 +1239,31 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ /* F-RTO can only be used if these conditions are satisfied: * - there must be some unsent new data * - the advertised window should allow sending it + * - TCP has never retransmitted anything other than head */ -int tcp_use_frto(const struct sock *sk) +int tcp_use_frto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb; + + if (!sysctl_tcp_frto || !sk->sk_send_head || + after(TCP_SKB_CB(sk->sk_send_head)->end_seq, + tp->snd_una + tp->snd_wnd)) + return 0; - return (sysctl_tcp_frto && sk->sk_send_head && - !after(TCP_SKB_CB(sk->sk_send_head)->end_seq, - tp->snd_una + tp->snd_wnd)); + /* Avoid expensive walking of rexmit queue if possible */ + if (tp->retrans_out > 1) + return 0; + + skb = skb_peek(&sk->sk_write_queue)->next; /* Skips head */ + sk_stream_for_retrans_queue_from(skb, sk) { + if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS) + return 0; + /* Short-circuit when first non-SACKed skb has been checked */ + if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) + break; + } + return 1; } /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO -- cgit v1.2.3-59-g8ed1b From 886236c1247ab5e2ad9c73f6e9a652e3ae3c8b07 Mon Sep 17 00:00:00 2001 From: John Heffner Date: Sun, 25 Mar 2007 19:21:45 -0700 Subject: [TCP]: Add RFC3742 Limited Slow-Start, controlled by variable sysctl_tcp_max_ssthresh. Signed-off-by: John Heffner Signed-off-by: David S. 
Miller --- include/linux/sysctl.h | 1 + include/net/tcp.h | 1 + net/ipv4/sysctl_net_ipv4.c | 8 ++++++++ net/ipv4/tcp_cong.c | 31 ++++++++++++++++++++++--------- 4 files changed, 32 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 9a8970bf99a6..98e0fd241a25 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -438,6 +438,7 @@ enum NET_CIPSOV4_RBM_STRICTVALID=121, NET_TCP_AVAIL_CONG_CONTROL=122, NET_TCP_ALLOWED_CONG_CONTROL=123, + NET_TCP_MAX_SSTHRESH=124, }; enum { diff --git a/include/net/tcp.h b/include/net/tcp.h index 7fd6b77519c3..6d09f5085f6a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -230,6 +230,7 @@ extern int sysctl_tcp_mtu_probing; extern int sysctl_tcp_base_mss; extern int sysctl_tcp_workaround_signed_windows; extern int sysctl_tcp_slow_start_after_idle; +extern int sysctl_tcp_max_ssthresh; extern atomic_t tcp_memory_allocated; extern atomic_t tcp_sockets_allocated; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 0aa304711a96..d68effe98e8d 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -803,6 +803,14 @@ ctl_table ipv4_table[] = { .proc_handler = &proc_allowed_congestion_control, .strategy = &strategy_allowed_congestion_control, }, + { + .ctl_name = NET_TCP_MAX_SSTHRESH, + .procname = "tcp_max_ssthresh", + .data = &sysctl_tcp_max_ssthresh, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, { .ctl_name = 0 } }; diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 34ae3f13483a..ccd88407e0cd 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -12,6 +12,8 @@ #include #include +int sysctl_tcp_max_ssthresh = 0; + static DEFINE_SPINLOCK(tcp_cong_list_lock); static LIST_HEAD(tcp_cong_list); @@ -274,10 +276,13 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) /* - * Linear increase during slow start + * Slow start (exponential increase) with + * RFC3742 Limited Slow Start (fast linear increase) support. */ void tcp_slow_start(struct tcp_sock *tp) { + int cnt = 0; + if (sysctl_tcp_abc) { /* RFC3465: Slow Start * TCP sender SHOULD increase cwnd by the number of @@ -286,17 +291,25 @@ void tcp_slow_start(struct tcp_sock *tp) */ if (tp->bytes_acked < tp->mss_cache) return; - - /* We MAY increase by 2 if discovered delayed ack */ - if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) { - if (tp->snd_cwnd < tp->snd_cwnd_clamp) - tp->snd_cwnd++; - } } + + if (sysctl_tcp_max_ssthresh > 0 && + tp->snd_cwnd > sysctl_tcp_max_ssthresh) + cnt += sysctl_tcp_max_ssthresh>>1; + else + cnt += tp->snd_cwnd; + + /* RFC3465: We MAY increase by 2 if discovered delayed ack */ + if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) + cnt <<= 1; tp->bytes_acked = 0; - if (tp->snd_cwnd < tp->snd_cwnd_clamp) - tp->snd_cwnd++; + tp->snd_cwnd_cnt += cnt; + while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { + tp->snd_cwnd_cnt -= tp->snd_cwnd; + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; + } } EXPORT_SYMBOL_GPL(tcp_slow_start); -- cgit v1.2.3-59-g8ed1b From 54287cc178cf85dbae0decec8b4dc190bff757ad Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 22 Feb 2007 03:20:44 -0800 Subject: [TCP]: Keep copied_seq, rcv_wup and rcv_next together. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I noticed in oprofile study a cache miss in tcp_rcv_established() to read copied_seq. 
ffffffff80400a80 : /* tcp_rcv_established total: 4034293   2.0400 */  55493  0.0281 :ffffffff80400bc9:   mov    0x4c8(%r12),%eax copied_seq 543103  0.2746 :ffffffff80400bd1:   cmp    0x3e0(%r12),%eax   rcv_nxt     if (tp->copied_seq == tp->rcv_nxt &&         len - tcp_header_len <= tp->ucopy.len) { In this function, the cache line 0x4c0 -> 0x500 is used only for this reading 'copied_seq' field. rcv_wup and copied_seq should be next to rcv_nxt field, to lower number of active cache lines in hot paths. (tcp_rcv_established(), tcp_poll(), ...) As you suggested, I changed tcp_create_openreq_child() so that these fields are changed together, to avoid adding a new store buffer stall. Patch is 64bit friendly (no new hole because of alignment constraints) Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 4 ++-- net/ipv4/tcp_minisocks.c | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 29d3089038ab..415193e171e4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -242,6 +242,8 @@ struct tcp_sock { * See RFC793 and RFC1122. The RFC writes these in capitals. */ u32 rcv_nxt; /* What we want to receive next */ + u32 copied_seq; /* Head of yet unread data */ + u32 rcv_wup; /* rcv_nxt on last window update sent */ u32 snd_nxt; /* Next sequence we send */ u32 snd_una; /* First byte we want an ack for */ @@ -307,10 +309,8 @@ struct tcp_sock { struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ u32 rcv_wnd; /* Current receiver window */ - u32 rcv_wup; /* rcv_nxt on last window update sent */ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ u32 pushed_seq; /* Last pushed seq, required to talk to windows */ - u32 copied_seq; /* Head of yet unread data */ /* SACKs data */ struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 6b5c64f3c925..706932726a11 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -387,8 +387,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, /* Now setup tcp_sock */ newtp = tcp_sk(newsk); newtp->pred_flags = 0; - newtp->rcv_nxt = treq->rcv_isn + 1; - newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1; + newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1; + newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1; tcp_prequeue_init(newtp); @@ -422,10 +422,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, tcp_set_ca_state(newsk, TCP_CA_Open); tcp_init_xmit_timers(newsk); skb_queue_head_init(&newtp->out_of_order_queue); - newtp->rcv_wup = treq->rcv_isn + 1; newtp->write_seq = treq->snt_isn + 1; newtp->pushed_seq = newtp->write_seq; - newtp->copied_seq = treq->rcv_isn + 1; newtp->rx_opt.saw_tstamp = 0; -- cgit v1.2.3-59-g8ed1b From e0ef57cc56c3c96493f9b0d6c77bb9608eeaa173 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 22 Feb 2007 22:52:59 -0800 Subject: [TCP]: Make snd_cwnd_clamp a u32. Signed-off-by: David S. 
Miller --- include/linux/tcp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 415193e171e4..18a468dd5055 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -302,7 +302,7 @@ struct tcp_sock { u32 snd_ssthresh; /* Slow start size threshold */ u32 snd_cwnd; /* Sending congestion window */ u16 snd_cwnd_cnt; /* Linear increase counter */ - u16 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */ + u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */ u32 snd_cwnd_used; u32 snd_cwnd_stamp; -- cgit v1.2.3-59-g8ed1b From 3cfe3baaf07c9e40a75f9a70662de56df1c246a8 Mon Sep 17 00:00:00 2001 From: Ilpo Järvinen Date: Tue, 27 Feb 2007 10:09:49 -0800 Subject: [TCP]: Add two new spurious RTO responses to FRTO MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New sysctl tcp_frto_response is added to select amongst these responses: - Rate halving based; reuses CA_CWR state (default) - Very conservative; used to be the only one available (=1) - Undo cwr; undoes ssthresh and cwnd reductions (=2) The response with rate halving requires a new parameter to tcp_enter_cwr because FRTO has already reduced ssthresh and doing a second reduction there has to be prevented. In addition, to keep things nice on 80 cols screen, a local variable was added. Signed-off-by: Ilpo Järvinen Signed-off-by: David S. Miller --- include/linux/sysctl.h | 1 + include/net/tcp.h | 3 ++- net/ipv4/sysctl_net_ipv4.c | 8 ++++++++ net/ipv4/tcp_input.c | 36 ++++++++++++++++++++++++++++++++---- net/ipv4/tcp_output.c | 2 +- 5 files changed, 44 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 98e0fd241a25..c9ccb550206f 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -439,6 +439,7 @@ enum NET_TCP_AVAIL_CONG_CONTROL=122, NET_TCP_ALLOWED_CONG_CONTROL=123, NET_TCP_MAX_SSTHRESH=124, + NET_TCP_FRTO_RESPONSE=125, }; enum { diff --git a/include/net/tcp.h b/include/net/tcp.h index 6d09f5085f6a..f0c9e3400a09 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -220,6 +220,7 @@ extern int sysctl_tcp_app_win; extern int sysctl_tcp_adv_win_scale; extern int sysctl_tcp_tw_reuse; extern int sysctl_tcp_frto; +extern int sysctl_tcp_frto_response; extern int sysctl_tcp_low_latency; extern int sysctl_tcp_dma_copybreak; extern int sysctl_tcp_nometrics_save; @@ -738,7 +739,7 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp) tp->left_out = tp->sacked_out + tp->lost_out; } -extern void tcp_enter_cwr(struct sock *sk); +extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); /* Slow start with delack produces 3 packets of burst, so that diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d68effe98e8d..6817d6485df5 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -646,6 +646,14 @@ ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = &proc_dointvec }, + { + .ctl_name = NET_TCP_FRTO_RESPONSE, + .procname = "tcp_frto_response", + .data = &sysctl_tcp_frto_response, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec + }, { .ctl_name = NET_TCP_LOW_LATENCY, .procname = "tcp_low_latency", diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f6ba07f0d816..322e43c56461 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -86,6 +86,7 @@ 
int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly; +int sysctl_tcp_frto_response __read_mostly; int sysctl_tcp_nometrics_save __read_mostly; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; @@ -762,15 +763,17 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst) } /* Set slow start threshold and cwnd not falling to slow start */ -void tcp_enter_cwr(struct sock *sk) +void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) { struct tcp_sock *tp = tcp_sk(sk); + const struct inet_connection_sock *icsk = inet_csk(sk); tp->prior_ssthresh = 0; tp->bytes_acked = 0; if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { tp->undo_marker = 0; - tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); + if (set_ssthresh) + tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1U); tp->snd_cwnd_cnt = 0; @@ -2003,7 +2006,7 @@ static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag) tp->retrans_stamp = 0; if (flag&FLAG_ECE) - tcp_enter_cwr(sk); + tcp_enter_cwr(sk, 1); if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { int state = TCP_CA_Open; @@ -2579,6 +2582,21 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp) tcp_moderate_cwnd(tp); } +/* A conservative spurious RTO response algorithm: reduce cwnd using + * rate halving and continue in congestion avoidance. + */ +static void tcp_ratehalving_spur_to_response(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + tcp_enter_cwr(sk, 0); + tp->high_seq = tp->frto_highmark; /* Smoother w/o this? - ij */ +} + +static void tcp_undo_spur_to_response(struct sock *sk) +{ + tcp_undo_cwr(sk, 1); +} + /* F-RTO spurious RTO detection algorithm (RFC4138) * * F-RTO affects during two new ACKs following RTO (well, almost, see inline @@ -2661,7 +2679,17 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag) tp->frto_counter = 2; return 1; } else /* frto_counter == 2 */ { - tcp_conservative_spur_to_response(tp); + switch (sysctl_tcp_frto_response) { + case 2: + tcp_undo_spur_to_response(sk); + break; + case 1: + tcp_conservative_spur_to_response(tp); + break; + default: + tcp_ratehalving_spur_to_response(sk); + break; + }; tp->frto_counter = 0; } return 0; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3c24881f2a65..d19b2f3b70fd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -545,7 +545,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, if (likely(err <= 0)) return err; - tcp_enter_cwr(sk); + tcp_enter_cwr(sk, 1); return net_xmit_eval(err); -- cgit v1.2.3-59-g8ed1b From fa438ccfdfd3f6db02c13b61b21454eb81cd6a13 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 4 Mar 2007 16:05:44 -0800 Subject: [NET]: Keep sk_backlog near sk_lock sk_backlog is a critical field of struct sock. (known famous words) It is (ab)used in hot paths, in particular in release_sock(), tcp_recvmsg(), tcp_v4_rcv(), sk_receive_skb(). It really makes sense to place it next to sk_lock, because sk_backlog is only used after sk_lock locked (and thus memory cache line in L1 cache). This should reduce cache misses and sk_lock acquisition time. (In theory, we could only move the head pointer near sk_lock, and leaving tail far away, because 'tail' is normally not so hot, but keep it simple :) ) Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/sock.h | 18 +++++++++--------- net/core/sock.c | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index 2c7d60ca3548..a3366c3c837a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -202,6 +202,15 @@ struct sock { unsigned short sk_type; int sk_rcvbuf; socket_lock_t sk_lock; + /* + * The backlog queue is special, it is always used with + * the per-socket spinlock held and requires low latency + * access. Therefore we special case it's implementation. + */ + struct { + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; wait_queue_head_t *sk_sleep; struct dst_entry *sk_dst_cache; struct xfrm_policy *sk_policy[2]; @@ -221,15 +230,6 @@ struct sock { int sk_rcvlowat; unsigned long sk_flags; unsigned long sk_lingertime; - /* - * The backlog queue is special, it is always used with - * the per-socket spinlock held and requires low latency - * access. Therefore we special case it's implementation. - */ - struct { - struct sk_buff *head; - struct sk_buff *tail; - } sk_backlog; struct sk_buff_head sk_error_queue; struct proto *sk_prot_creator; rwlock_t sk_callback_lock; diff --git a/net/core/sock.c b/net/core/sock.c index 27c4f62382bd..6d35d5775ba8 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -904,6 +904,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) sk_node_init(&newsk->sk_node); sock_lock_init(newsk); bh_lock_sock(newsk); + newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; atomic_set(&newsk->sk_rmem_alloc, 0); atomic_set(&newsk->sk_wmem_alloc, 0); @@ -923,7 +924,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) newsk->sk_wmem_queued = 0; newsk->sk_forward_alloc = 0; newsk->sk_send_head = NULL; - newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; sock_reset_flag(newsk, SOCK_DONE); -- cgit v1.2.3-59-g8ed1b From 9d729f72dca9406025bcfa9c1f660d71d9ef0ff5 Mon Sep 17 00:00:00 2001 From: James Morris Date: Sun, 4 Mar 2007 16:12:44 -0800 Subject: [NET]: Convert xtime.tv_sec to get_seconds() Where appropriate, convert references to xtime.tv_sec to the get_seconds() helper function. Signed-off-by: James Morris Signed-off-by: David S. Miller --- include/net/tcp.h | 4 ++-- net/ipv4/route.c | 2 +- net/ipv4/tcp_input.c | 6 +++--- net/ipv4/tcp_ipv4.c | 10 +++++----- net/ipv4/tcp_minisocks.c | 8 ++++---- net/ipv6/xfrm6_output.c | 2 +- net/rxrpc/main.c | 2 +- net/xfrm/xfrm_policy.c | 12 ++++++------ net/xfrm/xfrm_state.c | 6 +++--- 9 files changed, 26 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index f0c9e3400a09..181c0600af1c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1014,7 +1014,7 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int { if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0) return 0; - if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS) + if (get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS) return 0; /* RST segments are not recommended to carry timestamp, @@ -1029,7 +1029,7 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int However, we can relax time bounds for RST segments to MSL. 
*/ - if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) + if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) return 0; return 1; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 37e0d4d5cf94..0b3d7bf40f4e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2683,7 +2683,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, id = rt->peer->ip_id_count; if (rt->peer->tcp_ts_stamp) { ts = rt->peer->tcp_ts; - tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp; + tsage = get_seconds() - rt->peer->tcp_ts_stamp; } } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d894bbcc1d24..d0a3630f41a7 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2933,7 +2933,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, static inline void tcp_store_ts_recent(struct tcp_sock *tp) { tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; - tp->rx_opt.ts_recent_stamp = xtime.tv_sec; + tp->rx_opt.ts_recent_stamp = get_seconds(); } static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) @@ -2947,7 +2947,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) */ if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 || - xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS) + get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS) tcp_store_ts_recent(tp); } } @@ -2999,7 +2999,7 @@ static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff * { const struct tcp_sock *tp = tcp_sk(sk); return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && - xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && + get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && !tcp_disordered_ack(sk, skb)); } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f6793b4cc669..addac1110f94 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -149,7 +149,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) */ if (tcptw->tw_ts_recent_stamp && (twp == NULL || (sysctl_tcp_tw_reuse && - xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) { + get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; if (tp->write_seq == 0) tp->write_seq = 1; @@ -224,7 +224,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) * when trying new connection. 
*/ if (peer != NULL && - peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) { + peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) { tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; tp->rx_opt.ts_recent = peer->tcp_ts; } @@ -1351,7 +1351,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) (dst = inet_csk_route_req(sk, req)) != NULL && (peer = rt_get_peer((struct rtable *)dst)) != NULL && peer->v4daddr == saddr) { - if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL && + if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && (s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW) { NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED); @@ -1770,7 +1770,7 @@ int tcp_v4_remember_stamp(struct sock *sk) if (peer) { if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 || - (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec && + (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) { peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp; peer->tcp_ts = tp->rx_opt.ts_recent; @@ -1791,7 +1791,7 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw) const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 || - (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec && + (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) { peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp; peer->tcp_ts = tcptw->tw_ts_recent; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 706932726a11..ac4ce48a6599 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -149,7 +149,7 @@ kill_with_rst: tw->tw_substate = TCP_TIME_WAIT; tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (tmp_opt.saw_tstamp) { - tcptw->tw_ts_recent_stamp = xtime.tv_sec; + tcptw->tw_ts_recent_stamp = get_seconds(); tcptw->tw_ts_recent = tmp_opt.rcv_tsval; } @@ -208,7 +208,7 @@ kill: if (tmp_opt.saw_tstamp) { tcptw->tw_ts_recent = tmp_opt.rcv_tsval; - tcptw->tw_ts_recent_stamp = xtime.tv_sec; + tcptw->tw_ts_recent_stamp = get_seconds(); } inet_twsk_put(tw); @@ -458,7 +458,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, if (newtp->rx_opt.tstamp_ok) { newtp->rx_opt.ts_recent = req->ts_recent; - newtp->rx_opt.ts_recent_stamp = xtime.tv_sec; + newtp->rx_opt.ts_recent_stamp = get_seconds(); newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; } else { newtp->rx_opt.ts_recent_stamp = 0; @@ -504,7 +504,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, * it can be estimated (approximately) * from another data. 
*/ - tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<retrans); + tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<retrans); paws_reject = tcp_paws_check(&tmp_opt, th->rst); } } diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index d6d786b89d2b..8e4170f9a0da 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -76,7 +76,7 @@ static int xfrm6_output_one(struct sk_buff *skb) x->curlft.bytes += skb->len; x->curlft.packets++; if (x->props.mode == XFRM_MODE_ROUTEOPTIMIZATION) - x->lastused = (u64)xtime.tv_sec; + x->lastused = get_seconds(); spin_unlock_bh(&x->lock); diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c index baec1f7fd8b9..cead31b5bdf5 100644 --- a/net/rxrpc/main.c +++ b/net/rxrpc/main.c @@ -37,7 +37,7 @@ static int __init rxrpc_initialise(void) int ret; /* my epoch value */ - rxrpc_epoch = htonl(xtime.tv_sec); + rxrpc_epoch = htonl(get_seconds()); /* register the /proc interface */ #ifdef CONFIG_PROC_FS diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 785c3e39f062..194257554553 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -268,7 +268,7 @@ static inline unsigned long make_jiffies(long secs) static void xfrm_policy_timer(unsigned long data) { struct xfrm_policy *xp = (struct xfrm_policy*)data; - unsigned long now = (unsigned long)xtime.tv_sec; + unsigned long now = get_seconds(); long next = LONG_MAX; int warn = 0; int dir; @@ -690,7 +690,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) } policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir); hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index)); - policy->curlft.add_time = (unsigned long)xtime.tv_sec; + policy->curlft.add_time = get_seconds(); policy->curlft.use_time = 0; if (!mod_timer(&policy->timer, jiffies + HZ)) xfrm_pol_hold(policy); @@ -1133,7 +1133,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) old_pol = sk->sk_policy[dir]; sk->sk_policy[dir] = pol; if (pol) { - pol->curlft.add_time = (unsigned long)xtime.tv_sec; + pol->curlft.add_time = get_seconds(); pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir); __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); } @@ -1386,7 +1386,7 @@ restart: return 0; family = dst_orig->ops->family; - policy->curlft.use_time = (unsigned long)xtime.tv_sec; + policy->curlft.use_time = get_seconds(); pols[0] = policy; npols ++; xfrm_nr += pols[0]->xfrm_nr; @@ -1682,7 +1682,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, return 1; } - pol->curlft.use_time = (unsigned long)xtime.tv_sec; + pol->curlft.use_time = get_seconds(); pols[0] = pol; npols ++; @@ -1694,7 +1694,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, if (pols[1]) { if (IS_ERR(pols[1])) return 0; - pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec; + pols[1]->curlft.use_time = get_seconds(); npols ++; } } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index e3a0bcfa5df1..63a20e818164 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -233,7 +233,7 @@ static inline unsigned long make_jiffies(long secs) static void xfrm_timer_handler(unsigned long data) { struct xfrm_state *x = (struct xfrm_state*)data; - unsigned long now = (unsigned long)xtime.tv_sec; + unsigned long now = get_seconds(); long next = LONG_MAX; int warn = 0; int err = 0; @@ -326,7 +326,7 @@ struct xfrm_state *xfrm_state_alloc(void) init_timer(&x->rtimer); x->rtimer.function = 
xfrm_replay_timer_handler; x->rtimer.data = (unsigned long)x; - x->curlft.add_time = (unsigned long)xtime.tv_sec; + x->curlft.add_time = get_seconds(); x->lft.soft_byte_limit = XFRM_INF; x->lft.soft_packet_limit = XFRM_INF; x->lft.hard_byte_limit = XFRM_INF; @@ -1051,7 +1051,7 @@ EXPORT_SYMBOL(xfrm_state_update); int xfrm_state_check_expire(struct xfrm_state *x) { if (!x->curlft.use_time) - x->curlft.use_time = (unsigned long)xtime.tv_sec; + x->curlft.use_time = get_seconds(); if (x->km.state != XFRM_STATE_VALID) return -EINVAL; -- cgit v1.2.3-59-g8ed1b From 3927f2e8f9afa3424bb51ca81f7abac01ffd0005 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Sun, 25 Mar 2007 19:54:23 -0700 Subject: [NET]: div64_64 consolidate (rev3) Here is the current version of the 64 bit divide common code. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- include/asm-arm/div64.h | 3 +++ include/asm-generic/div64.h | 7 +++++++ include/asm-i386/div64.h | 4 ++++ include/asm-m68k/div64.h | 3 +++ include/asm-mips/div64.h | 11 ++++++++++- include/asm-um/div64.h | 1 + include/asm-xtensa/div64.h | 6 ++++++ lib/Makefile | 5 +++-- lib/div64.c | 22 ++++++++++++++++++++++ net/ipv4/tcp_cubic.c | 23 ----------------------- net/ipv4/tcp_yeah.c | 21 --------------------- net/ipv4/tcp_yeah.h | 1 + net/netfilter/xt_connbytes.c | 16 ---------------- 13 files changed, 60 insertions(+), 63 deletions(-) (limited to 'include') diff --git a/include/asm-arm/div64.h b/include/asm-arm/div64.h index 37e0a96e8789..0b5f881c3d85 100644 --- a/include/asm-arm/div64.h +++ b/include/asm-arm/div64.h @@ -2,6 +2,7 @@ #define __ASM_ARM_DIV64 #include +#include /* * The semantics of do_div() are: @@ -223,4 +224,6 @@ #endif +extern uint64_t div64_64(uint64_t dividend, uint64_t divisor); + #endif diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index 8f4e3193342e..a4a49370793c 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -30,6 +30,11 @@ __rem; \ }) +static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor) +{ + return dividend / divisor; +} + #elif BITS_PER_LONG == 32 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); @@ -49,6 +54,8 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); __rem; \ }) +extern uint64_t div64_64(uint64_t dividend, uint64_t divisor); + #else /* BITS_PER_LONG == ?? */ # error do_div() does not yet support the C64 diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h index 75c67c785bb8..438e980068bd 100644 --- a/include/asm-i386/div64.h +++ b/include/asm-i386/div64.h @@ -1,6 +1,8 @@ #ifndef __I386_DIV64 #define __I386_DIV64 +#include + /* * do_div() is NOT a C function. 
It wants to return * two values (the quotient and the remainder), but @@ -45,4 +47,6 @@ div_ll_X_l_rem(long long divs, long div, long *rem) return dum2; } + +extern uint64_t div64_64(uint64_t dividend, uint64_t divisor); #endif diff --git a/include/asm-m68k/div64.h b/include/asm-m68k/div64.h index 9f65de1a2480..33caad1628d4 100644 --- a/include/asm-m68k/div64.h +++ b/include/asm-m68k/div64.h @@ -1,6 +1,8 @@ #ifndef _M68K_DIV64_H #define _M68K_DIV64_H +#include + /* n = n / base; return rem; */ #define do_div(n, base) ({ \ @@ -23,4 +25,5 @@ __rem; \ }) +extern uint64_t div64_64(uint64_t dividend, uint64_t divisor); #endif /* _M68K_DIV64_H */ diff --git a/include/asm-mips/div64.h b/include/asm-mips/div64.h index d107832de1b6..66189f5f6399 100644 --- a/include/asm-mips/div64.h +++ b/include/asm-mips/div64.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2000, 2004 Maciej W. Rozycki - * Copyright (C) 2003 Ralf Baechle + * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -9,6 +9,8 @@ #ifndef _ASM_DIV64_H #define _ASM_DIV64_H +#include + #if (_MIPS_SZLONG == 32) #include @@ -78,6 +80,8 @@ __quot = __quot << 32 | __low; \ (n) = __quot; \ __mod; }) + +extern uint64_t div64_64(uint64_t dividend, uint64_t divisor); #endif /* (_MIPS_SZLONG == 32) */ #if (_MIPS_SZLONG == 64) @@ -101,6 +105,11 @@ (n) = __quot; \ __mod; }) +static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor) +{ + return dividend / divisor; +} + #endif /* (_MIPS_SZLONG == 64) */ #endif /* _ASM_DIV64_H */ diff --git a/include/asm-um/div64.h b/include/asm-um/div64.h index 1e17f7409cab..7b73b2cd5b34 100644 --- a/include/asm-um/div64.h +++ b/include/asm-um/div64.h @@ -3,4 +3,5 @@ #include "asm/arch/div64.h" +extern uint64_t div64_64(uint64_t dividend, uint64_t divisor); #endif diff --git a/include/asm-xtensa/div64.h b/include/asm-xtensa/div64.h index c4a105776383..20965e3af1dd 100644 --- a/include/asm-xtensa/div64.h +++ b/include/asm-xtensa/div64.h @@ -11,9 +11,15 @@ #ifndef _XTENSA_DIV64_H #define _XTENSA_DIV64_H +#include + #define do_div(n,base) ({ \ int __res = n % ((unsigned int) base); \ n /= (unsigned int) base; \ __res; }) +static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor) +{ + return dividend / divisor; +} #endif diff --git a/lib/Makefile b/lib/Makefile index 992a39ef9ffd..ae57f357fec0 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -4,7 +4,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ - idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \ + idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o lib-$(CONFIG_MMU) += ioremap.o @@ -12,7 +12,8 @@ lib-$(CONFIG_SMP) += cpumask.o lib-y += kobject.o kref.o kobject_uevent.o klist.o -obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o +obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ + bust_spinlocks.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/div64.c b/lib/div64.c index 365719f84832..c3d7655cdfb5 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -58,4 +58,26 @@ uint32_t __div64_32(uint64_t *n, uint32_t base) EXPORT_SYMBOL(__div64_32); +/* 64bit divisor, dividend and result. 
dynamic precision */ +uint64_t div64_64(uint64_t dividend, uint64_t divisor) +{ + uint32_t d = divisor; + + if (divisor > 0xffffffffULL) { + unsigned int shift = fls(divisor >> 32); + + d = divisor >> shift; + dividend >>= shift; + } + + /* avoid 64 bit division if possible */ + if (dividend >> 32) + do_div(dividend, d); + else + dividend = (uint32_t) dividend / d; + + return dividend; +} +EXPORT_SYMBOL(div64_64); + #endif /* BITS_PER_LONG == 32 */ diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 9a582fb4ef9f..6f08adbda54e 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -51,8 +51,6 @@ MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_ module_param(tcp_friendliness, int, 0644); MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness"); -#include - /* BIC TCP Parameters */ struct bictcp { u32 cnt; /* increase cwnd by 1 after ACKs */ @@ -93,27 +91,6 @@ static void bictcp_init(struct sock *sk) tcp_sk(sk)->snd_ssthresh = initial_ssthresh; } -/* 64bit divisor, dividend and result. dynamic precision */ -static inline u_int64_t div64_64(u_int64_t dividend, u_int64_t divisor) -{ - u_int32_t d = divisor; - - if (divisor > 0xffffffffULL) { - unsigned int shift = fls(divisor >> 32); - - d = divisor >> shift; - dividend >>= shift; - } - - /* avoid 64 bit division if possible */ - if (dividend >> 32) - do_div(dividend, d); - else - dividend = (uint32_t) dividend / d; - - return dividend; -} - /* * calculate the cubic root of x using Newton-Raphson */ diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index 815e020e98fe..18355a2608e1 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -73,27 +73,6 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked) yeah->pkts_acked = pkts_acked; } -/* 64bit divisor, dividend and result. dynamic precision */ -static inline u64 div64_64(u64 dividend, u64 divisor) -{ - u32 d = divisor; - - if (divisor > 0xffffffffULL) { - unsigned int shift = fls(divisor >> 32); - - d = divisor >> shift; - dividend >>= shift; - } - - /* avoid 64 bit division if possible */ - if (dividend >> 32) - do_div(dividend, d); - else - dividend = (u32) dividend / d; - - return dividend; -} - static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 seq_rtt, u32 in_flight, int flag) { diff --git a/net/ipv4/tcp_yeah.h b/net/ipv4/tcp_yeah.h index b3255dba4e2d..a62d82038fd0 100644 --- a/net/ipv4/tcp_yeah.h +++ b/net/ipv4/tcp_yeah.h @@ -2,6 +2,7 @@ #include #include #include +#include #include diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index 5e32dfa2668b..302043bc41b2 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c @@ -24,22 +24,6 @@ MODULE_AUTHOR("Harald Welte "); MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection"); MODULE_ALIAS("ipt_connbytes"); -/* 64bit divisor, dividend and result. 
dynamic precision */ -static u_int64_t div64_64(u_int64_t dividend, u_int64_t divisor) -{ - u_int32_t d = divisor; - - if (divisor > 0xffffffffULL) { - unsigned int shift = fls(divisor >> 32); - - d = divisor >> shift; - dividend >>= shift; - } - - do_div(dividend, d); - return dividend; -} - static int match(const struct sk_buff *skb, const struct net_device *in, -- cgit v1.2.3-59-g8ed1b From b7aa0bf70c4afb9e38be25f5c0922498d0f8684c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 19 Apr 2007 16:16:32 -0700 Subject: [NET]: convert network timestamps to ktime_t We currently use a special structure (struct skb_timeval) and plain 'struct timeval' to store packet timestamps in sk_buffs and struct sock. This has some drawbacks : - Fixed resolution of micro second. - Waste of space on 64bit platforms where sizeof(struct timeval)=16 I suggest using ktime_t that is a nice abstraction of high resolution time services, currently capable of nanosecond resolution. As sizeof(ktime_t) is 8 bytes, using ktime_t in 'struct sock' permits a 8 byte shrink of this structure on 64bit architectures. Some other structures also benefit from this size reduction (struct ipq in ipv4/ip_fragment.c, struct frag_queue in ipv6/reassembly.c, ...) Once this ktime infrastructure adopted, we can more easily provide nanosecond resolution on top of it. (ioctl SIOCGSTAMPNS and/or SO_TIMESTAMPNS/SCM_TIMESTAMPNS) Note : this patch includes a bug correction in compat_sock_get_timestamp() where a "err = 0;" was missing (so this syscall returned -ENOENT instead of 0) Signed-off-by: Eric Dumazet CC: Stephen Hemminger CC: John find Signed-off-by: David S. Miller --- include/linux/skbuff.h | 26 +++++--------------------- include/net/sock.h | 18 +++++++++--------- kernel/time.c | 1 + net/bridge/netfilter/ebt_ulog.c | 6 ++++-- net/compat.c | 15 ++++++++++----- net/core/dev.c | 19 ++++--------------- net/core/sock.c | 16 +++++++++------- net/econet/af_econet.c | 2 +- net/ipv4/ip_fragment.c | 8 ++++---- net/ipv4/netfilter/ip_queue.c | 6 ++++-- net/ipv4/netfilter/ipt_ULOG.c | 8 +++++--- net/ipv6/exthdrs.c | 2 +- net/ipv6/netfilter/ip6_queue.c | 6 ++++-- net/ipv6/netfilter/nf_conntrack_reasm.c | 6 +++--- net/ipv6/reassembly.c | 6 +++--- net/ipx/af_ipx.c | 4 ++-- net/netfilter/nfnetlink_log.c | 8 ++++---- net/netfilter/nfnetlink_queue.c | 8 ++++---- net/packet/af_packet.c | 8 +++++--- net/sunrpc/svcsock.c | 10 +++------- 20 files changed, 85 insertions(+), 98 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5992f65b4184..f9441b5f8d13 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -27,6 +27,7 @@ #include #include #include +#include #define HAVE_ALLOC_SKB /* For the drivers to know */ #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ @@ -156,11 +157,6 @@ struct skb_shared_info { #define SKB_DATAREF_SHIFT 16 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) -struct skb_timeval { - u32 off_sec; - u32 off_usec; -}; - enum { SKB_FCLONE_UNAVAILABLE, @@ -233,7 +229,7 @@ struct sk_buff { struct sk_buff *prev; struct sock *sk; - struct skb_timeval tstamp; + ktime_t tstamp; struct net_device *dev; int iif; /* 4 byte hole on 64 bit*/ @@ -1365,26 +1361,14 @@ extern void skb_add_mtu(int mtu); */ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp) { - stamp->tv_sec = skb->tstamp.off_sec; - stamp->tv_usec = skb->tstamp.off_usec; + *stamp = ktime_to_timeval(skb->tstamp); } -/** - * skb_set_timestamp - set timestamp of a skb - * @skb: skb to set 
stamp of - * @stamp: pointer to struct timeval to get stamp from - * - * Timestamps are stored in the skb as offsets to a base timestamp. - * This function converts a struct timeval to an offset and stores - * it in the skb. - */ -static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp) +static inline void __net_timestamp(struct sk_buff *skb) { - skb->tstamp.off_sec = stamp->tv_sec; - skb->tstamp.off_usec = stamp->tv_usec; + skb->tstamp = ktime_get_real(); } -extern void __net_timestamp(struct sk_buff *skb); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); diff --git a/include/net/sock.h b/include/net/sock.h index a3366c3c837a..9583639090d2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -244,7 +244,7 @@ struct sock { struct sk_filter *sk_filter; void *sk_protinfo; struct timer_list sk_timer; - struct timeval sk_stamp; + ktime_t sk_stamp; struct socket *sk_socket; void *sk_user_data; struct page *sk_sndmsg_page; @@ -1307,19 +1307,19 @@ static inline int sock_intr_errno(long timeo) static __inline__ void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { - struct timeval stamp; + ktime_t kt = skb->tstamp; - skb_get_timestamp(skb, &stamp); if (sock_flag(sk, SOCK_RCVTSTAMP)) { + struct timeval tv; /* Race occurred between timestamp enabling and packet receiving. Fill in the current time for now. */ - if (stamp.tv_sec == 0) - do_gettimeofday(&stamp); - skb_set_timestamp(skb, &stamp); - put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval), - &stamp); + if (kt.tv64 == 0) + kt = ktime_get_real(); + skb->tstamp = kt; + tv = ktime_to_timeval(kt); + put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(tv), &tv); } else - sk->sk_stamp = stamp; + sk->sk_stamp = kt; } /** diff --git a/kernel/time.c b/kernel/time.c index 2f47888e46c9..a1439f421d0b 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -469,6 +469,7 @@ struct timeval ns_to_timeval(const s64 nsec) return tv; } +EXPORT_SYMBOL(ns_to_timeval); /* * Convert jiffies to milliseconds and back. 
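Most of the remaining hunks in this patch apply one small mechanical pattern: keep skb->tstamp as a ktime_t, take a fresh stamp with ktime_get_real() when none has been recorded, and convert to a struct timeval only at the userspace/ABI boundary with ktime_to_timeval(). A minimal sketch of that pattern follows, for orientation only; it is not a hunk from this patch and the function name is made up:

#include <linux/skbuff.h>
#include <linux/ktime.h>

/* Illustrative sketch of the conversion pattern used by the hunks below. */
static void example_tstamp_pattern(struct sk_buff *skb, struct timeval *tv)
{
        if (skb->tstamp.tv64 == 0)              /* no timestamp recorded yet */
                skb->tstamp = ktime_get_real(); /* high-resolution wall clock */

        *tv = ktime_to_timeval(skb->tstamp);    /* legacy (sec, usec) view */
}
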
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index 8e15cc47f6c0..259f5c370f3c 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -130,6 +130,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb, unsigned int group = uloginfo->nlgroup; ebt_ulog_buff_t *ub = &ulog_buffers[group]; spinlock_t *lock = &ub->lock; + ktime_t kt; if ((uloginfo->cprange == 0) || (uloginfo->cprange > skb->len + ETH_HLEN)) @@ -164,9 +165,10 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb, /* Fill in the ulog data */ pm->version = EBT_ULOG_VERSION; - do_gettimeofday(&pm->stamp); + kt = ktime_get_real(); + pm->stamp = ktime_to_timeval(kt); if (ub->qlen == 1) - skb_set_timestamp(ub->skb, &pm->stamp); + ub->skb->tstamp = kt; pm->data_len = copy_len; pm->mark = skb->mark; pm->hook = hooknr; diff --git a/net/compat.c b/net/compat.c index 1f32866d09b7..17c2710b2b93 100644 --- a/net/compat.c +++ b/net/compat.c @@ -545,15 +545,20 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) struct compat_timeval __user *ctv = (struct compat_timeval __user*) userstamp; int err = -ENOENT; + struct timeval tv; if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_enable_timestamp(sk); - if (sk->sk_stamp.tv_sec == -1) + tv = ktime_to_timeval(sk->sk_stamp); + if (tv.tv_sec == -1) return err; - if (sk->sk_stamp.tv_sec == 0) - do_gettimeofday(&sk->sk_stamp); - if (put_user(sk->sk_stamp.tv_sec, &ctv->tv_sec) || - put_user(sk->sk_stamp.tv_usec, &ctv->tv_usec)) + if (tv.tv_sec == 0) { + sk->sk_stamp = ktime_get_real(); + tv = ktime_to_timeval(sk->sk_stamp); + } + err = 0; + if (put_user(tv.tv_sec, &ctv->tv_sec) || + put_user(tv.tv_usec, &ctv->tv_usec)) err = -EFAULT; return err; } diff --git a/net/core/dev.c b/net/core/dev.c index 4dc93cc4d5b7..582db646cc54 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1031,23 +1031,12 @@ void net_disable_timestamp(void) atomic_dec(&netstamp_needed); } -void __net_timestamp(struct sk_buff *skb) -{ - struct timeval tv; - - do_gettimeofday(&tv); - skb_set_timestamp(skb, &tv); -} -EXPORT_SYMBOL(__net_timestamp); - static inline void net_timestamp(struct sk_buff *skb) { if (atomic_read(&netstamp_needed)) __net_timestamp(skb); - else { - skb->tstamp.off_sec = 0; - skb->tstamp.off_usec = 0; - } + else + skb->tstamp.tv64 = 0; } /* @@ -1577,7 +1566,7 @@ int netif_rx(struct sk_buff *skb) if (netpoll_rx(skb)) return NET_RX_DROP; - if (!skb->tstamp.off_sec) + if (!skb->tstamp.tv64) net_timestamp(skb); /* @@ -1769,7 +1758,7 @@ int netif_receive_skb(struct sk_buff *skb) if (skb->dev->poll && netpoll_rx(skb)) return NET_RX_DROP; - if (!skb->tstamp.off_sec) + if (!skb->tstamp.tv64) net_timestamp(skb); if (!skb->iif) diff --git a/net/core/sock.c b/net/core/sock.c index 6d35d5775ba8..6ddb3664b993 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1512,8 +1512,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; - sk->sk_stamp.tv_sec = -1L; - sk->sk_stamp.tv_usec = -1L; + sk->sk_stamp = ktime_set(-1L, -1L); atomic_set(&sk->sk_refcnt, 1); } @@ -1554,14 +1553,17 @@ EXPORT_SYMBOL(release_sock); int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) { + struct timeval tv; if (!sock_flag(sk, SOCK_TIMESTAMP)) sock_enable_timestamp(sk); - if (sk->sk_stamp.tv_sec == -1) + tv = ktime_to_timeval(sk->sk_stamp); + if (tv.tv_sec == -1) return -ENOENT; - if (sk->sk_stamp.tv_sec == 0) 
- do_gettimeofday(&sk->sk_stamp); - return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ? - -EFAULT : 0; + if (tv.tv_sec == 0) { + sk->sk_stamp = ktime_get_real(); + tv = ktime_to_timeval(sk->sk_stamp); + } + return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0; } EXPORT_SYMBOL(sock_get_timestamp); diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index bc12e36263f0..f573eddc6034 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -162,7 +162,7 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock, err = memcpy_toiovec(msg->msg_iov, skb->data, copied); if (err) goto out_free; - skb_get_timestamp(skb, &sk->sk_stamp); + sk->sk_stamp = skb->tstamp; if (msg->msg_name) memcpy(msg->msg_name, skb->cb, msg->msg_namelen); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index b6f055380373..e10be7d7752d 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -92,7 +92,7 @@ struct ipq { spinlock_t lock; atomic_t refcnt; struct timer_list timer; /* when will this queue expire? */ - struct timeval stamp; + ktime_t stamp; int iif; unsigned int rid; struct inet_peer *peer; @@ -592,7 +592,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) if (skb->dev) qp->iif = skb->dev->ifindex; skb->dev = NULL; - skb_get_timestamp(skb, &qp->stamp); + qp->stamp = skb->tstamp; qp->meat += skb->len; atomic_add(skb->truesize, &ip_frag_mem); if (offset == 0) @@ -674,7 +674,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev) head->next = NULL; head->dev = dev; - skb_set_timestamp(head, &qp->stamp); + head->tstamp = qp->stamp; iph = head->nh.iph; iph->frag_off = 0; @@ -734,7 +734,7 @@ struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user) return NULL; } -void ipfrag_init(void) +void __init ipfrag_init(void) { ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ (jiffies ^ (jiffies >> 6))); diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index a14798a850d7..5842f1aa973a 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -197,6 +197,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) struct sk_buff *skb; struct ipq_packet_msg *pmsg; struct nlmsghdr *nlh; + struct timeval tv; read_lock_bh(&queue_lock); @@ -241,8 +242,9 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) pmsg->packet_id = (unsigned long )entry; pmsg->data_len = data_len; - pmsg->timestamp_sec = entry->skb->tstamp.off_sec; - pmsg->timestamp_usec = entry->skb->tstamp.off_usec; + tv = ktime_to_timeval(entry->skb->tstamp); + pmsg->timestamp_sec = tv.tv_sec; + pmsg->timestamp_usec = tv.tv_usec; pmsg->mark = entry->skb->mark; pmsg->hook = entry->info->hook; pmsg->hw_protocol = entry->skb->protocol; diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 9acc018766f2..9718b666a380 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -187,6 +187,7 @@ static void ipt_ulog_packet(unsigned int hooknum, ulog_packet_msg_t *pm; size_t size, copy_len; struct nlmsghdr *nlh; + struct timeval tv; /* ffs == find first bit set, necessary because userspace * is already shifting groupnumber, but we need unshifted. 
@@ -232,13 +233,14 @@ static void ipt_ulog_packet(unsigned int hooknum, pm = NLMSG_DATA(nlh); /* We might not have a timestamp, get one */ - if (skb->tstamp.off_sec == 0) + if (skb->tstamp.tv64 == 0) __net_timestamp((struct sk_buff *)skb); /* copy hook, prefix, timestamp, payload, etc. */ pm->data_len = copy_len; - put_unaligned(skb->tstamp.off_sec, &pm->timestamp_sec); - put_unaligned(skb->tstamp.off_usec, &pm->timestamp_usec); + tv = ktime_to_timeval(skb->tstamp); + put_unaligned(tv.tv_sec, &pm->timestamp_sec); + put_unaligned(tv.tv_usec, &pm->timestamp_usec); put_unaligned(skb->mark, &pm->mark); pm->hook = hooknum; if (prefix != NULL) diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index fb39604c3d09..a963a31e5fb6 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -255,7 +255,7 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff) ipv6_addr_copy(&ipv6h->saddr, &hao->addr); ipv6_addr_copy(&hao->addr, &tmp_addr); - if (skb->tstamp.off_sec == 0) + if (skb->tstamp.tv64 == 0) __net_timestamp(skb); return 1; diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index fdb30a5916e5..66a2c4135251 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -195,6 +195,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) struct sk_buff *skb; struct ipq_packet_msg *pmsg; struct nlmsghdr *nlh; + struct timeval tv; read_lock_bh(&queue_lock); @@ -239,8 +240,9 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) pmsg->packet_id = (unsigned long )entry; pmsg->data_len = data_len; - pmsg->timestamp_sec = entry->skb->tstamp.off_sec; - pmsg->timestamp_usec = entry->skb->tstamp.off_usec; + tv = ktime_to_timeval(entry->skb->tstamp); + pmsg->timestamp_sec = tv.tv_sec; + pmsg->timestamp_usec = tv.tv_usec; pmsg->mark = entry->skb->mark; pmsg->hook = entry->info->hook; pmsg->hw_protocol = entry->skb->protocol; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 15ab1e3e8b56..c311b9a12ca6 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -82,7 +82,7 @@ struct nf_ct_frag6_queue struct sk_buff *fragments; int len; int meat; - struct timeval stamp; + ktime_t stamp; unsigned int csum; __u8 last_in; /* has first/last segment arrived? */ #define COMPLETE 4 @@ -542,7 +542,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, fq->fragments = skb; skb->dev = NULL; - skb_get_timestamp(skb, &fq->stamp); + fq->stamp = skb->tstamp; fq->meat += skb->len; atomic_add(skb->truesize, &nf_ct_frag6_mem); @@ -648,7 +648,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) head->next = NULL; head->dev = dev; - skb_set_timestamp(head, &fq->stamp); + head->tstamp = fq->stamp; head->nh.ipv6h->payload_len = htons(payload_len); /* Yes, and fold redundant checksum back. 8) */ diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 7034c54e5010..1dde449379fb 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -88,7 +88,7 @@ struct frag_queue int len; int meat; int iif; - struct timeval stamp; + ktime_t stamp; unsigned int csum; __u8 last_in; /* has first/last segment arrived? 
*/ #define COMPLETE 4 @@ -562,7 +562,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, if (skb->dev) fq->iif = skb->dev->ifindex; skb->dev = NULL; - skb_get_timestamp(skb, &fq->stamp); + fq->stamp = skb->tstamp; fq->meat += skb->len; atomic_add(skb->truesize, &ip6_frag_mem); @@ -663,7 +663,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, head->next = NULL; head->dev = dev; - skb_set_timestamp(head, &fq->stamp); + head->tstamp = fq->stamp; head->nh.ipv6h->payload_len = htons(payload_len); IP6CB(head)->nhoff = nhoff; diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index cac35a77f069..6c6c0a3a0ab5 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1807,8 +1807,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, copied); if (rc) goto out_free; - if (skb->tstamp.off_sec) - skb_get_timestamp(skb, &sk->sk_stamp); + if (skb->tstamp.tv64) + sk->sk_stamp = skb->tstamp; msg->msg_namelen = sizeof(*sipx); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 5cb30ebba0f4..5eeebd2efa7a 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -509,11 +509,11 @@ __build_packet_message(struct nfulnl_instance *inst, NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); } - if (skb->tstamp.off_sec) { + if (skb->tstamp.tv64) { struct nfulnl_msg_packet_timestamp ts; - - ts.sec = cpu_to_be64(skb->tstamp.off_sec); - ts.usec = cpu_to_be64(skb->tstamp.off_usec); + struct timeval tv = ktime_to_timeval(skb->tstamp); + ts.sec = cpu_to_be64(tv.tv_sec); + ts.usec = cpu_to_be64(tv.tv_usec); NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); } diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index d9ce4a71d0f3..cfbee39f61d6 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -495,11 +495,11 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); } - if (entskb->tstamp.off_sec) { + if (entskb->tstamp.tv64) { struct nfqnl_msg_packet_timestamp ts; - - ts.sec = cpu_to_be64(entskb->tstamp.off_sec); - ts.usec = cpu_to_be64(entskb->tstamp.off_usec); + struct timeval tv = ktime_to_timeval(entskb->tstamp); + ts.sec = cpu_to_be64(tv.tv_sec); + ts.usec = cpu_to_be64(tv.tv_usec); NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 28d47e8f2873..f9866a8456a1 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -582,6 +582,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER; unsigned short macoff, netoff; struct sk_buff *copy_skb = NULL; + struct timeval tv; if (skb->pkt_type == PACKET_LOOPBACK) goto drop; @@ -656,12 +657,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe h->tp_snaplen = snaplen; h->tp_mac = macoff; h->tp_net = netoff; - if (skb->tstamp.off_sec == 0) { + if (skb->tstamp.tv64 == 0) { __net_timestamp(skb); sock_enable_timestamp(sk); } - h->tp_sec = skb->tstamp.off_sec; - h->tp_usec = skb->tstamp.off_usec; + tv = ktime_to_timeval(skb->tstamp); + h->tp_sec = tv.tv_sec; + h->tp_usec = tv.tv_usec; sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h))); sll->sll_halen = 0; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 2772fee93881..22f61aee4824 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -798,16 +798,12 @@ svc_udp_recvfrom(struct 
svc_rqst *rqstp) dprintk("svc: recvfrom returned error %d\n", -err); } rqstp->rq_addrlen = sizeof(rqstp->rq_addr); - if (skb->tstamp.off_sec == 0) { - struct timeval tv; - - tv.tv_sec = xtime.tv_sec; - tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC; - skb_set_timestamp(skb, &tv); + if (skb->tstamp.tv64 == 0) { + skb->tstamp = ktime_get_real(); /* Don't enable netstamp, sunrpc doesn't need that much accuracy */ } - skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp); + svsk->sk_sk->sk_stamp = skb->tstamp; set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */ /* -- cgit v1.2.3-59-g8ed1b From 95c385b4d5a71b8ad552aecaa968ea46d7da2f6a Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Wed, 25 Apr 2007 17:08:10 -0700 Subject: [IPV6] ADDRCONF: Optimistic Duplicate Address Detection (RFC 4429) Support. Nominally an autoconfigured IPv6 address is added to an interface in the Tentative state (as per RFC 2462). Addresses in this state remain in this state while the Duplicate Address Detection process operates on them to determine their uniqueness on the network. During this period, these tentative addresses may not be used for communication, increasing the time before a node may be able to communicate on a network. Using Optimistic Duplicate Address Detection, autoconfigured addresses may be used immediately for communication on the network, as long as certain rules are followed to avoid conflicts with other nodes during the Duplicate Address Detection process. Signed-off-by: Neil Horman Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- include/linux/if_addr.h | 1 + include/linux/ipv6.h | 4 ++ include/net/addrconf.h | 4 +- net/ipv6/Kconfig | 10 +++++ net/ipv6/addrconf.c | 106 +++++++++++++++++++++++++++++++++++++++++------- net/ipv6/ip6_output.c | 35 ++++++++++++++++ net/ipv6/mcast.c | 4 +- net/ipv6/ndisc.c | 84 +++++++++++++++++++++++++++----------- 8 files changed, 207 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/include/linux/if_addr.h b/include/linux/if_addr.h index d557e4ce9b6b..43f3bedaafd3 100644 --- a/include/linux/if_addr.h +++ b/include/linux/if_addr.h @@ -39,6 +39,7 @@ enum #define IFA_F_TEMPORARY IFA_F_SECONDARY #define IFA_F_NODAD 0x02 +#define IFA_F_OPTIMISTIC 0x04 #define IFA_F_HOMEADDRESS 0x10 #define IFA_F_DEPRECATED 0x20 #define IFA_F_TENTATIVE 0x40 diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 713eb5eaa81f..e046b22a2222 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -178,6 +178,9 @@ struct ipv6_devconf { #endif __s32 proxy_ndp; __s32 accept_source_route; +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + __s32 optimistic_dad; +#endif void *sysctl; }; @@ -208,6 +211,7 @@ enum { DEVCONF_PROXY_NDP, __DEVCONF_OPTIMISTIC_DAD, DEVCONF_ACCEPT_SOURCE_ROUTE, + DEVCONF_OPTIMISTIC_DAD, DEVCONF_MAX }; diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 88df8fc814e4..f3531d0bcd05 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -73,7 +73,9 @@ extern int ipv6_get_saddr(struct dst_entry *dst, extern int ipv6_dev_get_saddr(struct net_device *dev, struct in6_addr *daddr, struct in6_addr *saddr); -extern int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *); +extern int ipv6_get_lladdr(struct net_device *dev, + struct in6_addr *addr, + unsigned char banned_flags); extern int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2); extern void addrconf_join_solict(struct net_device *dev, diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 79682efb14be..8e5d54f23b49 100644 --- 
a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -57,6 +57,16 @@ config IPV6_ROUTE_INFO If unsure, say N. +config IPV6_OPTIMISTIC_DAD + bool "IPv6: Enable RFC 4429 Optimistic DAD (EXPERIMENTAL)" + depends on IPV6 && EXPERIMENTAL + ---help--- + This is experimental support for optimistic Duplicate + Address Detection. It allows for autoconfigured addresses + to be used more quickly. + + If unsure, say N. + config INET6_AH tristate "IPv6: AH transformation" depends on IPV6 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e035896657bc..38274c20eaa2 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -530,6 +530,16 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, ifa->rt = rt; + /* + * part one of RFC 4429, section 3.3 + * We should not configure an address as + * optimistic if we do not yet know the link + * layer address of our nexhop router + */ + + if (rt->rt6i_nexthop == NULL) + ifa->flags &= ~IFA_F_OPTIMISTIC; + ifa->idev = idev; in6_dev_hold(idev); /* For caller */ @@ -706,6 +716,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i int tmp_plen; int ret = 0; int max_addresses; + u32 addr_flags; write_lock(&idev->lock); if (ift) { @@ -763,10 +774,17 @@ retry: spin_unlock_bh(&ifp->lock); write_unlock(&idev->lock); + + addr_flags = IFA_F_TEMPORARY; + /* set in addrconf_prefix_rcv() */ + if (ifp->flags & IFA_F_OPTIMISTIC) + addr_flags |= IFA_F_OPTIMISTIC; + ift = !max_addresses || ipv6_count_addresses(idev) < max_addresses ? ipv6_add_addr(idev, &addr, tmp_plen, - ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK, IFA_F_TEMPORARY) : NULL; + ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK, + addr_flags) : NULL; if (!ift || IS_ERR(ift)) { in6_ifa_put(ifp); in6_dev_put(idev); @@ -898,13 +916,14 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, * - Tentative Address (RFC2462 section 5.4) * - A tentative address is not considered * "assigned to an interface" in the traditional - * sense. + * sense, unless it is also flagged as optimistic. * - Candidate Source Address (section 4) * - In any case, anycast addresses, multicast * addresses, and the unspecified address MUST * NOT be included in a candidate set. 
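 * (Editorial note, not part of the original comment: with this patch an
 * optimistic address does remain a candidate for source selection; the
 * check below only skips addresses that are tentative and not flagged
 * optimistic.)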
*/ - if (ifa->flags & IFA_F_TENTATIVE) + if ((ifa->flags & IFA_F_TENTATIVE) && + (!(ifa->flags & IFA_F_OPTIMISTIC))) continue; if (unlikely(score.addr_type == IPV6_ADDR_ANY || score.addr_type & IPV6_ADDR_MULTICAST)) { @@ -963,15 +982,17 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, } } - /* Rule 3: Avoid deprecated address */ + /* Rule 3: Avoid deprecated and optimistic addresses */ if (hiscore.rule < 3) { if (ipv6_saddr_preferred(hiscore.addr_type) || - !(ifa_result->flags & IFA_F_DEPRECATED)) + (((ifa_result->flags & + (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0))) hiscore.attrs |= IPV6_SADDR_SCORE_PREFERRED; hiscore.rule++; } if (ipv6_saddr_preferred(score.addr_type) || - !(ifa->flags & IFA_F_DEPRECATED)) { + (((ifa_result->flags & + (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0))) { score.attrs |= IPV6_SADDR_SCORE_PREFERRED; if (!(hiscore.attrs & IPV6_SADDR_SCORE_PREFERRED)) { score.rule = 3; @@ -1111,7 +1132,8 @@ int ipv6_get_saddr(struct dst_entry *dst, EXPORT_SYMBOL(ipv6_get_saddr); -int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr) +int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, + unsigned char banned_flags) { struct inet6_dev *idev; int err = -EADDRNOTAVAIL; @@ -1122,7 +1144,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr) read_lock_bh(&idev->lock); for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { - if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { + if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { ipv6_addr_copy(addr, &ifp->addr); err = 0; break; @@ -1674,6 +1696,13 @@ ok: if (ifp == NULL && valid_lft) { int max_addresses = in6_dev->cnf.max_addresses; + u32 addr_flags = 0; + +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + if (in6_dev->cnf.optimistic_dad && + !ipv6_devconf.forwarding) + addr_flags = IFA_F_OPTIMISTIC; +#endif /* Do not allow to create too much of autoconfigured * addresses; this would be too easy way to crash kernel. 
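The optimistic flag is gated the same way here as in addrconf_add_linklocal() further down: the interface's optimistic_dad setting must be on and the global ipv6_devconf.forwarding must be off, and manually configured addresses never get the flag (per the RFC 4429 section 3.1 note added in inet6_addr_add() below). A sketch of that shared gate, folded into a hypothetical helper that does not exist in the patch:

#include <linux/if_addr.h>
#include <linux/ipv6.h>
#include <net/if_inet6.h>

/* Hypothetical helper: flags to start an autoconfigured address with. */
static u32 optimistic_addr_flags(const struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        /* sysctl enabled on this device and node is not forwarding */
        if (idev->cnf.optimistic_dad && !ipv6_devconf.forwarding)
                return IFA_F_OPTIMISTIC;
#endif
        return 0;
}
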
@@ -1681,7 +1710,8 @@ ok: if (!max_addresses || ipv6_count_addresses(in6_dev) < max_addresses) ifp = ipv6_add_addr(in6_dev, &addr, pinfo->prefix_len, - addr_type&IPV6_ADDR_SCOPE_MASK, 0); + addr_type&IPV6_ADDR_SCOPE_MASK, + addr_flags); if (!ifp || IS_ERR(ifp)) { in6_dev_put(in6_dev); @@ -1889,6 +1919,11 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev, jiffies_to_clock_t(valid_lft * HZ), flags); + /* + * Note that section 3.1 of RFC 4429 indicates + * that the Optimistic flag should not be set for + * manually configured addresses + */ addrconf_dad_start(ifp, 0); in6_ifa_put(ifp); addrconf_verify(0); @@ -2065,8 +2100,16 @@ static void init_loopback(struct net_device *dev) static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr) { struct inet6_ifaddr * ifp; + u32 addr_flags = IFA_F_PERMANENT; + +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + if (idev->cnf.optimistic_dad && + !ipv6_devconf.forwarding) + addr_flags |= IFA_F_OPTIMISTIC; +#endif - ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, IFA_F_PERMANENT); + + ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags); if (!IS_ERR(ifp)) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); addrconf_dad_start(ifp, 0); @@ -2134,7 +2177,7 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev) { struct in6_addr lladdr; - if (!ipv6_get_lladdr(link_dev, &lladdr)) { + if (!ipv6_get_lladdr(link_dev, &lladdr, IFA_F_TENTATIVE)) { addrconf_add_linklocal(idev, &lladdr); return 0; } @@ -2479,7 +2522,11 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp) unsigned long rand_num; struct inet6_dev *idev = ifp->idev; - rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? : 1); + if (ifp->flags & IFA_F_OPTIMISTIC) + rand_num = 0; + else + rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? 
: 1); + ifp->probes = idev->cnf.dad_transmits; addrconf_mod_timer(ifp, AC_DAD, rand_num); } @@ -2501,7 +2548,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || !(ifp->flags&IFA_F_TENTATIVE) || ifp->flags & IFA_F_NODAD) { - ifp->flags &= ~IFA_F_TENTATIVE; + ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC); spin_unlock_bh(&ifp->lock); read_unlock_bh(&idev->lock); @@ -2521,6 +2568,14 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) addrconf_dad_stop(ifp); return; } + + /* + * Optimistic nodes can start receiving + * Frames right away + */ + if(ifp->flags & IFA_F_OPTIMISTIC) + ip6_ins_rt(ifp->rt); + addrconf_dad_kick(ifp); spin_unlock_bh(&ifp->lock); out: @@ -2545,7 +2600,7 @@ static void addrconf_dad_timer(unsigned long data) * DAD was successful */ - ifp->flags &= ~IFA_F_TENTATIVE; + ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC); spin_unlock_bh(&ifp->lock); read_unlock_bh(&idev->lock); @@ -3364,6 +3419,9 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, #endif array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp; array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route; +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad; +#endif } static inline size_t inet6_if_nlmsg_size(void) @@ -3578,7 +3636,14 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) switch (event) { case RTM_NEWADDR: - ip6_ins_rt(ifp->rt); + /* + * If the address was optimistic + * we inserted the route at the start of + * our DAD process, so we don't need + * to do it again + */ + if (!(ifp->rt->rt6i_node)) + ip6_ins_rt(ifp->rt); if (ifp->idev->cnf.forwarding) addrconf_join_anycast(ifp); break; @@ -3899,6 +3964,17 @@ static struct addrconf_sysctl_table .mode = 0644, .proc_handler = &proc_dointvec, }, +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + { + .ctl_name = CTL_UNNUMBERED, + .procname = "optimistic_dad", + .data = &ipv6_devconf.optimistic_dad, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + + }, +#endif { .ctl_name = 0, /* sentinel */ } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 0d60fbc59d8f..7e25043d826c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -863,6 +863,41 @@ static int ip6_dst_lookup_tail(struct sock *sk, goto out_err_release; } +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + /* + * Here if the dst entry we've looked up + * has a neighbour entry that is in the INCOMPLETE + * state and the src address from the flow is + * marked as OPTIMISTIC, we release the found + * dst entry and replace it instead with the + * dst entry of the nexthop router + */ + if (!((*dst)->neighbour->nud_state & NUD_VALID)) { + struct inet6_ifaddr *ifp; + struct flowi fl_gw; + int redirect; + + ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1); + + redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); + if (ifp) + in6_ifa_put(ifp); + + if (redirect) { + /* + * We need to get the dst entry for the + * default router instead + */ + dst_release(*dst); + memcpy(&fl_gw, fl, sizeof(struct flowi)); + memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr)); + *dst = ip6_route_output(sk, &fl_gw); + if ((err = (*dst)->error)) + goto out_err_release; + } + } +#endif + return 0; out_err_release: diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index a8d6625ec782..924e24907c3e 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1411,7 +1411,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) skb_reserve(skb, 
LL_RESERVED_SPACE(dev)); - if (ipv6_get_lladdr(dev, &addr_buf)) { + if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { /* : * use unspecified address as the source address * when a valid link-local address is not available. @@ -1791,7 +1791,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) skb_reserve(skb, LL_RESERVED_SPACE(dev)); - if (ipv6_get_lladdr(dev, &addr_buf)) { + if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { /* : * use unspecified address as the source address * when a valid link-local address is not available. diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 886c5be14906..b79b00042310 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -449,6 +449,8 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, ifp = ipv6_get_ifaddr(solicited_addr, dev, 1); if (ifp) { src_addr = solicited_addr; + if (ifp->flags & IFA_F_OPTIMISTIC) + override = 0; in6_ifa_put(ifp); } else { if (ipv6_dev_get_saddr(dev, daddr, &tmpaddr)) @@ -544,7 +546,8 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, int send_llinfo; if (saddr == NULL) { - if (ipv6_get_lladdr(dev, &addr_buf)) + if (ipv6_get_lladdr(dev, &addr_buf, + (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC))) return; saddr = &addr_buf; } @@ -624,9 +627,33 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, struct sk_buff *skb; struct icmp6hdr *hdr; __u8 * opt; + struct inet6_ifaddr *ifp; + int send_sllao = dev->addr_len; int len; int err; + +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + /* + * According to section 2.2 of RFC 4429, we must not + * send router solicitations with a sllao from + * optimistic addresses, but we may send the solicitation + * if we don't include the sllao. So here we check + * if our address is optimistic, and if so, we + * supress the inclusion of the sllao. + */ + if (send_sllao) { + ifp = ipv6_get_ifaddr(saddr, dev, 1); + if (ifp) { + if (ifp->flags & IFA_F_OPTIMISTIC) { + send_sllao=0; + in6_ifa_put(ifp); + } + } else { + send_sllao = 0; + } + } +#endif ndisc_flow_init(&fl, NDISC_ROUTER_SOLICITATION, saddr, daddr, dev->ifindex); @@ -639,7 +666,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, return; len = sizeof(struct icmp6hdr); - if (dev->addr_len) + if (send_sllao) len += ndisc_opt_addr_space(dev); skb = sock_alloc_send_skb(sk, @@ -666,7 +693,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, opt = (u8*) (hdr + 1); - if (dev->addr_len) + if (send_sllao) ndisc_fill_addr_option(opt, ND_OPT_SOURCE_LL_ADDR, dev->dev_addr, dev->addr_len, dev->type); @@ -798,28 +825,39 @@ static void ndisc_recv_ns(struct sk_buff *skb) inc = ipv6_addr_is_multicast(daddr); if ((ifp = ipv6_get_ifaddr(&msg->target, dev, 1)) != NULL) { - if (ifp->flags & IFA_F_TENTATIVE) { - /* Address is tentative. If the source - is unspecified address, it is someone - does DAD, otherwise we ignore solicitations - until DAD timer expires. 
- */ - if (!dad) + + if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { + if (dad) { + if (dev->type == ARPHRD_IEEE802_TR) { + unsigned char *sadr = skb->mac.raw; + if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 && + sadr[9] == dev->dev_addr[1] && + sadr[10] == dev->dev_addr[2] && + sadr[11] == dev->dev_addr[3] && + sadr[12] == dev->dev_addr[4] && + sadr[13] == dev->dev_addr[5]) { + /* looped-back to us */ + goto out; + } + } + + /* + * We are colliding with another node + * who is doing DAD + * so fail our DAD process + */ + addrconf_dad_failure(ifp); goto out; - if (dev->type == ARPHRD_IEEE802_TR) { - unsigned char *sadr = skb->mac.raw; - if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 && - sadr[9] == dev->dev_addr[1] && - sadr[10] == dev->dev_addr[2] && - sadr[11] == dev->dev_addr[3] && - sadr[12] == dev->dev_addr[4] && - sadr[13] == dev->dev_addr[5]) { - /* looped-back to us */ + } else { + /* + * This is not a dad solicitation. + * If we are an optimistic node, + * we should respond. + * Otherwise, we should ignore it. + */ + if (!(ifp->flags & IFA_F_OPTIMISTIC)) goto out; - } } - addrconf_dad_failure(ifp); - return; } idev = ifp->idev; @@ -1408,7 +1446,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, dev = skb->dev; - if (ipv6_get_lladdr(dev, &saddr_buf)) { + if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: no link-local address on %s\n", dev->name); -- cgit v1.2.3-59-g8ed1b From fc910a27839584209726537698b596576940add4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 25 Mar 2007 20:27:59 -0700 Subject: [NETLINK]: Limit NLMSG_GOODSIZE to 8K. Signed-off-by: David S. Miller --- include/linux/netlink.h | 11 +++++++++-- include/linux/skbuff.h | 8 +++++--- 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 2a20f488ac1b..a9d3ad5bc80f 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -171,9 +171,16 @@ int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol); /* * skb should fit one page. This choice is good for headerless malloc. + * But we should limit to 8K so that userspace does not have to + * use enormous buffer sizes on recvmsg() calls just to avoid + * MSG_TRUNC when PAGE_SIZE is very large. 
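+ *
+ * (Editorial illustration, not part of the original comment: with 4 KiB
+ * pages NLMSG_GOODSIZE becomes SKB_WITH_OVERHEAD(4096), i.e. 4096 minus
+ * sizeof(struct skb_shared_info) rounded down to a cache-line multiple,
+ * and NLMSG_DEFAULT_SIZE is that value minus NLMSG_HDRLEN; with 64 KiB
+ * pages the 8192UL cap applies instead.)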
*/ -#define NLMSG_GOODORDER 0 -#define NLMSG_GOODSIZE (SKB_MAX_ORDER(0, NLMSG_GOODORDER)) +#if PAGE_SIZE < 8192UL +#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE) +#else +#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL) +#endif + #define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f9441b5f8d13..30089adb2e78 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -39,9 +39,11 @@ #define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ ~(SMP_CACHE_BYTES - 1)) -#define SKB_MAX_ORDER(X, ORDER) (((PAGE_SIZE << (ORDER)) - (X) - \ - sizeof(struct skb_shared_info)) & \ - ~(SMP_CACHE_BYTES - 1)) +#define SKB_WITH_OVERHEAD(X) \ + (((X) - sizeof(struct skb_shared_info)) & \ + ~(SMP_CACHE_BYTES - 1)) +#define SKB_MAX_ORDER(X, ORDER) \ + SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2)) -- cgit v1.2.3-59-g8ed1b From 759e5d006462d53fb708daa8284b4ad909415da1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 25 Mar 2007 20:10:56 -0700 Subject: [UDP]: Clean up UDP-Lite receive checksum This patch eliminates some duplicate code for the verification of receive checksums between UDP-Lite and UDP. It does this by introducing __skb_checksum_complete_head which is identical to __skb_checksum_complete_head apart from the fact that it takes a length parameter rather than computing the first skb->len bytes. As a result UDP-Lite will be able to use hardware checksum offload for packets which do not use partial coverage checksums. It also means that UDP-Lite loopback no longer does unnecessary checksum verification. If any NICs start support UDP-Lite this would also start working automatically. This patch removes the assumption that msg_flags has MSG_TRUNC clear upon entry in recvmsg. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- include/linux/skbuff.h | 1 + include/net/udp.h | 5 +-- include/net/udplite.h | 39 ++++---------------- net/core/datagram.c | 10 ++++-- net/ipv4/udp.c | 96 ++++++++++++++++++++++++++------------------------ net/ipv4/udplite.c | 2 +- net/ipv6/udp.c | 74 +++++++++++++++++++++----------------- net/ipv6/udplite.c | 2 +- 8 files changed, 109 insertions(+), 120 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 30089adb2e78..df229bd5f1a9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1372,6 +1372,7 @@ static inline void __net_timestamp(struct sk_buff *skb) } +extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); /** diff --git a/include/net/udp.h b/include/net/udp.h index 1b921fa81474..4a9699f79281 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -72,10 +72,7 @@ struct sk_buff; */ static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb) { - if (! 
UDP_SKB_CB(skb)->partial_cov) - return __skb_checksum_complete(skb); - return csum_fold(skb_checksum(skb, 0, UDP_SKB_CB(skb)->cscov, - skb->csum)); + return __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov); } static inline int udp_lib_checksum_complete(struct sk_buff *skb) diff --git a/include/net/udplite.h b/include/net/udplite.h index 67ac51424307..d99df75fe54c 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -47,11 +47,10 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) return 1; } - UDP_SKB_CB(skb)->partial_cov = 0; cscov = ntohs(uh->len); if (cscov == 0) /* Indicates that full coverage is required. */ - cscov = skb->len; + ; else if (cscov < 8 || cscov > skb->len) { /* * Coverage length violates RFC 3828: log and discard silently. @@ -60,42 +59,16 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) cscov, skb->len); return 1; - } else if (cscov < skb->len) + } else if (cscov < skb->len) { UDP_SKB_CB(skb)->partial_cov = 1; - - UDP_SKB_CB(skb)->cscov = cscov; - - /* - * There is no known NIC manufacturer supporting UDP-Lite yet, - * hence ip_summed is always (re-)set to CHECKSUM_NONE. - */ - skb->ip_summed = CHECKSUM_NONE; + UDP_SKB_CB(skb)->cscov = cscov; + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + } return 0; } -static __inline__ int udplite4_csum_init(struct sk_buff *skb, struct udphdr *uh) -{ - int rc = udplite_checksum_init(skb, uh); - - if (!rc) - skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, - skb->nh.iph->daddr, - skb->len, IPPROTO_UDPLITE, 0); - return rc; -} - -static __inline__ int udplite6_csum_init(struct sk_buff *skb, struct udphdr *uh) -{ - int rc = udplite_checksum_init(skb, uh); - - if (!rc) - skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr, - &skb->nh.ipv6h->daddr, - skb->len, IPPROTO_UDPLITE, 0)); - return rc; -} - static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh) { int cscov = up->len; diff --git a/net/core/datagram.c b/net/core/datagram.c index 186212b5b7da..cb056f476126 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -411,11 +411,11 @@ fault: return -EFAULT; } -__sum16 __skb_checksum_complete(struct sk_buff *skb) +__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) { __sum16 sum; - sum = csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)); + sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); if (likely(!sum)) { if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) netdev_rx_csum_fault(skb->dev); @@ -423,6 +423,12 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) } return sum; } +EXPORT_SYMBOL(__skb_checksum_complete_head); + +__sum16 __skb_checksum_complete(struct sk_buff *skb) +{ + return __skb_checksum_complete_head(skb, skb->len); +} EXPORT_SYMBOL(__skb_checksum_complete); /** diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index fc620a7c1db4..86368832d481 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -810,7 +810,9 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; - int copied, err, copy_only, is_udplite = IS_UDPLITE(sk); + unsigned int ulen, copied; + int err; + int is_udplite = IS_UDPLITE(sk); /* * Check any passed addresses @@ -826,28 +828,25 @@ try_again: if (!skb) goto out; - copied = skb->len - sizeof(struct udphdr); - if (copied > len) { - copied = len; + ulen = skb->len - sizeof(struct udphdr); + 
copied = len; + if (copied > ulen) + copied = ulen; + else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; - } /* - * Decide whether to checksum and/or copy data. - * - * UDP: checksum may have been computed in HW, - * (re-)compute it if message is truncated. - * UDP-Lite: always needs to checksum, no HW support. + * If checksum is needed at all, try to do it while copying the + * data. If the data is truncated, or if we only want a partial + * coverage checksum (UDP-Lite), do it before the copy. */ - copy_only = (skb->ip_summed==CHECKSUM_UNNECESSARY); - if (is_udplite || (!copy_only && msg->msg_flags&MSG_TRUNC)) { - if (__udp_lib_checksum_complete(skb)) + if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { + if (udp_lib_checksum_complete(skb)) goto csum_copy_err; - copy_only = 1; } - if (copy_only) + if (skb->ip_summed == CHECKSUM_UNNECESSARY) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied ); else { @@ -875,7 +874,7 @@ try_again: err = copied; if (flags & MSG_TRUNC) - err = skb->len - sizeof(struct udphdr); + err = ulen; out_free: skb_free_datagram(sk, skb); @@ -1095,10 +1094,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) } } - if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) { - if (__udp_lib_checksum_complete(skb)) + if (sk->sk_filter) { + if (udp_lib_checksum_complete(skb)) goto drop; - skb->ip_summed = CHECKSUM_UNNECESSARY; } if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { @@ -1166,25 +1164,36 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb, * Otherwise, csum completion requires chacksumming packet body, * including udp header and folding it to skb->csum. */ -static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh) +static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, + int proto) { + int err; + + UDP_SKB_CB(skb)->partial_cov = 0; + UDP_SKB_CB(skb)->cscov = skb->len; + + if (proto == IPPROTO_UDPLITE) { + err = udplite_checksum_init(skb, uh); + if (err) + return err; + } + if (uh->check == 0) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr, - skb->len, IPPROTO_UDP, skb->csum )) + skb->len, proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } if (skb->ip_summed != CHECKSUM_UNNECESSARY) skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr, - skb->len, IPPROTO_UDP, 0); + skb->len, proto, 0); /* Probably, we should checksum udp header (it should be in cache * in any case) and data in tiny packets (< rx copybreak). */ - /* UDP = UDP-Lite with a non-partial checksum coverage */ - UDP_SKB_CB(skb)->partial_cov = 0; + return 0; } /* @@ -1192,7 +1201,7 @@ static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh) */ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], - int is_udplite) + int proto) { struct sock *sk; struct udphdr *uh = skb->h.uh; @@ -1211,19 +1220,16 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], if (ulen > skb->len) goto short_packet; - if(! is_udplite ) { /* UDP validates ulen. */ - + if (proto == IPPROTO_UDP) { + /* UDP validates ulen. */ if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) goto short_packet; uh = skb->h.uh; - - udp4_csum_init(skb, uh); - - } else { /* UDP-Lite validates cscov. 
*/ - if (udplite4_csum_init(skb, uh)) - goto csum_error; } + if (udp4_csum_init(skb, uh, proto)) + goto csum_error; + if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); @@ -1250,7 +1256,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], if (udp_lib_checksum_complete(skb)) goto csum_error; - UDP_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite); + UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); /* @@ -1262,7 +1268,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], short_packet: LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n", - is_udplite? "-Lite" : "", + proto == IPPROTO_UDPLITE ? "-Lite" : "", NIPQUAD(saddr), ntohs(uh->source), ulen, @@ -1277,21 +1283,21 @@ csum_error: * the network is concerned, anyway) as per 4.1.3.4 (MUST). */ LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n", - is_udplite? "-Lite" : "", + proto == IPPROTO_UDPLITE ? "-Lite" : "", NIPQUAD(saddr), ntohs(uh->source), NIPQUAD(daddr), ntohs(uh->dest), ulen); drop: - UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); + UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return(0); } __inline__ int udp_rcv(struct sk_buff *skb) { - return __udp4_lib_rcv(skb, udp_hash, 0); + return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP); } int udp_destroy_sock(struct sock *sk) @@ -1486,15 +1492,11 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) struct sk_buff *skb; spin_lock_bh(&rcvq->lock); - while ((skb = skb_peek(rcvq)) != NULL) { - if (udp_lib_checksum_complete(skb)) { - UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite); - __skb_unlink(skb, rcvq); - kfree_skb(skb); - } else { - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } + while ((skb = skb_peek(rcvq)) != NULL && + udp_lib_checksum_complete(skb)) { + UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite); + __skb_unlink(skb, rcvq); + kfree_skb(skb); } spin_unlock_bh(&rcvq->lock); diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index b28fe1edf98b..f34fd686a8f1 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -31,7 +31,7 @@ static int udplite_v4_get_port(struct sock *sk, unsigned short snum) static int udplite_rcv(struct sk_buff *skb) { - return __udp4_lib_rcv(skb, udplite_hash, 1); + return __udp4_lib_rcv(skb, udplite_hash, IPPROTO_UDPLITE); } static void udplite_err(struct sk_buff *skb, u32 info) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 3413fc22ce4a..733371689795 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -120,8 +120,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); struct sk_buff *skb; - size_t copied; - int err, copy_only, is_udplite = IS_UDPLITE(sk); + unsigned int ulen, copied; + int err; + int is_udplite = IS_UDPLITE(sk); if (addr_len) *addr_len=sizeof(struct sockaddr_in6); @@ -134,24 +135,25 @@ try_again: if (!skb) goto out; - copied = skb->len - sizeof(struct udphdr); - if (copied > len) { - copied = len; + ulen = skb->len - sizeof(struct udphdr); + copied = len; + if (copied > ulen) + copied = ulen; + else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; - } /* - * Decide whether to checksum and/or copy data. + * If checksum is needed at all, try to do it while copying the + * data. 
If the data is truncated, or if we only want a partial + * coverage checksum (UDP-Lite), do it before the copy. */ - copy_only = (skb->ip_summed==CHECKSUM_UNNECESSARY); - if (is_udplite || (!copy_only && msg->msg_flags&MSG_TRUNC)) { - if (__udp_lib_checksum_complete(skb)) + if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { + if (udp_lib_checksum_complete(skb)) goto csum_copy_err; - copy_only = 1; } - if (copy_only) + if (skb->ip_summed == CHECKSUM_UNNECESSARY) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied ); else { @@ -194,7 +196,7 @@ try_again: err = copied; if (flags & MSG_TRUNC) - err = skb->len - sizeof(struct udphdr); + err = ulen; out_free: skb_free_datagram(sk, skb); @@ -368,9 +370,20 @@ out: return 0; } -static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh) - +static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, + int proto) { + int err; + + UDP_SKB_CB(skb)->partial_cov = 0; + UDP_SKB_CB(skb)->cscov = skb->len; + + if (proto == IPPROTO_UDPLITE) { + err = udplite_checksum_init(skb, uh); + if (err) + return err; + } + if (uh->check == 0) { /* RFC 2460 section 8.1 says that we SHOULD log this error. Well, it is reasonable. @@ -380,20 +393,19 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh) } if (skb->ip_summed == CHECKSUM_COMPLETE && !csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, - skb->len, IPPROTO_UDP, skb->csum )) + skb->len, proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; if (skb->ip_summed != CHECKSUM_UNNECESSARY) skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, - skb->len, IPPROTO_UDP, - 0)); + skb->len, proto, 0)); - return (UDP_SKB_CB(skb)->partial_cov = 0); + return 0; } int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], - int is_udplite) + int proto) { struct sk_buff *skb = *pskb; struct sock *sk; @@ -413,7 +425,8 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], if (ulen > skb->len) goto short_packet; - if(! is_udplite ) { /* UDP validates ulen. */ + if (proto == IPPROTO_UDP) { + /* UDP validates ulen. */ /* Check for jumbo payload */ if (ulen == 0) @@ -429,15 +442,11 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], daddr = &skb->nh.ipv6h->daddr; uh = skb->h.uh; } - - if (udp6_csum_init(skb, uh)) - goto discard; - - } else { /* UDP-Lite validates cscov. */ - if (udplite6_csum_init(skb, uh)) - goto discard; } + if (udp6_csum_init(skb, uh, proto)) + goto discard; + /* * Multicast receive code */ @@ -459,7 +468,7 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], if (udp_lib_checksum_complete(skb)) goto discard; - UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite); + UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); @@ -475,17 +484,18 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], short_packet: LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n", - is_udplite? "-Lite" : "", ulen, skb->len); + proto == IPPROTO_UDPLITE ? 
"-Lite" : "", + ulen, skb->len); discard: - UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); + UDP6_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return(0); } static __inline__ int udpv6_rcv(struct sk_buff **pskb) { - return __udp6_lib_rcv(pskb, udp_hash, 0); + return __udp6_lib_rcv(pskb, udp_hash, IPPROTO_UDP); } /* diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 629f97162fbc..f54016a55004 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -19,7 +19,7 @@ DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6) __read_mostly; static int udplitev6_rcv(struct sk_buff **pskb) { - return __udp6_lib_rcv(pskb, udplite_hash, 1); + return __udp6_lib_rcv(pskb, udplite_hash, IPPROTO_UDPLITE); } static void udplitev6_err(struct sk_buff *skb, -- cgit v1.2.3-59-g8ed1b From fe067e8ab5e0dc5ca3c54634924c628da92090b4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 7 Mar 2007 12:12:44 -0800 Subject: [TCP]: Abstract out all write queue operations. This allows the write queue implementation to be changed, for example, to one which allows fast interval searching. Signed-off-by: David S. Miller --- include/net/sock.h | 21 ---------- include/net/tcp.h | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/tcp.c | 32 +++++++------- net/ipv4/tcp_input.c | 64 ++++++++++++++++++---------- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/tcp_output.c | 95 +++++++++++++++++++---------------------- net/ipv4/tcp_timer.c | 10 ++--- 7 files changed, 221 insertions(+), 117 deletions(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index 9583639090d2..2974bacc8850 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -710,15 +710,6 @@ static inline void sk_stream_mem_reclaim(struct sock *sk) __sk_stream_mem_reclaim(sk); } -static inline void sk_stream_writequeue_purge(struct sock *sk) -{ - struct sk_buff *skb; - - while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) - sk_stream_free_skb(sk, skb); - sk_stream_mem_reclaim(sk); -} - static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb) { return (int)skb->truesize <= sk->sk_forward_alloc || @@ -1256,18 +1247,6 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) return page; } -#define sk_stream_for_retrans_queue(skb, sk) \ - for (skb = (sk)->sk_write_queue.next; \ - (skb != (sk)->sk_send_head) && \ - (skb != (struct sk_buff *)&(sk)->sk_write_queue); \ - skb = skb->next) - -/*from STCP for fast SACK Process*/ -#define sk_stream_for_retrans_queue_from(skb, sk) \ - for (; (skb != (sk)->sk_send_head) && \ - (skb != (struct sk_buff *)&(sk)->sk_write_queue); \ - skb = skb->next) - /* * Default write policy as shown to user space via poll/select/SIGIO */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 181c0600af1c..6dacc352dcf1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1162,6 +1162,120 @@ static inline void tcp_put_md5sig_pool(void) put_cpu(); } +/* write queue abstraction */ +static inline void tcp_write_queue_purge(struct sock *sk) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) + sk_stream_free_skb(sk, skb); + sk_stream_mem_reclaim(sk); +} + +static inline struct sk_buff *tcp_write_queue_head(struct sock *sk) +{ + struct sk_buff *skb = sk->sk_write_queue.next; + if (skb == (struct sk_buff *) &sk->sk_write_queue) + return NULL; + return skb; +} + +static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk) +{ + struct sk_buff *skb = sk->sk_write_queue.prev; + if (skb == 
(struct sk_buff *) &sk->sk_write_queue) + return NULL; + return skb; +} + +static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb) +{ + return skb->next; +} + +#define tcp_for_write_queue(skb, sk) \ + for (skb = (sk)->sk_write_queue.next; \ + (skb != (struct sk_buff *)&(sk)->sk_write_queue); \ + skb = skb->next) + +#define tcp_for_write_queue_from(skb, sk) \ + for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\ + skb = skb->next) + +static inline struct sk_buff *tcp_send_head(struct sock *sk) +{ + return sk->sk_send_head; +} + +static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb) +{ + sk->sk_send_head = skb->next; + if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue) + sk->sk_send_head = NULL; +} + +static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) +{ + if (sk->sk_send_head == skb_unlinked) + sk->sk_send_head = NULL; +} + +static inline void tcp_init_send_head(struct sock *sk) +{ + sk->sk_send_head = NULL; +} + +static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) +{ + __skb_queue_tail(&sk->sk_write_queue, skb); +} + +static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) +{ + __tcp_add_write_queue_tail(sk, skb); + + /* Queue it, remembering where we must start sending. */ + if (sk->sk_send_head == NULL) + sk->sk_send_head = skb; +} + +static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb) +{ + __skb_queue_head(&sk->sk_write_queue, skb); +} + +/* Insert buff after skb on the write queue of sk. */ +static inline void tcp_insert_write_queue_after(struct sk_buff *skb, + struct sk_buff *buff, + struct sock *sk) +{ + __skb_append(skb, buff, &sk->sk_write_queue); +} + +/* Insert skb between prev and next on the write queue of sk. 
*/ +static inline void tcp_insert_write_queue_before(struct sk_buff *new, + struct sk_buff *skb, + struct sock *sk) +{ + __skb_insert(new, skb->prev, skb, &sk->sk_write_queue); +} + +static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) +{ + __skb_unlink(skb, &sk->sk_write_queue); +} + +static inline int tcp_skb_is_last(const struct sock *sk, + const struct sk_buff *skb) +{ + return skb->next == (struct sk_buff *)&sk->sk_write_queue; +} + +static inline int tcp_write_queue_empty(struct sock *sk) +{ + return skb_queue_empty(&sk->sk_write_queue); +} + /* /proc */ enum tcp_seq_states { TCP_SEQ_STATE_LISTENING, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3834b10b5115..689f9330f1b9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -470,10 +470,8 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, tcb->flags = TCPCB_FLAG_ACK; tcb->sacked = 0; skb_header_release(skb); - __skb_queue_tail(&sk->sk_write_queue, skb); + tcp_add_write_queue_tail(sk, skb); sk_charge_skb(sk, skb); - if (!sk->sk_send_head) - sk->sk_send_head = skb; if (tp->nonagle & TCP_NAGLE_PUSH) tp->nonagle &= ~TCP_NAGLE_PUSH; } @@ -491,8 +489,8 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags, int mss_now, int nonagle) { - if (sk->sk_send_head) { - struct sk_buff *skb = sk->sk_write_queue.prev; + if (tcp_send_head(sk)) { + struct sk_buff *skb = tcp_write_queue_tail(sk); if (!(flags & MSG_MORE) || forced_push(tp)) tcp_mark_push(tp, skb); tcp_mark_urg(tp, flags, skb); @@ -526,13 +524,13 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse goto do_error; while (psize > 0) { - struct sk_buff *skb = sk->sk_write_queue.prev; + struct sk_buff *skb = tcp_write_queue_tail(sk); struct page *page = pages[poffset / PAGE_SIZE]; int copy, i, can_coalesce; int offset = poffset % PAGE_SIZE; int size = min_t(size_t, psize, PAGE_SIZE - offset); - if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) { + if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { new_segment: if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; @@ -589,7 +587,7 @@ new_segment: if (forced_push(tp)) { tcp_mark_push(tp, skb); __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); - } else if (skb == sk->sk_send_head) + } else if (skb == tcp_send_head(sk)) tcp_push_one(sk, mss_now); continue; @@ -704,9 +702,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, while (seglen > 0) { int copy; - skb = sk->sk_write_queue.prev; + skb = tcp_write_queue_tail(sk); - if (!sk->sk_send_head || + if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { new_segment: @@ -833,7 +831,7 @@ new_segment: if (forced_push(tp)) { tcp_mark_push(tp, skb); __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); - } else if (skb == sk->sk_send_head) + } else if (skb == tcp_send_head(sk)) tcp_push_one(sk, mss_now); continue; @@ -860,9 +858,11 @@ out: do_fault: if (!skb->len) { - if (sk->sk_send_head == skb) - sk->sk_send_head = NULL; - __skb_unlink(skb, &sk->sk_write_queue); + tcp_unlink_write_queue(skb, sk); + /* It is the one place in all of TCP, except connection + * reset, where we can be unlinking the send_head. 
+ */ + tcp_check_send_head(sk, skb); sk_stream_free_skb(sk, skb); } @@ -1732,7 +1732,7 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); - sk_stream_writequeue_purge(sk); + tcp_write_queue_purge(sk); __skb_queue_purge(&tp->out_of_order_queue); #ifdef CONFIG_NET_DMA __skb_queue_purge(&sk->sk_async_wait_queue); @@ -1758,7 +1758,7 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_set_ca_state(sk, TCP_CA_Open); tcp_clear_retrans(tp); inet_csk_delack_init(sk); - sk->sk_send_head = NULL; + tcp_init_send_head(sk); tp->rx_opt.saw_tstamp = 0; tcp_sack_reset(&tp->rx_opt); __sk_dst_reset(sk); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d0a3630f41a7..22d0bb03c5da 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1044,7 +1044,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ cached_skb = tp->fastpath_skb_hint; cached_fack_count = tp->fastpath_cnt_hint; if (!cached_skb) { - cached_skb = sk->sk_write_queue.next; + cached_skb = tcp_write_queue_head(sk); cached_fack_count = 0; } @@ -1061,10 +1061,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ if (after(end_seq, tp->high_seq)) flag |= FLAG_DATA_LOST; - sk_stream_for_retrans_queue_from(skb, sk) { + tcp_for_write_queue_from(skb, sk) { int in_sack, pcount; u8 sacked; + if (skb == tcp_send_head(sk)) + break; + cached_skb = skb; cached_fack_count = fack_count; if (i == first_sack_index) { @@ -1213,7 +1216,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) { struct sk_buff *skb; - sk_stream_for_retrans_queue(skb, sk) { + tcp_for_write_queue(skb, sk) { + if (skb == tcp_send_head(sk)) + break; if (after(TCP_SKB_CB(skb)->seq, lost_retrans)) break; if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) @@ -1266,8 +1271,8 @@ int tcp_use_frto(struct sock *sk) const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - if (!sysctl_tcp_frto || !sk->sk_send_head || - after(TCP_SKB_CB(sk->sk_send_head)->end_seq, + if (!sysctl_tcp_frto || !tcp_send_head(sk) || + after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tp->snd_una + tp->snd_wnd)) return 0; @@ -1278,8 +1283,11 @@ int tcp_use_frto(struct sock *sk) if (tp->retrans_out > 1) return 0; - skb = skb_peek(&sk->sk_write_queue)->next; /* Skips head */ - sk_stream_for_retrans_queue_from(skb, sk) { + skb = tcp_write_queue_head(sk); + skb = tcp_write_queue_next(sk, skb); /* Skips head */ + tcp_for_write_queue_from(skb, sk) { + if (skb == tcp_send_head(sk)) + break; if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS) return 0; /* Short-circuit when first non-SACKed skb has been checked */ @@ -1343,7 +1351,7 @@ void tcp_enter_frto(struct sock *sk) tp->undo_marker = tp->snd_una; tp->undo_retrans = 0; - skb = skb_peek(&sk->sk_write_queue); + skb = tcp_write_queue_head(sk); if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); @@ -1380,7 +1388,9 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) tp->fackets_out = 0; tp->retrans_out = 0; - sk_stream_for_retrans_queue(skb, sk) { + tcp_for_write_queue(skb, sk) { + if (skb == tcp_send_head(sk)) + break; cnt += tcp_skb_pcount(skb); /* * Count the retransmission made on RTO correctly (only when @@ -1468,7 +1478,9 @@ void tcp_enter_loss(struct sock *sk, int how) if (!how) tp->undo_marker = tp->snd_una; - 
sk_stream_for_retrans_queue(skb, sk) { + tcp_for_write_queue(skb, sk) { + if (skb == tcp_send_head(sk)) + break; cnt += tcp_skb_pcount(skb); if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS) tp->undo_marker = 0; @@ -1503,14 +1515,14 @@ static int tcp_check_sack_reneging(struct sock *sk) * receiver _host_ is heavily congested (or buggy). * Do processing similar to RTO timeout. */ - if ((skb = skb_peek(&sk->sk_write_queue)) != NULL && + if ((skb = tcp_write_queue_head(sk)) != NULL && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { struct inet_connection_sock *icsk = inet_csk(sk); NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); tcp_enter_loss(sk, 1); icsk->icsk_retransmits++; - tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)); + tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); return 1; @@ -1531,7 +1543,7 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) { return tp->packets_out && - tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue)); + tcp_skb_timedout(sk, tcp_write_queue_head(sk)); } /* Linux NewReno/SACK/FACK/ECN state machine. @@ -1726,11 +1738,13 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp, skb = tp->lost_skb_hint; cnt = tp->lost_cnt_hint; } else { - skb = sk->sk_write_queue.next; + skb = tcp_write_queue_head(sk); cnt = 0; } - sk_stream_for_retrans_queue_from(skb, sk) { + tcp_for_write_queue_from(skb, sk) { + if (skb == tcp_send_head(sk)) + break; /* TODO: do this better */ /* this is not the most efficient way to do this... */ tp->lost_skb_hint = skb; @@ -1777,9 +1791,11 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) struct sk_buff *skb; skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint - : sk->sk_write_queue.next; + : tcp_write_queue_head(sk); - sk_stream_for_retrans_queue_from(skb, sk) { + tcp_for_write_queue_from(skb, sk) { + if (skb == tcp_send_head(sk)) + break; if (!tcp_skb_timedout(sk, skb)) break; @@ -1970,7 +1986,9 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) { if (tcp_may_undo(tp)) { struct sk_buff *skb; - sk_stream_for_retrans_queue(skb, sk) { + tcp_for_write_queue(skb, sk) { + if (skb == tcp_send_head(sk)) + break; TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; } @@ -2382,8 +2400,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) = icsk->icsk_ca_ops->rtt_sample; struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; - while ((skb = skb_peek(&sk->sk_write_queue)) && - skb != sk->sk_send_head) { + while ((skb = tcp_write_queue_head(sk)) && + skb != tcp_send_head(sk)) { struct tcp_skb_cb *scb = TCP_SKB_CB(skb); __u8 sacked = scb->sacked; @@ -2446,7 +2464,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) } tcp_dec_pcount_approx(&tp->fackets_out, skb); tcp_packets_out_dec(tp, skb); - __skb_unlink(skb, &sk->sk_write_queue); + tcp_unlink_write_queue(skb, sk); sk_stream_free_skb(sk, skb); clear_all_retrans_hints(tp); } @@ -2495,7 +2513,7 @@ static void tcp_ack_probe(struct sock *sk) /* Was it a usable window open? */ - if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq, + if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tp->snd_una + tp->snd_wnd)) { icsk->icsk_backoff = 0; inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); @@ -2795,7 +2813,7 @@ no_queue: * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. 
*/ - if (sk->sk_send_head) + if (tcp_send_head(sk)) tcp_ack_probe(sk); return 1; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index addac1110f94..3326681b8429 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1890,7 +1890,7 @@ int tcp_v4_destroy_sock(struct sock *sk) tcp_cleanup_congestion_control(sk); /* Cleanup up the write buffer. */ - sk_stream_writequeue_purge(sk); + tcp_write_queue_purge(sk); /* Cleans up our, hopefully empty, out_of_order_queue. */ __skb_queue_purge(&tp->out_of_order_queue); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d19b2f3b70fd..2a62b55b15f1 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -65,9 +65,7 @@ int sysctl_tcp_slow_start_after_idle __read_mostly = 1; static void update_send_head(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) { - sk->sk_send_head = skb->next; - if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue) - sk->sk_send_head = NULL; + tcp_advance_send_head(sk, skb); tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; tcp_packets_out_inc(sk, tp, skb); } @@ -567,12 +565,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) /* Advance write_seq and place onto the write_queue. */ tp->write_seq = TCP_SKB_CB(skb)->end_seq; skb_header_release(skb); - __skb_queue_tail(&sk->sk_write_queue, skb); + tcp_add_write_queue_tail(sk, skb); sk_charge_skb(sk, skb); - - /* Queue it, remembering where we must start sending. */ - if (sk->sk_send_head == NULL) - sk->sk_send_head = skb; } static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now) @@ -705,7 +699,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss /* Link BUFF into the send queue. */ skb_header_release(buff); - __skb_append(skb, buff, &sk->sk_write_queue); + tcp_insert_write_queue_after(skb, buff, sk); return 0; } @@ -1056,7 +1050,7 @@ static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, uns return !after(end_seq, tp->snd_una + tp->snd_wnd); } -/* This checks if the data bearing packet SKB (usually sk->sk_send_head) +/* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) * should be put on the wire right now. If so, it returns the number of * packets allowed by the congestion window. */ @@ -1079,15 +1073,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, return cwnd_quota; } -static inline int tcp_skb_is_last(const struct sock *sk, - const struct sk_buff *skb) -{ - return skb->next == (struct sk_buff *)&sk->sk_write_queue; -} - int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp) { - struct sk_buff *skb = sk->sk_send_head; + struct sk_buff *skb = tcp_send_head(sk); return (skb && tcp_snd_test(sk, skb, tcp_current_mss(sk, 1), @@ -1143,7 +1131,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, /* Link BUFF into the send queue. */ skb_header_release(buff); - __skb_append(skb, buff, &sk->sk_write_queue); + tcp_insert_write_queue_after(skb, buff, sk); return 0; } @@ -1249,10 +1237,10 @@ static int tcp_mtu_probe(struct sock *sk) /* Have enough data in the send queue to probe? 
*/ len = 0; - if ((skb = sk->sk_send_head) == NULL) + if ((skb = tcp_send_head(sk)) == NULL) return -1; while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb)) - skb = skb->next; + skb = tcp_write_queue_next(sk, skb); if (len < probe_size) return -1; @@ -1279,9 +1267,9 @@ static int tcp_mtu_probe(struct sock *sk) return -1; sk_charge_skb(sk, nskb); - skb = sk->sk_send_head; - __skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue); - sk->sk_send_head = nskb; + skb = tcp_send_head(sk); + tcp_insert_write_queue_before(nskb, skb, sk); + tcp_advance_send_head(sk, skb); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; @@ -1292,7 +1280,7 @@ static int tcp_mtu_probe(struct sock *sk) len = 0; while (len < probe_size) { - next = skb->next; + next = tcp_write_queue_next(sk, skb); copy = min_t(int, skb->len, probe_size - len); if (nskb->ip_summed) @@ -1305,7 +1293,7 @@ static int tcp_mtu_probe(struct sock *sk) /* We've eaten all the data from this skb. * Throw it away. */ TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags; - __skb_unlink(skb, &sk->sk_write_queue); + tcp_unlink_write_queue(skb, sk); sk_stream_free_skb(sk, skb); } else { TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & @@ -1377,7 +1365,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) sent_pkts = 1; } - while ((skb = sk->sk_send_head)) { + while ((skb = tcp_send_head(sk))) { unsigned int limit; tso_segs = tcp_init_tso_segs(sk, skb, mss_now); @@ -1435,7 +1423,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) tcp_cwnd_validate(sk, tp); return 0; } - return !tp->packets_out && sk->sk_send_head; + return !tp->packets_out && tcp_send_head(sk); } /* Push out any pending frames which were held back due to @@ -1445,7 +1433,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, unsigned int cur_mss, int nonagle) { - struct sk_buff *skb = sk->sk_send_head; + struct sk_buff *skb = tcp_send_head(sk); if (skb) { if (tcp_write_xmit(sk, cur_mss, nonagle)) @@ -1459,7 +1447,7 @@ void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, void tcp_push_one(struct sock *sk, unsigned int mss_now) { struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb = sk->sk_send_head; + struct sk_buff *skb = tcp_send_head(sk); unsigned int tso_segs, cwnd_quota; BUG_ON(!skb || skb->len < mss_now); @@ -1620,7 +1608,7 @@ u32 __tcp_select_window(struct sock *sk) static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now) { struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *next_skb = skb->next; + struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); /* The first test we must make is that neither of these two * SKB's are still referenced by someone else. @@ -1652,7 +1640,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m clear_all_retrans_hints(tp); /* Ok. We will be able to collapse the packet. 
*/ - __skb_unlink(next_skb, &sk->sk_write_queue); + tcp_unlink_write_queue(next_skb, sk); memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); @@ -1706,7 +1694,9 @@ void tcp_simple_retransmit(struct sock *sk) unsigned int mss = tcp_current_mss(sk, 0); int lost = 0; - sk_stream_for_retrans_queue(skb, sk) { + tcp_for_write_queue(skb, sk) { + if (skb == tcp_send_head(sk)) + break; if (skb->len > mss && !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { @@ -1790,10 +1780,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) /* Collapse two adjacent packets if worthwhile and we can. */ if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && (skb->len < (cur_mss >> 1)) && - (skb->next != sk->sk_send_head) && - (skb->next != (struct sk_buff *)&sk->sk_write_queue) && - (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) && - (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) && + (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && + (!tcp_skb_is_last(sk, skb)) && + (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && + (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) && (sysctl_tcp_retrans_collapse != 0)) tcp_retrans_try_collapse(sk, skb, cur_mss); @@ -1872,15 +1862,17 @@ void tcp_xmit_retransmit_queue(struct sock *sk) skb = tp->retransmit_skb_hint; packet_cnt = tp->retransmit_cnt_hint; }else{ - skb = sk->sk_write_queue.next; + skb = tcp_write_queue_head(sk); packet_cnt = 0; } /* First pass: retransmit lost packets. */ if (tp->lost_out) { - sk_stream_for_retrans_queue_from(skb, sk) { + tcp_for_write_queue_from(skb, sk) { __u8 sacked = TCP_SKB_CB(skb)->sacked; + if (skb == tcp_send_head(sk)) + break; /* we could do better than to assign each time */ tp->retransmit_skb_hint = skb; tp->retransmit_cnt_hint = packet_cnt; @@ -1906,8 +1898,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) else NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); - if (skb == - skb_peek(&sk->sk_write_queue)) + if (skb == tcp_write_queue_head(sk)) inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX); @@ -1944,11 +1935,13 @@ void tcp_xmit_retransmit_queue(struct sock *sk) skb = tp->forward_skb_hint; packet_cnt = tp->forward_cnt_hint; } else{ - skb = sk->sk_write_queue.next; + skb = tcp_write_queue_head(sk); packet_cnt = 0; } - sk_stream_for_retrans_queue_from(skb, sk) { + tcp_for_write_queue_from(skb, sk) { + if (skb == tcp_send_head(sk)) + break; tp->forward_cnt_hint = packet_cnt; tp->forward_skb_hint = skb; @@ -1973,7 +1966,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) break; } - if (skb == skb_peek(&sk->sk_write_queue)) + if (skb == tcp_write_queue_head(sk)) inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX); @@ -1989,7 +1982,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) void tcp_send_fin(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue); + struct sk_buff *skb = tcp_write_queue_tail(sk); int mss_now; /* Optimization, tack on the FIN if we have a queue of @@ -1998,7 +1991,7 @@ void tcp_send_fin(struct sock *sk) */ mss_now = tcp_current_mss(sk, 1); - if (sk->sk_send_head != NULL) { + if (tcp_send_head(sk) != NULL) { TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; TCP_SKB_CB(skb)->end_seq++; tp->write_seq++; @@ -2071,7 +2064,7 @@ int tcp_send_synack(struct sock *sk) { struct sk_buff* skb; - skb = 
skb_peek(&sk->sk_write_queue); + skb = tcp_write_queue_head(sk); if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) { printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); return -EFAULT; @@ -2081,9 +2074,9 @@ int tcp_send_synack(struct sock *sk) struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); if (nskb == NULL) return -ENOMEM; - __skb_unlink(skb, &sk->sk_write_queue); + tcp_unlink_write_queue(skb, sk); skb_header_release(nskb); - __skb_queue_head(&sk->sk_write_queue, nskb); + __tcp_add_write_queue_head(sk, nskb); sk_stream_free_skb(sk, skb); sk_charge_skb(sk, nskb); skb = nskb; @@ -2285,7 +2278,7 @@ int tcp_connect(struct sock *sk) TCP_SKB_CB(buff)->when = tcp_time_stamp; tp->retrans_stamp = TCP_SKB_CB(buff)->when; skb_header_release(buff); - __skb_queue_tail(&sk->sk_write_queue, buff); + __tcp_add_write_queue_tail(sk, buff); sk_charge_skb(sk, buff); tp->packets_out += tcp_skb_pcount(buff); tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); @@ -2441,7 +2434,7 @@ int tcp_write_wakeup(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - if ((skb = sk->sk_send_head) != NULL && + if ((skb = tcp_send_head(sk)) != NULL && before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) { int err; unsigned int mss = tcp_current_mss(sk, 0); @@ -2491,7 +2484,7 @@ void tcp_send_probe0(struct sock *sk) err = tcp_write_wakeup(sk); - if (tp->packets_out || !sk->sk_send_head) { + if (tp->packets_out || !tcp_send_head(sk)) { /* Cancel probe timer, if it is not required. */ icsk->icsk_probes_out = 0; icsk->icsk_backoff = 0; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index a9243cfc1bea..2ca97b20929d 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -233,7 +233,7 @@ static void tcp_probe_timer(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); int max_probes; - if (tp->packets_out || !sk->sk_send_head) { + if (tp->packets_out || !tcp_send_head(sk)) { icsk->icsk_probes_out = 0; return; } @@ -284,7 +284,7 @@ static void tcp_retransmit_timer(struct sock *sk) if (!tp->packets_out) goto out; - BUG_TRAP(!skb_queue_empty(&sk->sk_write_queue)); + BUG_TRAP(!tcp_write_queue_empty(sk)); if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { @@ -306,7 +306,7 @@ static void tcp_retransmit_timer(struct sock *sk) goto out; } tcp_enter_loss(sk, 0); - tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)); + tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); __sk_dst_reset(sk); goto out_reset_timer; } @@ -341,7 +341,7 @@ static void tcp_retransmit_timer(struct sock *sk) tcp_enter_loss(sk, 0); } - if (tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)) > 0) { + if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) { /* Retransmission failed because of local congestion, * do not backoff. */ @@ -482,7 +482,7 @@ static void tcp_keepalive_timer (unsigned long data) elapsed = keepalive_time_when(tp); /* It is alive without keepalive 8) */ - if (tp->packets_out || sk->sk_send_head) + if (tp->packets_out || tcp_send_head(sk)) goto resched; elapsed = tcp_time_stamp - tp->rcv_tstamp; -- cgit v1.2.3-59-g8ed1b From ae40eb1ef30ab4120bd3c8b7e3da99ee53d27a23 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 18 Mar 2007 17:33:16 -0700 Subject: [NET]: Introduce SIOCGSTAMPNS ioctl to get timestamps with nanosec resolution Now network timestamps use ktime_t infrastructure, we can add a new ioctl() SIOCGSTAMPNS command to get timestamps in 'struct timespec'. User programs can thus access to nanosecond resolution. 
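For illustration only (not part of this patch), a minimal userspace sketch of how a receiver might read the nanosecond stamp of the last delivered packet; it assumes installed kernel headers that already define SIOCGSTAMPNS, and the socket setup and recvmsg()/recvfrom() call are left out:

    #include <stdio.h>
    #include <time.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h>

    /* Call after a successful recvfrom()/recvmsg() on fd. */
    static void show_rx_stamp(int fd)
    {
            struct timespec ts;

            /* SIOCGSTAMPNS fills a struct timespec with the receive time. */
            if (ioctl(fd, SIOCGSTAMPNS, &ts) == 0)
                    printf("packet received at %ld.%09ld\n",
                           (long)ts.tv_sec, ts.tv_nsec);
            else
                    perror("SIOCGSTAMPNS");
    }

The existing SIOCGSTAMP behaviour is unchanged; SIOCGSTAMPNS reports the same stamp, only with nanosecond granularity via struct timespec.
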
Signed-off-by: Eric Dumazet CC: Stephen Hemminger Signed-off-by: David S. Miller --- fs/compat_ioctl.c | 18 ++++++++++++++++++ include/asm-alpha/sockios.h | 3 ++- include/asm-arm/sockios.h | 3 ++- include/asm-arm26/sockios.h | 3 ++- include/asm-avr32/sockios.h | 3 ++- include/asm-cris/sockios.h | 3 ++- include/asm-frv/sockios.h | 3 ++- include/asm-h8300/sockios.h | 3 ++- include/asm-i386/sockios.h | 3 ++- include/asm-ia64/sockios.h | 3 ++- include/asm-m32r/sockios.h | 3 ++- include/asm-m68k/sockios.h | 3 ++- include/asm-mips/sockios.h | 3 ++- include/asm-parisc/sockios.h | 3 ++- include/asm-powerpc/sockios.h | 3 ++- include/asm-s390/sockios.h | 3 ++- include/asm-sh/sockios.h | 3 ++- include/asm-sh64/sockios.h | 3 ++- include/asm-sparc/sockios.h | 3 ++- include/asm-sparc64/sockios.h | 3 ++- include/asm-v850/sockios.h | 3 ++- include/asm-x86_64/sockios.h | 3 ++- include/asm-xtensa/sockios.h | 3 ++- include/net/compat.h | 1 + include/net/sock.h | 1 + net/appletalk/ddp.c | 3 +++ net/atm/ioctl.c | 3 +++ net/ax25/af_ax25.c | 4 ++++ net/compat.c | 24 ++++++++++++++++++++++++ net/core/sock.c | 16 ++++++++++++++++ net/econet/af_econet.c | 3 +++ net/ipv4/af_inet.c | 3 +++ net/ipv6/af_inet6.c | 3 +++ net/netrom/af_netrom.c | 6 ++++++ net/packet/af_packet.c | 2 ++ net/rose/af_rose.c | 3 +++ net/x25/af_x25.c | 12 ++++++++++++ 37 files changed, 146 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 8b1c5d8bf4ef..c68b055fa26e 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -266,6 +266,23 @@ static int do_siocgstamp(unsigned int fd, unsigned int cmd, unsigned long arg) return err; } +static int do_siocgstampns(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + struct compat_timespec __user *up = compat_ptr(arg); + struct timespec kts; + mm_segment_t old_fs = get_fs(); + int err; + + set_fs(KERNEL_DS); + err = sys_ioctl(fd, cmd, (unsigned long)&kts); + set_fs(old_fs); + if (!err) { + err = put_user(kts.tv_sec, &up->tv_sec); + err |= __put_user(kts.tv_nsec, &up->tv_nsec); + } + return err; +} + struct ifmap32 { compat_ulong_t mem_start; compat_ulong_t mem_end; @@ -2437,6 +2454,7 @@ HANDLE_IOCTL(SIOCBRDELIF, dev_ifsioc) /* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. 
*/ HANDLE_IOCTL(SIOCRTMSG, ret_einval) HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp) +HANDLE_IOCTL(SIOCGSTAMPNS, do_siocgstampns) #endif #ifdef CONFIG_BLOCK HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo) diff --git a/include/asm-alpha/sockios.h b/include/asm-alpha/sockios.h index e4961a740e5f..7932c7ab4a4d 100644 --- a/include/asm-alpha/sockios.h +++ b/include/asm-alpha/sockios.h @@ -10,6 +10,7 @@ #define SIOCSPGRP _IOW('s', 8, pid_t) #define SIOCGPGRP _IOR('s', 9, pid_t) -#define SIOCGSTAMP 0x8906 /* Get stamp - linux-specific */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* _ASM_ALPHA_SOCKIOS_H */ diff --git a/include/asm-arm/sockios.h b/include/asm-arm/sockios.h index 77c34087d513..a2588a2512df 100644 --- a/include/asm-arm/sockios.h +++ b/include/asm-arm/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif diff --git a/include/asm-arm26/sockios.h b/include/asm-arm26/sockios.h index 77c34087d513..a2588a2512df 100644 --- a/include/asm-arm26/sockios.h +++ b/include/asm-arm26/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif diff --git a/include/asm-avr32/sockios.h b/include/asm-avr32/sockios.h index 84f3d65b3b3b..0802d742f97d 100644 --- a/include/asm-avr32/sockios.h +++ b/include/asm-avr32/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* __ASM_AVR32_SOCKIOS_H */ diff --git a/include/asm-cris/sockios.h b/include/asm-cris/sockios.h index 6c4012f0b29f..cfe7bfecf599 100644 --- a/include/asm-cris/sockios.h +++ b/include/asm-cris/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif diff --git a/include/asm-frv/sockios.h b/include/asm-frv/sockios.h index 8a6e4b2074b7..5dbdd13e6de3 100644 --- a/include/asm-frv/sockios.h +++ b/include/asm-frv/sockios.h @@ -7,7 +7,8 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* _ASM_SOCKIOS__ */ diff --git a/include/asm-h8300/sockios.h b/include/asm-h8300/sockios.h index d005d9594cc6..e9c7ec810c23 100644 --- a/include/asm-h8300/sockios.h +++ b/include/asm-h8300/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* __ARCH_H8300_SOCKIOS__ */ diff --git a/include/asm-i386/sockios.h b/include/asm-i386/sockios.h index 6b747f8e228b..ff528c7d255c 100644 --- a/include/asm-i386/sockios.h +++ b/include/asm-i386/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define 
SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif diff --git a/include/asm-ia64/sockios.h b/include/asm-ia64/sockios.h index cf94857c8a54..15c92468ad38 100644 --- a/include/asm-ia64/sockios.h +++ b/include/asm-ia64/sockios.h @@ -14,6 +14,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* _ASM_IA64_SOCKIOS_H */ diff --git a/include/asm-m32r/sockios.h b/include/asm-m32r/sockios.h index f89962e231fe..6c1fb9b43bdb 100644 --- a/include/asm-m32r/sockios.h +++ b/include/asm-m32r/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* _ASM_M32R_SOCKIOS_H */ diff --git a/include/asm-m68k/sockios.h b/include/asm-m68k/sockios.h index 9b9ed973c24e..c04a23943cb7 100644 --- a/include/asm-m68k/sockios.h +++ b/include/asm-m68k/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* __ARCH_M68K_SOCKIOS__ */ diff --git a/include/asm-mips/sockios.h b/include/asm-mips/sockios.h index 87a50bf039ed..ed1a5f78d22f 100644 --- a/include/asm-mips/sockios.h +++ b/include/asm-mips/sockios.h @@ -20,6 +20,7 @@ #define SIOCSPGRP _IOW('s', 8, pid_t) #define SIOCGPGRP _IOR('s', 9, pid_t) -#define SIOCGSTAMP 0x8906 /* Get stamp - linux-specific */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* _ASM_SOCKIOS_H */ diff --git a/include/asm-parisc/sockios.h b/include/asm-parisc/sockios.h index aace49629949..dabfbc7483f6 100644 --- a/include/asm-parisc/sockios.h +++ b/include/asm-parisc/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif diff --git a/include/asm-powerpc/sockios.h b/include/asm-powerpc/sockios.h index 590078d8ed28..55cef7675a31 100644 --- a/include/asm-powerpc/sockios.h +++ b/include/asm-powerpc/sockios.h @@ -14,6 +14,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* _ASM_POWERPC_SOCKIOS_H */ diff --git a/include/asm-s390/sockios.h b/include/asm-s390/sockios.h index 412aeb4dd6ce..f4fc16c7da59 100644 --- a/include/asm-s390/sockios.h +++ b/include/asm-s390/sockios.h @@ -15,6 +15,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif diff --git a/include/asm-sh/sockios.h b/include/asm-sh/sockios.h index 08a71df8a8be..cf8b96b1f9ab 100644 --- a/include/asm-sh/sockios.h +++ b/include/asm-sh/sockios.h @@ -9,5 +9,6 @@ #define SIOCSPGRP _IOW('s', 8, pid_t) 
#define SIOCGPGRP _IOR('s', 9, pid_t) -#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp - linux-specific */ +#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp (timeval) */ +#define SIOCGSTAMPNS _IOR('s', 101, struct timespec) /* Get stamp (timespec) */ #endif /* __ASM_SH_SOCKIOS_H */ diff --git a/include/asm-sh64/sockios.h b/include/asm-sh64/sockios.h index 1ae23ae82977..419e76f12f41 100644 --- a/include/asm-sh64/sockios.h +++ b/include/asm-sh64/sockios.h @@ -20,5 +20,6 @@ #define SIOCSPGRP _IOW('s', 8, pid_t) #define SIOCGPGRP _IOR('s', 9, pid_t) -#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp - linux-specific */ +#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp (timeval) */ +#define SIOCGSTAMPNS _IOR('s', 101, struct timespec) /* Get stamp (timespec) */ #endif /* __ASM_SH64_SOCKIOS_H */ diff --git a/include/asm-sparc/sockios.h b/include/asm-sparc/sockios.h index 0c01b597b06f..990ea746486b 100644 --- a/include/asm-sparc/sockios.h +++ b/include/asm-sparc/sockios.h @@ -7,7 +7,8 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* !(_ASM_SPARC_SOCKIOS_H) */ diff --git a/include/asm-sparc64/sockios.h b/include/asm-sparc64/sockios.h index 6735bab4f39d..c7d9900638d0 100644 --- a/include/asm-sparc64/sockios.h +++ b/include/asm-sparc64/sockios.h @@ -7,7 +7,8 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* !(_ASM_SPARC64_SOCKIOS_H) */ diff --git a/include/asm-v850/sockios.h b/include/asm-v850/sockios.h index cf4874c2fd8a..823e106e6cd0 100644 --- a/include/asm-v850/sockios.h +++ b/include/asm-v850/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* __V850_SOCKIOS_H__ */ diff --git a/include/asm-x86_64/sockios.h b/include/asm-x86_64/sockios.h index 2eefd10d4f48..d726ba2513e3 100644 --- a/include/asm-x86_64/sockios.h +++ b/include/asm-x86_64/sockios.h @@ -7,6 +7,7 @@ #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif diff --git a/include/asm-xtensa/sockios.h b/include/asm-xtensa/sockios.h index 20d2ba10ecd1..efe0af379f01 100644 --- a/include/asm-xtensa/sockios.h +++ b/include/asm-xtensa/sockios.h @@ -25,6 +25,7 @@ #define SIOCSPGRP _IOW('s', 8, pid_t) #define SIOCGPGRP _IOR('s', 9, pid_t) -#define SIOCGSTAMP 0x8906 /* Get stamp - linux-specific */ +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ #endif /* _XTENSA_SOCKIOS_H */ diff --git a/include/net/compat.h b/include/net/compat.h index 9859b60280d5..406db242f73a 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -25,6 +25,7 @@ struct compat_cmsghdr { }; extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *); +extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *); #else /* defined(CONFIG_COMPAT) */ #define 
compat_msghdr msghdr /* to avoid compiler warnings */ diff --git a/include/net/sock.h b/include/net/sock.h index 2974bacc8850..d093e49fdc85 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1329,6 +1329,7 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e extern void sock_enable_timestamp(struct sock *sk); extern int sock_get_timestamp(struct sock *, struct timeval __user *); +extern int sock_get_timestampns(struct sock *, struct timespec __user *); /* * Enable debug/info messages diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index c8b7dc2c3257..32b82705b685 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1771,6 +1771,9 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCGSTAMP: rc = sock_get_timestamp(sk, argp); break; + case SIOCGSTAMPNS: + rc = sock_get_timestampns(sk, argp); + break; /* Routing */ case SIOCADDRT: case SIOCDELRT: diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c index 8ccee4591f65..7afd8e7754fd 100644 --- a/net/atm/ioctl.c +++ b/net/atm/ioctl.c @@ -82,6 +82,9 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCGSTAMP: /* borrowed from IP */ error = sock_get_timestamp(sk, argp); goto done; + case SIOCGSTAMPNS: /* borrowed from IP */ + error = sock_get_timestampns(sk, argp); + goto done; case ATM_SETSC: printk(KERN_WARNING "ATM_SETSC is obsolete\n"); error = 0; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 1c07c6a50eb8..62605dc5a2c8 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1711,6 +1711,10 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) res = sock_get_timestamp(sk, argp); break; + case SIOCGSTAMPNS: + res = sock_get_timestampns(sk, argp); + break; + case SIOCAX25ADDUID: /* Add a uid to the uid/call map table */ case SIOCAX25DELUID: /* Delete a uid from the uid/call map table */ case SIOCAX25GETUID: { diff --git a/net/compat.c b/net/compat.c index 17c2710b2b93..2fc6d9bb622b 100644 --- a/net/compat.c +++ b/net/compat.c @@ -564,6 +564,30 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) } EXPORT_SYMBOL(compat_sock_get_timestamp); +int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) +{ + struct compat_timespec __user *ctv = + (struct compat_timespec __user*) userstamp; + int err = -ENOENT; + struct timespec ts; + + if (!sock_flag(sk, SOCK_TIMESTAMP)) + sock_enable_timestamp(sk); + ts = ktime_to_timespec(sk->sk_stamp); + if (ts.tv_sec == -1) + return err; + if (ts.tv_sec == 0) { + sk->sk_stamp = ktime_get_real(); + ts = ktime_to_timespec(sk->sk_stamp); + } + err = 0; + if (put_user(ts.tv_sec, &ctv->tv_sec) || + put_user(ts.tv_nsec, &ctv->tv_nsec)) + err = -EFAULT; + return err; +} +EXPORT_SYMBOL(compat_sock_get_timestampns); + asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen) { diff --git a/net/core/sock.c b/net/core/sock.c index 6ddb3664b993..cb48fa0e1249 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1567,6 +1567,22 @@ int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) } EXPORT_SYMBOL(sock_get_timestamp); +int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) +{ + struct timespec ts; + if (!sock_flag(sk, SOCK_TIMESTAMP)) + sock_enable_timestamp(sk); + ts = ktime_to_timespec(sk->sk_stamp); + if (ts.tv_sec == -1) + return -ENOENT; + if (ts.tv_sec == 0) { + sk->sk_stamp = ktime_get_real(); + ts = 
ktime_to_timespec(sk->sk_stamp); + } + return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0; +} +EXPORT_SYMBOL(sock_get_timestampns); + void sock_enable_timestamp(struct sock *sk) { if (!sock_flag(sk, SOCK_TIMESTAMP)) { diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index f573eddc6034..487f879f5a19 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -727,6 +727,9 @@ static int econet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg case SIOCGSTAMP: return sock_get_timestamp(sk, argp); + case SIOCGSTAMPNS: + return sock_get_timestampns(sk, argp); + case SIOCSIFADDR: case SIOCGIFADDR: return ec_dev_ioctl(sock, cmd, argp); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index cf358c84c440..df41856fc603 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -755,6 +755,9 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCGSTAMP: err = sock_get_timestamp(sk, (struct timeval __user *)arg); break; + case SIOCGSTAMPNS: + err = sock_get_timestampns(sk, (struct timespec __user *)arg); + break; case SIOCADDRT: case SIOCDELRT: case SIOCRTMSG: diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index fed3758181e1..2ff070417955 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -443,6 +443,9 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); + case SIOCGSTAMPNS: + return sock_get_timestampns(sk, (struct timespec __user *)arg); + case SIOCADDRT: case SIOCDELRT: diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index bf9837dd95c4..a54e7ef2568a 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1209,6 +1209,12 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) release_sock(sk); return ret; + case SIOCGSTAMPNS: + lock_sock(sk); + ret = sock_get_timestampns(sk, argp); + release_sock(sk); + return ret; + case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f9866a8456a1..6f8c72d2413b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1545,6 +1545,8 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); + case SIOCGSTAMPNS: + return sock_get_timestampns(sk, (struct timespec __user *)arg); #ifdef CONFIG_INET case SIOCADDRT: diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index f92d5310847b..f64be9369ef7 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1296,6 +1296,9 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *) argp); + case SIOCGSTAMPNS: + return sock_get_timestampns(sk, (struct timespec __user *) argp); + case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index e62ba41b05c5..a19884315622 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1280,6 +1280,12 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) rc = sock_get_timestamp(sk, (struct timeval __user *)argp); break; + case SIOCGSTAMPNS: + rc = -EINVAL; + if (sk) + rc = sock_get_timestampns(sk, + (struct timespec __user *)argp); + break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: @@ -1521,6 +1527,12 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd, rc = 
compat_sock_get_timestamp(sk, (struct timeval __user*)argp); break; + case SIOCGSTAMPNS: + rc = -EINVAL; + if (sk) + rc = compat_sock_get_timestampns(sk, + (struct timespec __user*)argp); + break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: -- cgit v1.2.3-59-g8ed1b From a2a316fd068c455c609ecc155dcfaa7e208d29fe Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 8 Mar 2007 20:41:08 -0800 Subject: [NET]: Replace CONFIG_NET_DEBUG with sysctl. Covert network warning messages from a compile time to runtime choice. Removes kernel config option and replaces it with new /proc/sys/net/core/warnings. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- Documentation/filesystems/proc.txt | 9 +++++++++ include/linux/sysctl.h | 1 + include/net/sock.h | 12 +++++------- net/Kconfig | 7 ------- net/core/sysctl_net_core.c | 8 ++++++++ net/core/utils.c | 2 ++ 6 files changed, 25 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 5484ab5efd4f..7aaf09b86a55 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -1421,6 +1421,15 @@ fewer messages that will be written. Message_burst controls when messages will be dropped. The default settings limit warning messages to one every five seconds. +warnings +-------- + +This controls console messages from the networking stack that can occur because +of problems on the network like duplicate address or bad checksums. Normally, +this should be enabled, but if the problem persists the messages can be +disabled. + + netdev_max_backlog ------------------ diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index c9ccb550206f..df2d9ed20a4e 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -290,6 +290,7 @@ enum NET_CORE_BUDGET=19, NET_CORE_AEVENT_ETIME=20, NET_CORE_AEVENT_RSEQTH=21, + NET_CORE_WARNINGS=22, }; /* /proc/sys/net/ethernet */ diff --git a/include/net/sock.h b/include/net/sock.h index d093e49fdc85..51246579592e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1334,14 +1334,12 @@ extern int sock_get_timestampns(struct sock *, struct timespec __user *); /* * Enable debug/info messages */ +extern int net_msg_warn; +#define NETDEBUG(fmt, args...) \ + do { if (net_msg_warn) printk(fmt,##args); } while (0) -#ifdef CONFIG_NETDEBUG -#define NETDEBUG(fmt, args...) printk(fmt,##args) -#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0) -#else -#define NETDEBUG(fmt, args...) do { } while (0) -#define LIMIT_NETDEBUG(fmt, args...) do { } while(0) -#endif +#define LIMIT_NETDEBUG(fmt, args...) \ + do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0) /* * Macros for sleeping on a socket. Use them like this: diff --git a/net/Kconfig b/net/Kconfig index 915657832d94..e2d9b3b9cda4 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -27,13 +27,6 @@ if NET menu "Networking options" -config NETDEBUG - bool "Network packet debugging" - help - You can say Y here if you want to get additional messages useful in - debugging bad packets, but can overwhelm logs under denial of service - attacks. 
- source "net/packet/Kconfig" source "net/unix/Kconfig" source "net/xfrm/Kconfig" diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 1e75b1585460..b29712033dd4 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -136,6 +136,14 @@ ctl_table core_table[] = { .mode = 0644, .proc_handler = &proc_dointvec }, + { + .ctl_name = NET_CORE_WARNINGS, + .procname = "warnings", + .data = &net_msg_warn, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec + }, { .ctl_name = 0 } }; diff --git a/net/core/utils.c b/net/core/utils.c index 07236c17fab9..34f08107b98e 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -32,6 +32,8 @@ int net_msg_cost = 5*HZ; int net_msg_burst = 10; +int net_msg_warn = 1; +EXPORT_SYMBOL(net_msg_warn); /* * All net warning printk()s should be guarded by this function. -- cgit v1.2.3-59-g8ed1b From 92f37fd2ee805aa77925c1e64fd56088b46094fc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 25 Mar 2007 22:14:49 -0700 Subject: [NET]: Adding SO_TIMESTAMPNS / SCM_TIMESTAMPNS support Now that network timestamps use ktime_t infrastructure, we can add a new SOL_SOCKET sockopt SO_TIMESTAMPNS. This command is similar to SO_TIMESTAMP, but permits transmission of a 'timespec struct' instead of a 'timeval struct' control message. (nanosecond resolution instead of microsecond) Control message is labelled SCM_TIMESTAMPNS instead of SCM_TIMESTAMP A socket cannot mix SO_TIMESTAMP and SO_TIMESTAMPNS : the two modes are mutually exclusive. sock_recv_timestamp() became too big to be fully inlined so I added a __sock_recv_timestamp() helper function. Signed-off-by: Eric Dumazet CC: linux-arch@vger.kernel.org Signed-off-by: David S. Miller --- include/asm-alpha/socket.h | 2 ++ include/asm-arm/socket.h | 2 ++ include/asm-arm26/socket.h | 2 ++ include/asm-avr32/socket.h | 2 ++ include/asm-cris/socket.h | 2 ++ include/asm-frv/socket.h | 2 ++ include/asm-h8300/socket.h | 2 ++ include/asm-i386/socket.h | 2 ++ include/asm-ia64/socket.h | 2 ++ include/asm-m32r/socket.h | 2 ++ include/asm-m68k/socket.h | 2 ++ include/asm-mips/socket.h | 2 ++ include/asm-parisc/socket.h | 2 ++ include/asm-powerpc/socket.h | 2 ++ include/asm-s390/socket.h | 2 ++ include/asm-sh/socket.h | 2 ++ include/asm-sparc/socket.h | 2 ++ include/asm-sparc64/socket.h | 2 ++ include/asm-v850/socket.h | 2 ++ include/asm-x86_64/socket.h | 2 ++ include/asm-xtensa/socket.h | 2 ++ include/net/sock.h | 17 +++++++---------- net/compat.c | 10 +++++++++- net/core/sock.c | 16 ++++++++++++++-- net/socket.c | 29 +++++++++++++++++++++++++++++ 25 files changed, 101 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/asm-alpha/socket.h b/include/asm-alpha/socket.h index d22ab97ea72e..1fede7f92860 100644 --- a/include/asm-alpha/socket.h +++ b/include/asm-alpha/socket.h @@ -52,6 +52,8 @@ #define SO_PEERSEC 30 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 19 diff --git a/include/asm-arm/socket.h b/include/asm-arm/socket.h index 19f7df702b06..65a1a64bf934 100644 --- a/include/asm-arm/socket.h +++ b/include/asm-arm/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-arm26/socket.h b/include/asm-arm26/socket.h index 19f7df702b06..65a1a64bf934 100644 --- a/include/asm-arm26/socket.h 
+++ b/include/asm-arm26/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-avr32/socket.h b/include/asm-avr32/socket.h index 543229de8173..a0d0507a5034 100644 --- a/include/asm-avr32/socket.h +++ b/include/asm-avr32/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* __ASM_AVR32_SOCKET_H */ diff --git a/include/asm-cris/socket.h b/include/asm-cris/socket.h index 01cfdf1d6d33..5b18dfdf1748 100644 --- a/include/asm-cris/socket.h +++ b/include/asm-cris/socket.h @@ -51,6 +51,8 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-frv/socket.h b/include/asm-frv/socket.h index 31db18fc871f..a823befd11dd 100644 --- a/include/asm-frv/socket.h +++ b/include/asm-frv/socket.h @@ -49,6 +49,8 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-h8300/socket.h b/include/asm-h8300/socket.h index ebc830fee0d0..39911d8c9684 100644 --- a/include/asm-h8300/socket.h +++ b/include/asm-h8300/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-i386/socket.h b/include/asm-i386/socket.h index 5755d57c4e95..99ca648b94c5 100644 --- a/include/asm-i386/socket.h +++ b/include/asm-i386/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-ia64/socket.h b/include/asm-ia64/socket.h index d638ef3d50c3..9e42ce43cfbe 100644 --- a/include/asm-ia64/socket.h +++ b/include/asm-ia64/socket.h @@ -58,5 +58,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_IA64_SOCKET_H */ diff --git a/include/asm-m32r/socket.h b/include/asm-m32r/socket.h index acdf748fcdc8..793d5d30c850 100644 --- a/include/asm-m32r/socket.h +++ b/include/asm-m32r/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_M32R_SOCKET_H */ diff --git a/include/asm-m68k/socket.h b/include/asm-m68k/socket.h index a5966ec005ae..6d21b90863ad 100644 --- a/include/asm-m68k/socket.h +++ b/include/asm-m68k/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-mips/socket.h b/include/asm-mips/socket.h index 36ebe4e186a7..95945689b1c6 100644 --- a/include/asm-mips/socket.h +++ b/include/asm-mips/socket.h @@ -70,6 +70,8 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. 
*/ #define SO_SNDBUFFORCE 31 #define SO_RCVBUFFORCE 33 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #ifdef __KERNEL__ diff --git a/include/asm-parisc/socket.h b/include/asm-parisc/socket.h index ce2eae1708b5..99e868f6a8f5 100644 --- a/include/asm-parisc/socket.h +++ b/include/asm-parisc/socket.h @@ -33,6 +33,8 @@ #define SO_PEERCRED 0x4011 #define SO_TIMESTAMP 0x4012 #define SCM_TIMESTAMP SO_TIMESTAMP +#define SO_TIMESTAMPNS 0x4013 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x4016 diff --git a/include/asm-powerpc/socket.h b/include/asm-powerpc/socket.h index c8b1da50e72d..403e9fde2eb5 100644 --- a/include/asm-powerpc/socket.h +++ b/include/asm-powerpc/socket.h @@ -56,5 +56,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/include/asm-s390/socket.h b/include/asm-s390/socket.h index 1778a49a74c5..1161ebe3dec9 100644 --- a/include/asm-s390/socket.h +++ b/include/asm-s390/socket.h @@ -57,5 +57,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-sh/socket.h b/include/asm-sh/socket.h index ca70362eb563..c48d6fc9da38 100644 --- a/include/asm-sh/socket.h +++ b/include/asm-sh/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* __ASM_SH_SOCKET_H */ diff --git a/include/asm-sparc/socket.h b/include/asm-sparc/socket.h index f6c4e5baf3f7..7c1423997cf0 100644 --- a/include/asm-sparc/socket.h +++ b/include/asm-sparc/socket.h @@ -49,6 +49,8 @@ #define SO_PEERSEC 0x001e #define SO_PASSSEC 0x001f +#define SO_TIMESTAMPNS 0x0021 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 diff --git a/include/asm-sparc64/socket.h b/include/asm-sparc64/socket.h index 754d46a50af3..986441dcb8f0 100644 --- a/include/asm-sparc64/socket.h +++ b/include/asm-sparc64/socket.h @@ -49,6 +49,8 @@ #define SO_PEERSEC 0x001e #define SO_PASSSEC 0x001f +#define SO_TIMESTAMPNS 0x0021 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 diff --git a/include/asm-v850/socket.h b/include/asm-v850/socket.h index 0dfe55ac2ef2..a4c2493b025f 100644 --- a/include/asm-v850/socket.h +++ b/include/asm-v850/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* __V850_SOCKET_H__ */ diff --git a/include/asm-x86_64/socket.h b/include/asm-x86_64/socket.h index b46702607933..90af60cf3c0e 100644 --- a/include/asm-x86_64/socket.h +++ b/include/asm-x86_64/socket.h @@ -49,5 +49,7 @@ #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _ASM_SOCKET_H */ diff --git a/include/asm-xtensa/socket.h b/include/asm-xtensa/socket.h index 971d231be60e..1f5aeacb9da2 100644 --- a/include/asm-xtensa/socket.h +++ b/include/asm-xtensa/socket.h @@ -60,5 +60,7 @@ #define SO_ACCEPTCONN 30 #define SO_PEERSEC 31 #define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #endif /* _XTENSA_SOCKET_H */ diff --git a/include/net/sock.h 
b/include/net/sock.h index 51246579592e..390c04700590 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -390,6 +390,7 @@ enum sock_flags { SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */ SOCK_DBG, /* %SO_DEBUG setting */ SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */ + SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */ SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ }; @@ -1283,21 +1284,17 @@ static inline int sock_intr_errno(long timeo) return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR; } +extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb); + static __inline__ void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { ktime_t kt = skb->tstamp; - if (sock_flag(sk, SOCK_RCVTSTAMP)) { - struct timeval tv; - /* Race occurred between timestamp enabling and packet - receiving. Fill in the current time for now. */ - if (kt.tv64 == 0) - kt = ktime_get_real(); - skb->tstamp = kt; - tv = ktime_to_timeval(kt); - put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(tv), &tv); - } else + if (sock_flag(sk, SOCK_RCVTSTAMP)) + __sock_recv_timestamp(msg, sk, skb); + else sk->sk_stamp = kt; } diff --git a/net/compat.c b/net/compat.c index 0e407563ae85..9a0f5f2b90c8 100644 --- a/net/compat.c +++ b/net/compat.c @@ -215,6 +215,7 @@ Efault: int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data) { struct compat_timeval ctv; + struct compat_timespec cts; struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; struct compat_cmsghdr cmhdr; int cmlen; @@ -229,7 +230,14 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat ctv.tv_sec = tv->tv_sec; ctv.tv_usec = tv->tv_usec; data = &ctv; - len = sizeof(struct compat_timeval); + len = sizeof(ctv); + } + if (level == SOL_SOCKET && type == SO_TIMESTAMPNS) { + struct timespec *ts = (struct timespec *)data; + cts.tv_sec = ts->tv_sec; + cts.tv_nsec = ts->tv_nsec; + data = &cts; + len = sizeof(cts); } cmlen = CMSG_COMPAT_LEN(len); diff --git a/net/core/sock.c b/net/core/sock.c index 792ae39804a2..f9e6991d3729 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -521,11 +521,18 @@ set_rcvbuf: break; case SO_TIMESTAMP: + case SO_TIMESTAMPNS: if (valbool) { + if (optname == SO_TIMESTAMP) + sock_reset_flag(sk, SOCK_RCVTSTAMPNS); + else + sock_set_flag(sk, SOCK_RCVTSTAMPNS); sock_set_flag(sk, SOCK_RCVTSTAMP); sock_enable_timestamp(sk); - } else + } else { sock_reset_flag(sk, SOCK_RCVTSTAMP); + sock_reset_flag(sk, SOCK_RCVTSTAMPNS); + } break; case SO_RCVLOWAT: @@ -715,7 +722,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_TIMESTAMP: - v.val = sock_flag(sk, SOCK_RCVTSTAMP); + v.val = sock_flag(sk, SOCK_RCVTSTAMP) && + !sock_flag(sk, SOCK_RCVTSTAMPNS); + break; + + case SO_TIMESTAMPNS: + v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); break; case SO_RCVTIMEO: diff --git a/net/socket.c b/net/socket.c index cf18c5eb592e..a7bd0df115b2 100644 --- a/net/socket.c +++ b/net/socket.c @@ -585,6 +585,35 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, return result; } +/* + * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) + */ +void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb) +{ + ktime_t kt = skb->tstamp; + + if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { + struct timeval tv; + /* Race occurred between timestamp enabling and 
packet + receiving. Fill in the current time for now. */ + if (kt.tv64 == 0) + kt = ktime_get_real(); + skb->tstamp = kt; + tv = ktime_to_timeval(kt); + put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv); + } else { + struct timespec ts; + /* Race occurred between timestamp enabling and packet + receiving. Fill in the current time for now. */ + if (kt.tv64 == 0) + kt = ktime_get_real(); + skb->tstamp = kt; + ts = ktime_to_timespec(kt); + put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts), &ts); + } +} + static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { -- cgit v1.2.3-59-g8ed1b From 459a98ed881802dee55897441bc7f77af614368e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Mar 2007 15:30:44 -0700 Subject: [SK_BUFF]: Introduce skb_reset_mac_header(skb) For the common, open coded 'skb->mac.raw = skb->data' operation, so that we can later turn skb->mac.raw into a offset, reducing the size of struct sk_buff in 64bit land while possibly keeping it as a pointer on 32bit. This one touches just the most simple case, next will handle the slightly more "complex" cases. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- arch/um/drivers/net_kern.c | 2 +- arch/xtensa/platform-iss/network.c | 2 +- drivers/block/aoe/aoecmd.c | 3 ++- drivers/ieee1394/eth1394.c | 2 +- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 2 +- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 2 +- drivers/isdn/i4l/isdn_net.c | 4 ++-- drivers/isdn/i4l/isdn_ppp.c | 2 +- drivers/message/fusion/mptlan.c | 4 ++-- drivers/net/appletalk/cops.c | 2 +- drivers/net/appletalk/ltpc.c | 2 +- drivers/net/arcnet/arc-rawmode.c | 2 +- drivers/net/arcnet/capmode.c | 11 ++++------- drivers/net/arcnet/rfc1051.c | 2 +- drivers/net/arcnet/rfc1201.c | 2 +- drivers/net/bonding/bond_3ad.c | 4 ++-- drivers/net/bonding/bond_alb.c | 4 ++-- drivers/net/cxgb3/cxgb3_offload.c | 2 +- drivers/net/cxgb3/sge.c | 3 ++- drivers/net/irda/ali-ircc.c | 2 +- drivers/net/irda/au1k_ir.c | 2 +- drivers/net/irda/donauboe.c | 2 +- drivers/net/irda/irda-usb.c | 2 +- drivers/net/irda/mcs7780.c | 4 ++-- drivers/net/irda/nsc-ircc.c | 2 +- drivers/net/irda/pxaficp_ir.c | 2 +- drivers/net/irda/sa1100_ir.c | 2 +- drivers/net/irda/smsc-ircc2.c | 2 +- drivers/net/irda/stir4200.c | 2 +- drivers/net/irda/via-ircc.c | 8 ++++---- drivers/net/irda/vlsi_ir.c | 2 +- drivers/net/irda/w83977af_ir.c | 2 +- drivers/net/myri_sbus.c | 2 +- drivers/net/ppp_generic.c | 2 +- drivers/net/sb1000.c | 2 +- drivers/net/tun.c | 2 +- drivers/net/wan/cosa.c | 2 +- drivers/net/wan/cycx_x25.c | 2 +- drivers/net/wan/dlci.c | 2 +- drivers/net/wan/farsync.c | 2 +- drivers/net/wan/lmc/lmc_main.c | 4 ++-- drivers/net/wan/pc300_drv.c | 2 +- drivers/net/wan/pc300_tty.c | 2 +- drivers/net/wireless/airo.c | 4 ++-- drivers/net/wireless/hostap/hostap_80211_rx.c | 7 ++++--- drivers/net/wireless/hostap/hostap_80211_tx.c | 2 +- drivers/net/wireless/hostap/hostap_ap.c | 3 ++- drivers/net/wireless/hostap/hostap_hw.c | 2 +- drivers/net/wireless/hostap/hostap_main.c | 3 ++- drivers/net/wireless/ipw2200.c | 2 +- drivers/net/wireless/orinoco.c | 2 +- drivers/net/wireless/prism54/islpci_eth.c | 2 +- drivers/net/wireless/strip.c | 2 +- drivers/s390/net/ctcmain.c | 4 ++-- drivers/s390/net/netiucv.c | 4 ++-- drivers/s390/net/qeth_eddp.c | 2 +- drivers/s390/net/qeth_main.c | 4 ++-- include/linux/hdlc.h | 4 ++-- include/linux/skbuff.h | 5 +++++ include/net/ax25.h | 2 +- include/net/x25device.h | 2 +- net/802/fddi.c | 2 +- 
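
For reference, the nanosecond timestamp option added by the socket.h, net/core/sock.c and net/socket.c hunks above is consumed from user space through the normal cmsg machinery. The sketch below is illustrative only and is not part of any patch in this series: the print_rx_timestamp() helper, the buffer sizes and the already-bound UDP socket fd are made up for the example, and error handling is omitted. A receiver enables SO_TIMESTAMPNS with setsockopt() and then reads the struct timespec delivered as an SCM_TIMESTAMPNS control message by recvmsg().

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void print_rx_timestamp(int fd)
{
	char data[2048];
	union {
		char buf[CMSG_SPACE(sizeof(struct timespec))];
		struct cmsghdr align;
	} ctrl;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl.buf, .msg_controllen = sizeof(ctrl.buf),
	};
	struct cmsghdr *cmsg;
	int on = 1;

	/* ask for nanosecond receive timestamps instead of SO_TIMESTAMP */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on));

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	/* walk the control messages looking for the SCM_TIMESTAMPNS payload */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPNS) {
			struct timespec ts;

			memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
			printf("packet received at %ld.%09ld\n",
			       (long)ts.tv_sec, ts.tv_nsec);
		}
	}
}

As the sock_setsockopt()/sock_getsockopt() hunks above show, the two options are mutually exclusive: SO_TIMESTAMP keeps delivering a struct timeval (microseconds) while SO_TIMESTAMPNS delivers a struct timespec (nanoseconds) for the same receive timestamp, setting either one selects which format is reported, and getsockopt() on the option not currently in effect reads back 0.
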
net/802/hippi.c | 2 +- net/802/tr.c | 2 +- net/atm/br2684.c | 2 +- net/atm/clip.c | 2 +- net/ax25/ax25_in.c | 2 +- net/bluetooth/bnep/core.c | 2 +- net/bridge/br_device.c | 2 +- net/core/dev.c | 4 ++-- net/core/netpoll.c | 2 +- net/core/skbuff.c | 2 +- net/decnet/dn_route.c | 2 +- net/ethernet/eth.c | 2 +- net/ieee80211/ieee80211_rx.c | 7 ++++--- net/ipv4/ip_gre.c | 2 +- net/ipv4/ip_output.c | 2 +- net/ipv4/route.c | 3 ++- net/ipv6/ip6_output.c | 2 +- net/ipv6/route.c | 2 +- net/irda/irlap_frame.c | 3 ++- net/irda/wrapper.c | 2 +- net/llc/llc_output.c | 2 +- net/netrom/nr_dev.c | 2 +- net/wanrouter/wanmain.c | 2 +- 85 files changed, 119 insertions(+), 109 deletions(-) (limited to 'include') diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 04e31f86c10a..859303730b2f 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -55,7 +55,7 @@ static int uml_net_rx(struct net_device *dev) skb->dev = dev; skb_put(skb, dev->mtu); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); pkt_len = (*lp->read)(lp->fd, &skb, lp); if (pkt_len > 0) { diff --git a/arch/xtensa/platform-iss/network.c b/arch/xtensa/platform-iss/network.c index 8ebfc8761229..ab05bff40104 100644 --- a/arch/xtensa/platform-iss/network.c +++ b/arch/xtensa/platform-iss/network.c @@ -386,7 +386,7 @@ static int iss_net_rx(struct net_device *dev) /* Setup skb */ skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); pkt_len = lp->tp.read(lp, &skb); skb_put(skb, pkt_len); diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 4ab7b40e8c5a..74062dc4e90d 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -27,7 +27,8 @@ new_skb(ulong len) skb = alloc_skb(len, GFP_ATOMIC); if (skb) { - skb->nh.raw = skb->mac.raw = skb->data; + skb_reset_mac_header(skb); + skb->nh.raw = skb->data; skb->protocol = __constant_htons(ETH_P_AOE); skb->priority = 0; skb->next = skb->prev = NULL; diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index 03e44b337eb0..db2346f4d207 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -834,7 +834,7 @@ static inline u16 ether1394_type_trans(struct sk_buff *skb, struct eth1394hdr *eth; unsigned char *rawp; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull (skb, ETH1394_HLEN); eth = eth1394_hdr(skb); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 2b242a4823f8..c722e5c141b3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -408,7 +408,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb); skb->protocol = ((struct ipoib_header *) skb->data)->proto; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, IPOIB_ENCAP_LEN); dev->last_rx = jiffies; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index ba0ee5cf2ad7..93f74567897e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -216,7 +216,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) if (wc->slid != priv->local_lid || wc->src_qp != priv->qp->qp_num) { skb->protocol = ((struct ipoib_header *) skb->data)->proto; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, IPOIB_ENCAP_LEN); dev->last_rx = jiffies; diff --git a/drivers/isdn/i4l/isdn_net.c 
b/drivers/isdn/i4l/isdn_net.c index 838b3734e2b6..fadb9291bc1b 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -1366,7 +1366,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev) struct ethhdr *eth; unsigned char *rawp; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); eth = eth_hdr(skb); @@ -1786,7 +1786,7 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb) } skb->dev = ndev; skb->pkt_type = PACKET_HOST; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); #ifdef ISDN_DEBUG_NET_DUMP isdn_dumppkt("R:", skb->data, skb->len, 40); #endif diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 1b2df80c3bce..be915051cb2e 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -1167,7 +1167,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff mlp->huptimer = 0; #endif /* CONFIG_IPPP_FILTER */ skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); netif_rx(skb); /* net_dev->local->stats.rx_packets++; done in isdn_net.c */ return; diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index b691292ff599..d5b878d56280 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c @@ -753,7 +753,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) /* Set the mac.raw pointer, since this apparently isn't getting * done before we get the skb. Pull the data pointer past the mac data. */ - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, 12); dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, @@ -1549,7 +1549,7 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev) struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; struct fcllc *fcllc; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, sizeof(struct mpt_lan_ohdr)); if (fch->dtype == htons(0xffff)) { diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index dba5e5165452..28cb79cee910 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -853,7 +853,7 @@ static void cops_rx(struct net_device *dev) return; } - skb->mac.raw = skb->data; /* Point to entire packet. */ + skb_reset_mac_header(skb); /* Point to entire packet. */ skb_pull(skb,3); skb->h.raw = skb->data; /* Point to data (Skip header). 
*/ diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 2ea44ce49810..12682439f8bd 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -770,7 +770,7 @@ static int sendup_buffer (struct net_device *dev) skb->data[0] = dnode; skb->data[1] = snode; skb->data[2] = llaptype; - skb->mac.raw = skb->data; /* save pointer to llap header */ + skb_reset_mac_header(skb); /* save pointer to llap header */ skb_pull(skb,3); /* copy ddp(s,e)hdr + contents */ diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c index 6318814a11a8..e0a18e7c73cb 100644 --- a/drivers/net/arcnet/arc-rawmode.c +++ b/drivers/net/arcnet/arc-rawmode.c @@ -110,7 +110,7 @@ static void rx(struct net_device *dev, int bufnum, pkt = (struct archdr *) skb->data; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, ARC_HDR_SIZE); /* up to sizeof(pkt->soft) has already been copied from the card */ diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 66485585ab39..6c764b66e9cc 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -122,10 +122,8 @@ static void rx(struct net_device *dev, int bufnum, } skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); skb->dev = dev; - - pkt = (struct archdr *) skb->data; - - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); + pkt = (struct archdr *)skb->mac.raw; skb_pull(skb, ARC_HDR_SIZE); /* up to sizeof(pkt->soft) has already been copied from the card */ @@ -270,9 +268,8 @@ static int ack_tx(struct net_device *dev, int acked) skb_put(ackskb, length + ARC_HDR_SIZE ); ackskb->dev = dev; - ackpkt = (struct archdr *) ackskb->data; - - ackskb->mac.raw = ackskb->data; + skb_reset_mac_header(ackskb); + ackpkt = (struct archdr *)ackskb->mac.raw; /* skb_pull(ackskb, ARC_HDR_SIZE); */ diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 6d6c69f036ef..2de8877ece29 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c @@ -94,7 +94,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; /* Pull off the arcnet header. */ - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, hdr_size); if (pkt->hard.dest == 0) diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index bee34226abfa..460a095000c2 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c @@ -96,7 +96,7 @@ static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; /* Pull off the arcnet header. 
*/ - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, hdr_size); if (pkt->hard.dest == 0) diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 3fb354d9c515..e3c9e2e56d14 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -884,7 +884,7 @@ static int ad_lacpdu_send(struct port *port) } skb->dev = slave->dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->nh.raw = skb->data + ETH_HLEN; skb->protocol = PKT_TYPE_LACPDU; skb->priority = TC_PRIO_CONTROL; @@ -928,7 +928,7 @@ static int ad_marker_send(struct port *port, struct marker *marker) skb_reserve(skb, 16); skb->dev = slave->dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->nh.raw = skb->data + ETH_HLEN; skb->protocol = PKT_TYPE_LACPDU; diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 217a2eedee0a..916162ca0c98 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -890,7 +890,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) data = skb_put(skb, size); memcpy(data, &pkt, size); - skb->mac.raw = data; + skb_reset_mac_header(skb); skb->nh.raw = data + ETH_HLEN; skb->protocol = pkt.type; skb->priority = TC_PRIO_CONTROL; @@ -1266,7 +1266,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) u8 *hash_start = NULL; int res = 1; - skb->mac.raw = (unsigned char *)skb->data; + skb_reset_mac_header(skb); eth_data = eth_hdr(skb); /* make sure that the curr_active_slave and the slaves list do diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 199e5066acf3..ebcf35e4cf5b 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -783,7 +783,7 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb) skb->protocol = htons(0xffff); skb->dev = dev->lldev; skb_pull(skb, sizeof(*p)); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); netif_receive_skb(skb); return 0; } diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 8946f7aa97cd..b5cf2a60834d 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -1620,7 +1620,8 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, unsigned int gather_idx) { rq->offload_pkts++; - skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data; + skb_reset_mac_header(skb); + skb->nh.raw = skb->h.raw = skb->data; if (rq->polling) { rx_gather[gather_idx++] = skb; diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index cebf8c374bc5..0f10758226fa 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -1932,7 +1932,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) self->stats.rx_packets++; skb->dev = self->netdev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); self->netdev->last_rx = jiffies; diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index 37914dc5b90e..27afd0f367d6 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c @@ -606,7 +606,7 @@ static int au1k_irda_rx(struct net_device *dev) skb_put(skb, count-2); memcpy(skb->data, (void *)pDB->vaddr, count-2); skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); prxd->count_0 = 0; diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 11af0ae7510e..ddfa6c38a16b 100644 --- 
a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -1286,7 +1286,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); self->stats.rx_packets++; skb->dev = self->netdev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons (ETH_P_IRDA); } else diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 1d510bdc9b84..6ef375a095f4 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -921,7 +921,7 @@ static void irda_usb_receive(struct urb *urb) /* Ask the networking layer to queue the packet for the IrDA stack */ dataskb->dev = self->netdev; - dataskb->mac.raw = dataskb->data; + skb_reset_mac_header(dataskb); dataskb->protocol = htons(ETH_P_IRDA); len = dataskb->len; netif_rx(dataskb); diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index f0c61f3b2a82..3ff1f4b33c06 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -428,7 +428,7 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len) skb_reserve(skb, 1); memcpy(skb->data, buf, new_len); skb_put(skb, new_len); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); skb->dev = mcs->netdev; @@ -481,7 +481,7 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) skb_reserve(skb, 1); memcpy(skb->data, buf, new_len); skb_put(skb, new_len); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); skb->dev = mcs->netdev; diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index 29b5ccd29d0b..8ce7dad582f4 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -1881,7 +1881,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) self->stats.rx_packets++; skb->dev = self->netdev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); self->netdev->last_rx = jiffies; diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 2272156af31e..f35d7d42624e 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -391,7 +391,7 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in /* Feed it to IrLAP */ skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 937372d00398..056639f72bec 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -504,7 +504,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev skb_put(skb, len); skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); si->stats.rx_packets++; si->stats.rx_bytes += len; diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 31c623381ea8..103a2d18ed2f 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -1412,7 +1412,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self) self->stats.rx_bytes += len; skb->dev = self->netdev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); } diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 20d306fea4cb..a22175f4ea81 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -364,7 +364,7 @@ static void fir_eof(struct stir_cb 
*stir) skb_put(skb, len); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); skb->dev = stir->netdev; diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index c3ed9b3067e5..5ff416314604 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -1125,7 +1125,7 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self, self->stats.rx_bytes += len; self->stats.rx_packets++; skb->dev = self->netdev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); return TRUE; @@ -1198,7 +1198,7 @@ F01_E */ self->stats.rx_bytes += len; self->stats.rx_packets++; skb->dev = self->netdev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); @@ -1244,7 +1244,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase) self->stats.rx_bytes += len; self->stats.rx_packets++; skb->dev = self->netdev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); if (st_fifo->len < (MAX_RX_WINDOW + 2)) { @@ -1313,7 +1313,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase) self->stats.rx_bytes += len; self->stats.rx_packets++; skb->dev = self->netdev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); } //while diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index 3457e9d8b667..79b407f3a49a 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -595,7 +595,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) rd->skb = NULL; skb->dev = ndev; memcpy(skb_put(skb,len), rd->buf, len); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); if (in_interrupt()) netif_rx(skb); else diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 4212657fa4f9..bee445130952 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -919,7 +919,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self) self->stats.rx_packets++; skb->dev = self->netdev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); self->netdev->last_rx = jiffies; diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index ee26ef52289f..de092658db6c 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -368,7 +368,7 @@ static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev) struct ethhdr *eth; unsigned char *rawp; - skb->mac.raw = (((unsigned char *)skb->data) + MYRI_PAD_LEN); + skb->mac.raw = skb->data + MYRI_PAD_LEN; skb_pull(skb, dev->hard_header_len); eth = eth_hdr(skb); diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index ef58e4128782..18f1790aab9a 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -1685,7 +1685,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) skb_pull_rcsum(skb, 2); skb->dev = ppp->dev; skb->protocol = htons(npindex_to_ethertype[npi]); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); netif_rx(skb); ppp->dev->last_rx = jiffies; } diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index b9fa4fbb1398..1de3eec1a792 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -834,7 +834,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3 goto dropped_frame; } skb->dev = dev; - skb->mac.raw = 
skb->data; + skb_reset_mac_header(skb); skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16]; insw(ioaddr, skb_put(skb, NewDatagramDataSize), NewDatagramDataSize / 2); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a57aa010cb25..288d8559f8c5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -256,7 +256,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, switch (tun->flags & TUN_TYPE_MASK) { case TUN_TUN_DEV: - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = pi.proto; skb->dev = tun->dev; break; diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 5b82e4fd0d73..c198511ec3f5 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -773,7 +773,7 @@ static int sppp_rx_done(struct channel_data *chan) } chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); chan->rx_skb->dev = chan->pppdev.dev; - chan->rx_skb->mac.raw = chan->rx_skb->data; + skb_reset_mac_header(chan->rx_skb); chan->stats.rx_packets++; chan->stats.rx_bytes += chan->cosa->rxsize; netif_rx(chan->rx_skb); diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c index a631d1c2fa14..016b3ff3ea5e 100644 --- a/drivers/net/wan/cycx_x25.c +++ b/drivers/net/wan/cycx_x25.c @@ -834,7 +834,7 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd) ++chan->ifstats.rx_packets; chan->ifstats.rx_bytes += pktlen; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); netif_rx(skb); dev->last_rx = jiffies; /* timestamp */ } diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 736987559432..66be20c292b6 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -176,7 +176,7 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev) if (process) { /* we've set up the protocol, so discard the header */ - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, header); dlp->stats.rx_bytes += skb->len; netif_rx(skb); diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index c45d6a83339d..58a53b6d9b42 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -864,7 +864,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev) { skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->pkt_type = PACKET_HOST; return htons(ETH_P_CUST); } diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 2b54f1bc3a0d..6d288839ddaa 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1667,7 +1667,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ skb_put (skb, len); skb->protocol = lmc_proto_type(sc, skb); skb->protocol = htons(ETH_P_WAN_PPP); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); // skb->nh.raw = skb->data; skb->dev = dev; lmc_proto_netif(sc, skb); @@ -1705,7 +1705,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ memcpy(skb_put(nsb, len), skb->data, len); nsb->protocol = lmc_proto_type(sc, skb); - nsb->mac.raw = nsb->data; + skb_reset_mac_header(nsb); // nsb->nh.raw = nsb->data; nsb->dev = dev; lmc_proto_netif(sc, nsb); diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 62184dee377c..edbc55528be5 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c @@ -1755,7 +1755,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx) skb->dev = dev; skb->protocol = 
htons(ETH_P_CUST); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->pkt_type = PACKET_HOST; skb->len = 10 + skb_main->len; diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index 5873c346e7e9..de02a07259cf 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c @@ -1003,7 +1003,7 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx) skb_put (skb, 10 + len); skb->dev = dev->dev; skb->protocol = htons(ETH_P_CUST); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->pkt_type = PACKET_HOST; skb->len = 10 + len; diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index e50b1482d792..692a23f9834d 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -3411,7 +3411,7 @@ badrx: OUT4500( apriv, EVACK, EV_RX); if (test_bit(FLAG_802_11, &apriv->flags)) { - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->pkt_type = PACKET_OTHERHOST; skb->dev = apriv->wifidev; skb->protocol = htons(ETH_P_802_2); @@ -3746,7 +3746,7 @@ void mpi_receive_802_11 (struct airo_info *ai) wireless_spy_update(ai->dev, sa, &wstats); } #endif /* IW_WIRELESS_SPY */ - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->pkt_type = PACKET_OTHERHOST; skb->dev = ai->wifidev; skb->protocol = htons(ETH_P_802_2); diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index f78ee26d787a..e4082f9d766b 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -167,7 +167,7 @@ hdr->f.status = s; hdr->f.len = l; hdr->f.data = d ret = skb->len - phdrlen; skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, hdrlen); if (prism_header) skb_pull(skb, phdrlen); @@ -1073,10 +1073,11 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, if (skb2 != NULL) { /* send to wireless media */ + skb2->dev = dev; skb2->protocol = __constant_htons(ETH_P_802_3); - skb2->mac.raw = skb2->nh.raw = skb2->data; + skb_reset_mac_header(skb2); + skb2->nh.raw = skb2->data; /* skb2->nh.raw = skb2->data + ETH_HLEN; */ - skb2->dev = dev; dev_queue_xmit(skb2); } diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 4a5be70c0419..159baef18e4a 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c @@ -237,7 +237,7 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) iface->stats.tx_packets++; iface->stats.tx_bytes += skb->len; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); meta = (struct hostap_skb_tx_data *) skb->cb; memset(meta, 0, sizeof(*meta)); meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index efb8cf3bd8ad..cc18f9686d27 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -982,7 +982,8 @@ static void prism2_send_mgmt(struct net_device *dev, meta->tx_cb_idx = tx_cb_idx; skb->dev = dev; - skb->mac.raw = skb->nh.raw = skb->data; + skb_reset_mac_header(skb); + skb->nh.raw = skb->data; dev_queue_xmit(skb); } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 3079378fb8cd..9003ff7d151a 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -2217,7 +2217,7 @@ 
static void hostap_tx_callback(local_info_t *local, memcpy(skb_put(skb, len), payload, len); skb->dev = local->dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); cb->func(skb, ok, cb->data); } diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 9077e6edde34..0e29ff762879 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -1063,7 +1063,8 @@ int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, meta->iface = netdev_priv(dev); skb->dev = dev; - skb->mac.raw = skb->nh.raw = skb->data; + skb_reset_mac_header(skb); + skb->nh.raw = skb->data; dev_queue_xmit(skb); return 0; diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index c878a2f3239c..b04c56a25cc5 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c @@ -8133,7 +8133,7 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv, skb->dev = priv->ieee->dev; /* Point raw at the ieee80211_stats */ - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(ETH_P_80211_STATS); diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 3f9d78d059b5..f1415bff527f 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ -770,7 +770,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid, /* Copy the 802.11 header to the skb */ memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); /* If any, copy the data from the card to the skb */ if (datalen > 0) { diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index fc2e0f3a896e..6ebfff034242 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c @@ -303,7 +303,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb) skb_pull(*skb, sizeof (struct rfmon_header)); (*skb)->protocol = htons(ETH_P_802_2); - (*skb)->mac.raw = (*skb)->data; + skb_reset_mac_header(*skb); (*skb)->pkt_type = PACKET_OTHERHOST; return 0; diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index f5ce1c6063d8..2a299a0676a6 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c @@ -2009,7 +2009,7 @@ static void deliver_packet(struct strip *strip_info, STRIP_Header * header, packetlen); skb->dev = get_strip_dev(strip_info); skb->protocol = header->protocol; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); /* Having put a fake header on the front of the sk_buff for the */ /* benefit of tools like tcpdump, skb_pull now 'consumes' that */ diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 0d6d5fcc128b..787c01317042 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -455,7 +455,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) return; } skb_put(pskb, header->length); - pskb->mac.raw = pskb->data; + skb_reset_mac_header(pskb); len -= header->length; skb = dev_alloc_skb(pskb->len); if (!skb) { @@ -473,7 +473,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) return; } memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->dev = pskb->dev; skb->protocol = pskb->protocol; pskb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 
594320ca1b7c..82edf2014402 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -635,7 +635,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, return; } skb_put(pskb, header->next); - pskb->mac.raw = pskb->data; + skb_reset_mac_header(pskb); skb = dev_alloc_skb(pskb->len); if (!skb) { PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n", @@ -646,7 +646,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, return; } memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->dev = pskb->dev; skb->protocol = pskb->protocol; pskb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 7c735e1fe063..910a8ab66b05 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -486,7 +486,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, return -ENOMEM; } if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { - skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr); + skb->mac.raw = skb->data + sizeof(struct qeth_hdr); memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); #ifdef CONFIG_QETH_VLAN if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index d502b77adf6b..28822025b791 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -2278,7 +2278,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev) (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) return tr_type_trans(skb,dev); #endif /* CONFIG_TR */ - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN ); eth = eth_hdr(skb); @@ -2461,7 +2461,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, if (card->options.fake_ll) qeth_rebuild_skb_fake_ll(card, skb, hdr); else - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->ip_summed = card->options.checksum_type; if (card->options.checksum_type == HW_CHECKSUMMING){ if ( (hdr->hdr.l3.ext_flags & diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index d4b333938f73..0fe562af9c8c 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -132,8 +132,8 @@ static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb, { hdlc_device *hdlc = dev_to_hdlc(dev); - skb->mac.raw = skb->data; - skb->dev = dev; + skb->dev = dev; + skb_reset_mac_header(skb); if (hdlc->proto->type_trans) return hdlc->proto->type_trans(skb, dev); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index df229bd5f1a9..748f254b50cc 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -960,6 +960,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +static inline void skb_reset_mac_header(struct sk_buff *skb) +{ + skb->mac.raw = skb->data; +} + /* * CPUs often take a performance hit when accessing unaligned memory * locations. 
The actual performance hit varies, it can be small if the diff --git a/include/net/ax25.h b/include/net/ax25.h index 47ff2f46e908..99a4e364c74a 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -263,8 +263,8 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25) static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) { skb->dev = dev; + skb_reset_mac_header(skb); skb->pkt_type = PACKET_HOST; - skb->mac.raw = skb->data; return htons(ETH_P_AX25); } diff --git a/include/net/x25device.h b/include/net/x25device.h index 1d10c879f7e2..1415bcf93980 100644 --- a/include/net/x25device.h +++ b/include/net/x25device.h @@ -7,8 +7,8 @@ static inline __be16 x25_type_trans(struct sk_buff *skb, struct net_device *dev) { - skb->mac.raw = skb->data; skb->dev = dev; + skb_reset_mac_header(skb); skb->pkt_type = PACKET_HOST; return htons(ETH_P_X25); diff --git a/net/802/fddi.c b/net/802/fddi.c index f8a0c9f6fec9..91dde41b5481 100644 --- a/net/802/fddi.c +++ b/net/802/fddi.c @@ -131,7 +131,7 @@ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev) */ skb->dev = dev; - skb->mac.raw = skb->data; /* point to frame control (FC) */ + skb_reset_mac_header(skb); /* point to frame control (FC) */ if(fddi->hdr.llc_8022_1.dsap==0xe0) { diff --git a/net/802/hippi.c b/net/802/hippi.c index 138302c14ee6..d87190038edb 100644 --- a/net/802/hippi.c +++ b/net/802/hippi.c @@ -131,7 +131,7 @@ __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev) * set the raw address here. */ skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); hip = (struct hippi_hdr *)skb->mac.raw; skb_pull(skb, HIPPI_HLEN); diff --git a/net/802/tr.c b/net/802/tr.c index 987d91559bcc..eb2de0d16208 100644 --- a/net/802/tr.c +++ b/net/802/tr.c @@ -194,7 +194,7 @@ __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev) unsigned riflen=0; skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); trh = tr_hdr(skb); if(trh->saddr[0] & TR_RII) diff --git a/net/atm/br2684.c b/net/atm/br2684.c index c444f5eda22d..900d42ca8a50 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -458,7 +458,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) /* FIXME: tcpdump shows that pointer to mac header is 2 bytes earlier, than should be. What else should I set? 
*/ skb_pull(skb, plen); - skb->mac.raw = ((char *) (skb->data)) - ETH_HLEN; + skb->mac.raw = skb->data - ETH_HLEN; skb->pkt_type = PACKET_HOST; #ifdef CONFIG_BR2684_FAST_TRANS skb->protocol = ((u16 *) skb->data)[-1]; diff --git a/net/atm/clip.c b/net/atm/clip.c index 8c3825816085..ccba24ffb966 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -213,7 +213,7 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) return; } ATM_SKB(skb)->vcc = vcc; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); if (!clip_vcc->encap || skb->len < RFC1483LLC_LEN || memcmp(skb->data, llc_oui, sizeof (llc_oui))) diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 4a6b26becadc..6d11b0633d5a 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -122,7 +122,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) } skb_pull(skb, 1); /* Remove PID */ - skb->mac.raw = skb->nh.raw; + skb_reset_mac_header(skb); skb->nh.raw = skb->data; skb->dev = ax25->ax25_dev->dev; skb->pkt_type = PACKET_HOST; diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index f7ade186bf93..b1c2fa96c69e 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -326,7 +326,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) return 0; } - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); /* Verify and pull out header */ if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK])) diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 905a39c33a16..b22ada529cc3 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -37,7 +37,7 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) br->statistics.tx_packets++; br->statistics.tx_bytes += skb->len; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); if (dest[0] & 1) diff --git a/net/core/dev.c b/net/core/dev.c index 424d6d0e98f8..2fcaf5bc4a9c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1066,7 +1066,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) set by sender, so that the second statement is just protection against buggy protocols. 
*/ - skb2->mac.raw = skb2->data; + skb_reset_mac_header(skb2); if (skb2->nh.raw < skb2->data || skb2->nh.raw > skb2->tail) { @@ -1206,7 +1206,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) BUG_ON(skb_shinfo(skb)->frag_list); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->mac_len = skb->nh.raw - skb->data; __skb_pull(skb, skb->mac_len); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 32a9f80b5f19..0ad3896bbf62 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -324,7 +324,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb->protocol = eth->h_proto = htons(ETH_P_IP); memcpy(eth->h_source, np->local_mac, 6); memcpy(eth->h_dest, np->remote_mac, 6); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 336958fbbcb2..8f6ebd0d3693 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1928,7 +1928,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) nskb->mac_len = skb->mac_len; skb_reserve(nskb, headroom); - nskb->mac.raw = nskb->data; + skb_reset_mac_header(nskb); nskb->nh.raw = nskb->data + skb->mac_len; nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw); memcpy(skb_put(nskb, doffset), skb->data, doffset); diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index c1b5502f195b..ef94ca56d7bd 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1537,7 +1537,7 @@ int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (skb == NULL) return -ENOBUFS; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); cb = DN_SKB_CB(skb); if (rta[RTA_SRC-1]) diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 01ecbe42b1e7..0ac2524f3b68 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -157,7 +157,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) unsigned char *rawp; skb->dev = dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); eth = eth_hdr(skb); diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index d5f5c6616689..f39bf7c41012 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -42,7 +42,7 @@ static void ieee80211_monitor_rx(struct ieee80211_device *ieee, u16 fc = le16_to_cpu(hdr->frame_ctl); skb->dev = ieee->dev; - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_pull(skb, ieee80211_get_hdrlen(fc)); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(ETH_P_80211_RAW); @@ -789,10 +789,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, if (skb2 != NULL) { /* send to wireless media */ + skb2->dev = dev; skb2->protocol = __constant_htons(ETH_P_802_3); - skb2->mac.raw = skb2->nh.raw = skb2->data; + skb_reset_mac_header(skb2); + skb2->nh.raw = skb2->data; /* skb2->nh.raw = skb2->data + ETH_HLEN; */ - skb2->dev = dev; dev_queue_xmit(skb2); } #endif diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 9151da642318..88f8aae873f4 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -616,7 +616,7 @@ static int ipgre_rcv(struct sk_buff *skb) offset += 4; } - skb->mac.raw = skb->nh.raw; + skb_reset_mac_header(skb); skb->nh.raw = __pskb_pull(skb, offset); skb_postpull_rcsum(skb, skb->h.raw, offset); skb->pkt_type = PACKET_HOST; diff --git a/net/ipv4/ip_output.c 
b/net/ipv4/ip_output.c index 5db301b33372..ddba857bd243 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -95,7 +95,7 @@ __inline__ void ip_send_check(struct iphdr *iph) /* dev_loopback_xmit for use with netfilter. */ static int ip_dev_loopback_xmit(struct sk_buff *newskb) { - newskb->mac.raw = newskb->data; + skb_reset_mac_header(newskb); __skb_pull(newskb, newskb->nh.raw - newskb->data); newskb->pkt_type = PACKET_LOOPBACK; newskb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 0b3d7bf40f4e..29ee7be45aa6 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2747,7 +2747,8 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) /* Reserve room for dummy headers, this skb can pass through good chunk of routing engine. */ - skb->mac.raw = skb->nh.raw = skb->data; + skb_reset_mac_header(skb); + skb->nh.raw = skb->data; /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ skb->nh.iph->protocol = IPPROTO_ICMP; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 7e25043d826c..a5f4562b5d29 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -88,7 +88,7 @@ static inline int ip6_output_finish(struct sk_buff *skb) /* dev_loopback_xmit for use with netfilter. */ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) { - newskb->mac.raw = newskb->data; + skb_reset_mac_header(newskb); __skb_pull(newskb, newskb->nh.raw - newskb->data); newskb->pkt_type = PACKET_LOOPBACK; newskb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index cc08cc48e9e9..0aa4762f53f7 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2218,7 +2218,7 @@ int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) /* Reserve room for dummy headers, this skb can pass through good chunk of routing engine. 
*/ - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); rt = (struct rt6_info*) ip6_route_output(NULL, &fl); diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 0b04603e9c47..1b7e2490e2e1 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -93,7 +93,8 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) { /* Some common init stuff */ skb->dev = self->netdev; - skb->h.raw = skb->nh.raw = skb->mac.raw = skb->data; + skb_reset_mac_header(skb); + skb->h.raw = skb->nh.raw = skb->data; skb->protocol = htons(ETH_P_IRDA); skb->priority = TC_PRIO_BESTEFFORT; diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c index 5abfb71aae8d..2acc66dfb558 100644 --- a/net/irda/wrapper.c +++ b/net/irda/wrapper.c @@ -256,7 +256,7 @@ async_bump(struct net_device *dev, /* Feed it to IrLAP layer */ dataskb->dev = dev; - dataskb->mac.raw = dataskb->data; + skb_reset_mac_header(dataskb); dataskb->protocol = htons(ETH_P_IRDA); netif_rx(dataskb); diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c index f4291f349e92..729e25108275 100644 --- a/net/llc/llc_output.c +++ b/net/llc/llc_output.c @@ -52,7 +52,7 @@ int llc_mac_hdr_init(struct sk_buff *skb, if (da) { memcpy(trh->daddr, da, dev->addr_len); tr_source_route(skb, trh, dev); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); } break; } diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 9a97ed6e6910..17c3f1ef83e9 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ -56,7 +56,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) /* Spoof incoming device */ skb->dev = dev; - skb->mac.raw = skb->nh.raw; + skb_reset_mac_header(skb); skb->nh.raw = skb->data; skb->pkt_type = PACKET_HOST; diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index 5d2d93dc0837..c49e223084f1 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c @@ -339,7 +339,7 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev) skb->protocol = ethertype; skb->pkt_type = PACKET_HOST; /* Physically point to point */ skb_pull(skb, cnt); - skb->mac.raw = skb->data; + skb_reset_mac_header(skb); return ethertype; } -- cgit v1.2.3-59-g8ed1b From 48d49d0ccdaa9caff4636ef9c3410973d28131b5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 10 Mar 2007 12:30:58 -0300 Subject: [SK_BUFF]: Introduce skb_set_mac_header() For the cases where we want to set skb->mac.raw to an offset from skb->data. Simple cases first, the memmove ones and specially pktgen will be left for later. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/net/loopback.c | 2 +- drivers/net/myri_sbus.c | 2 +- drivers/s390/net/qeth_eddp.c | 2 +- drivers/s390/net/qeth_main.c | 4 ++-- include/linux/skbuff.h | 5 +++++ net/atm/br2684.c | 2 +- 6 files changed, 11 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 4380e5e89dc9..a71d8e0a9b57 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -90,7 +90,7 @@ static void emulate_large_send_offload(struct sk_buff *skb) if (!nskb) break; skb_reserve(nskb, 32); - nskb->mac.raw = nskb->data - 14; + skb_set_mac_header(nskb, -ETH_HLEN); nskb->nh.raw = nskb->data; iph = nskb->nh.iph; memcpy(nskb->data, skb->nh.raw, doffset); diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index de092658db6c..e1f16fb05846 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -368,7 +368,7 @@ static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev) struct ethhdr *eth; unsigned char *rawp; - skb->mac.raw = skb->data + MYRI_PAD_LEN; + skb_set_mac_header(skb, MYRI_PAD_LEN); skb_pull(skb, dev->hard_header_len); eth = eth_hdr(skb); diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 910a8ab66b05..893125403c68 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -486,7 +486,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, return -ENOMEM; } if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { - skb->mac.raw = skb->data + sizeof(struct qeth_hdr); + skb_set_mac_header(skb, sizeof(struct qeth_hdr)); memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); #ifdef CONFIG_QETH_VLAN if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 28822025b791..c0ee6d94ea38 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -2306,7 +2306,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, struct iphdr *ip_hdr; QETH_DBF_TEXT(trace,5,"skbfktr"); - skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_TR; + skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR); /* this is a fake ethernet header */ fake_hdr = tr_hdr(skb); @@ -2359,7 +2359,7 @@ qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, struct iphdr *ip_hdr; QETH_DBF_TEXT(trace,5,"skbfketh"); - skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_ETH; + skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_ETH); /* this is a fake ethernet header */ fake_hdr = eth_hdr(skb); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 748f254b50cc..43ab6cbf8446 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -965,6 +965,11 @@ static inline void skb_reset_mac_header(struct sk_buff *skb) skb->mac.raw = skb->data; } +static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) +{ + skb->mac.raw = skb->data + offset; +} + /* * CPUs often take a performance hit when accessing unaligned memory * locations. The actual performance hit varies, it can be small if the diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 900d42ca8a50..a1686dfcbb9a 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -458,7 +458,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) /* FIXME: tcpdump shows that pointer to mac header is 2 bytes earlier, than should be. What else should I set? 
*/ skb_pull(skb, plen); - skb->mac.raw = skb->data - ETH_HLEN; + skb_set_mac_header(skb, -ETH_HLEN); skb->pkt_type = PACKET_HOST; #ifdef CONFIG_BR2684_FAST_TRANS skb->protocol = ((u16 *) skb->data)[-1]; -- cgit v1.2.3-59-g8ed1b From 98e399f82ab3a6d863d1d4a7ea48925cc91c830e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Mar 2007 15:33:04 -0700 Subject: [SK_BUFF]: Introduce skb_mac_header() For the places where we need a pointer to the mac header, it is still legal to touch skb->mac.raw directly if just adding to, subtracting from or setting it to another layer header. This one also converts some more cases to skb_reset_mac_header() that my regex missed as it had no spaces before nor after '=', ugh. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- arch/um/drivers/daemon_kern.c | 2 +- arch/um/drivers/mcast_kern.c | 2 +- arch/um/drivers/pcap_kern.c | 2 +- arch/um/drivers/slip_kern.c | 2 +- arch/um/drivers/slirp_kern.c | 2 +- arch/um/os-Linux/drivers/ethertap_kern.c | 2 +- arch/um/os-Linux/drivers/tuntap_kern.c | 2 +- drivers/block/aoe/aoe.h | 2 +- drivers/ieee1394/eth1394.h | 2 +- drivers/media/dvb/dvb-core/dvb_net.c | 2 +- drivers/message/fusion/mptlan.c | 26 ++++++++++++++------------ drivers/net/arcnet/capmode.c | 4 ++-- drivers/net/plip.c | 2 +- drivers/net/slip.c | 2 +- drivers/net/wan/hostess_sv11.c | 2 +- drivers/net/wan/sealevel.c | 2 +- drivers/net/wan/syncppp.c | 2 +- drivers/net/wireless/airo.c | 2 +- drivers/net/wireless/hostap/hostap_main.c | 14 +++++++------- drivers/net/wireless/orinoco.c | 2 +- drivers/net/wireless/wavelan.c | 5 +++-- drivers/net/wireless/wavelan_cs.c | 4 ++-- drivers/s390/net/claw.c | 2 +- include/linux/if_ether.h | 2 +- include/linux/if_tr.h | 2 +- include/linux/if_vlan.h | 2 +- include/linux/netfilter_bridge/ebt_802_3.h | 2 +- include/linux/skbuff.h | 10 ++++++++++ net/802/hippi.c | 2 +- net/appletalk/ddp.c | 6 +++--- net/ax25/af_ax25.c | 5 +++-- net/bluetooth/bnep/core.c | 11 +++++++---- net/bridge/br_netfilter.c | 5 +++-- net/core/dev.c | 2 +- net/core/filter.c | 2 +- net/core/skbuff.c | 2 +- net/ipv4/netfilter/ipt_LOG.c | 4 ++-- net/ipv4/netfilter/ipt_ULOG.c | 4 ++-- net/ipv4/route.c | 4 ++-- net/ipv4/tcp_input.c | 2 +- net/ipv4/xfrm4_mode_tunnel.c | 4 ++-- net/ipv6/ndisc.c | 3 ++- net/ipv6/netfilter/ip6t_LOG.c | 5 +++-- net/ipv6/netfilter/ip6t_eui64.c | 4 ++-- net/ipv6/xfrm6_mode_beet.c | 4 ++-- net/ipv6/xfrm6_mode_tunnel.c | 4 ++-- net/netfilter/xt_mac.c | 4 ++-- net/packet/af_packet.c | 8 ++++---- net/tipc/eth_media.c | 4 ++-- 49 files changed, 108 insertions(+), 88 deletions(-) (limited to 'include') diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c index 9c2e7a758f21..adeece11e596 100644 --- a/arch/um/drivers/daemon_kern.c +++ b/arch/um/drivers/daemon_kern.c @@ -46,7 +46,7 @@ static int daemon_read(int fd, struct sk_buff **skb, { *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER); if(*skb == NULL) return(-ENOMEM); - return(net_recvfrom(fd, (*skb)->mac.raw, + return(net_recvfrom(fd, skb_mac_header(*skb), (*skb)->dev->mtu + ETH_HEADER_OTHER)); } diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c index 52ccb7b53cd2..e6b8e0dd72a8 100644 --- a/arch/um/drivers/mcast_kern.c +++ b/arch/um/drivers/mcast_kern.c @@ -50,7 +50,7 @@ static int mcast_read(int fd, struct sk_buff **skb, struct uml_net_private *lp) { *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER); if(*skb == NULL) return(-ENOMEM); - return(net_recvfrom(fd, (*skb)->mac.raw, + return(net_recvfrom(fd, 
skb_mac_header(*skb), (*skb)->dev->mtu + ETH_HEADER_OTHER)); } diff --git a/arch/um/drivers/pcap_kern.c b/arch/um/drivers/pcap_kern.c index e67362acf0e7..948849343ca4 100644 --- a/arch/um/drivers/pcap_kern.c +++ b/arch/um/drivers/pcap_kern.c @@ -36,7 +36,7 @@ static int pcap_read(int fd, struct sk_buff **skb, { *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER); if(*skb == NULL) return(-ENOMEM); - return(pcap_user_read(fd, (*skb)->mac.raw, + return(pcap_user_read(fd, skb_mac_header(*skb), (*skb)->dev->mtu + ETH_HEADER_OTHER, (struct pcap_data *) &lp->user)); } diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c index 25634bd1f585..125c44f77638 100644 --- a/arch/um/drivers/slip_kern.c +++ b/arch/um/drivers/slip_kern.c @@ -49,7 +49,7 @@ static unsigned short slip_protocol(struct sk_buff *skbuff) static int slip_read(int fd, struct sk_buff **skb, struct uml_net_private *lp) { - return(slip_user_read(fd, (*skb)->mac.raw, (*skb)->dev->mtu, + return(slip_user_read(fd, skb_mac_header(*skb), (*skb)->dev->mtu, (struct slip_data *) &lp->user)); } diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c index b3ed8fb874ab..0a0324a6d290 100644 --- a/arch/um/drivers/slirp_kern.c +++ b/arch/um/drivers/slirp_kern.c @@ -53,7 +53,7 @@ static unsigned short slirp_protocol(struct sk_buff *skbuff) static int slirp_read(int fd, struct sk_buff **skb, struct uml_net_private *lp) { - return(slirp_user_read(fd, (*skb)->mac.raw, (*skb)->dev->mtu, + return(slirp_user_read(fd, skb_mac_header(*skb), (*skb)->dev->mtu, (struct slirp_data *) &lp->user)); } diff --git a/arch/um/os-Linux/drivers/ethertap_kern.c b/arch/um/os-Linux/drivers/ethertap_kern.c index 70541821775f..12689141414d 100644 --- a/arch/um/os-Linux/drivers/ethertap_kern.c +++ b/arch/um/os-Linux/drivers/ethertap_kern.c @@ -43,7 +43,7 @@ static int etap_read(int fd, struct sk_buff **skb, struct uml_net_private *lp) *skb = ether_adjust_skb(*skb, ETH_HEADER_ETHERTAP); if(*skb == NULL) return(-ENOMEM); - len = net_recvfrom(fd, (*skb)->mac.raw, + len = net_recvfrom(fd, skb_mac_header(*skb), (*skb)->dev->mtu + 2 * ETH_HEADER_ETHERTAP); if(len <= 0) return(len); skb_pull(*skb, 2); diff --git a/arch/um/os-Linux/drivers/tuntap_kern.c b/arch/um/os-Linux/drivers/tuntap_kern.c index 76570a2c25c3..f1714e7fb1d0 100644 --- a/arch/um/os-Linux/drivers/tuntap_kern.c +++ b/arch/um/os-Linux/drivers/tuntap_kern.c @@ -43,7 +43,7 @@ static int tuntap_read(int fd, struct sk_buff **skb, { *skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER); if(*skb == NULL) return(-ENOMEM); - return(net_read(fd, (*skb)->mac.raw, + return(net_read(fd, skb_mac_header(*skb), (*skb)->dev->mtu + ETH_HEADER_OTHER)); } diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 4c34f8d31cc9..1d8466817943 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -53,7 +53,7 @@ struct aoe_hdr { static inline struct aoe_hdr *aoe_hdr(const struct sk_buff *skb) { - return (struct aoe_hdr *)skb->mac.raw; + return (struct aoe_hdr *)skb_mac_header(skb); } #endif diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h index c45cbff9138d..1e8356535149 100644 --- a/drivers/ieee1394/eth1394.h +++ b/drivers/ieee1394/eth1394.h @@ -90,7 +90,7 @@ struct eth1394hdr { static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb) { - return (struct eth1394hdr *)skb->mac.raw; + return (struct eth1394hdr *)skb_mac_header(skb); } #endif diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 76e9c36597eb..c6b004182d91 
100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c @@ -174,7 +174,7 @@ static unsigned short dvb_net_eth_type_trans(struct sk_buff *skb, struct ethhdr *eth; unsigned char *rawp; - skb->mac.raw=skb->data; + skb_reset_mac_header(skb); skb_pull(skb,dev->hard_header_len); eth = eth_hdr(skb); diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index d5b878d56280..21fe1b66808c 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c @@ -714,6 +714,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) LANSendRequest_t *pSendReq; SGETransaction32_t *pTrans; SGESimple64_t *pSimple; + const unsigned char *mac; dma_addr_t dma; unsigned long flags; int ctx; @@ -784,6 +785,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) // IOC_AND_NETDEV_NAMES_s_s(dev), // ctx, skb, skb->data)); + mac = skb_mac_header(skb); #ifdef QLOGIC_NAA_WORKAROUND { struct NAA_Hosed *nh; @@ -793,12 +795,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) drops. */ read_lock_irq(&bad_naa_lock); for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) { - if ((nh->ieee[0] == skb->mac.raw[0]) && - (nh->ieee[1] == skb->mac.raw[1]) && - (nh->ieee[2] == skb->mac.raw[2]) && - (nh->ieee[3] == skb->mac.raw[3]) && - (nh->ieee[4] == skb->mac.raw[4]) && - (nh->ieee[5] == skb->mac.raw[5])) { + if ((nh->ieee[0] == mac[0]) && + (nh->ieee[1] == mac[1]) && + (nh->ieee[2] == mac[2]) && + (nh->ieee[3] == mac[3]) && + (nh->ieee[4] == mac[4]) && + (nh->ieee[5] == mac[5])) { cur_naa = nh->NAA; dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " "= %04x.\n", cur_naa)); @@ -810,12 +812,12 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) #endif pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | - (skb->mac.raw[0] << 8) | - (skb->mac.raw[1] << 0)); - pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) | - (skb->mac.raw[3] << 16) | - (skb->mac.raw[4] << 8) | - (skb->mac.raw[5] << 0)); + (mac[0] << 8) | + (mac[1] << 0)); + pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) | + (mac[3] << 16) | + (mac[4] << 8) | + (mac[5] << 0)); pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index 6c764b66e9cc..f6a87bd20ff2 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -123,7 +123,7 @@ static void rx(struct net_device *dev, int bufnum, skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); skb->dev = dev; skb_reset_mac_header(skb); - pkt = (struct archdr *)skb->mac.raw; + pkt = (struct archdr *)skb_mac_header(skb); skb_pull(skb, ARC_HDR_SIZE); /* up to sizeof(pkt->soft) has already been copied from the card */ @@ -269,7 +269,7 @@ static int ack_tx(struct net_device *dev, int acked) ackskb->dev = dev; skb_reset_mac_header(ackskb); - ackpkt = (struct archdr *)ackskb->mac.raw; + ackpkt = (struct archdr *)skb_mac_header(ackskb); /* skb_pull(ackskb, ARC_HDR_SIZE); */ diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 6bb085f54437..8754cf3356b0 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c @@ -546,7 +546,7 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev) struct ethhdr *eth; unsigned char *rawp; - skb->mac.raw=skb->data; + skb_reset_mac_header(skb); skb_pull(skb,dev->hard_header_len); eth = eth_hdr(skb); diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 2f4b1de7a2b4..65bd20fac820 100644 --- a/drivers/net/slip.c +++ 
b/drivers/net/slip.c @@ -363,7 +363,7 @@ sl_bump(struct slip *sl) } skb->dev = sl->dev; memcpy(skb_put(skb,count), sl->rbuff, count); - skb->mac.raw=skb->data; + skb_reset_mac_header(skb); skb->protocol=htons(ETH_P_IP); netif_rx(skb); sl->dev->last_rx = jiffies; diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index a02c5fb40567..9ba3e4ee6ec7 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -59,7 +59,7 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ skb_trim(skb, skb->len-2); skb->protocol=__constant_htons(ETH_P_WAN_PPP); - skb->mac.raw=skb->data; + skb_reset_mac_header(skb); skb->dev=c->netdevice; /* * Send it to the PPP layer. We don't have time to process diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 70fb1b98b1dd..131358108c5a 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -61,7 +61,7 @@ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ skb_trim(skb, skb->len-2); skb->protocol=htons(ETH_P_WAN_PPP); - skb->mac.raw=skb->data; + skb_reset_mac_header(skb); skb->dev=c->netdevice; /* * Send it to the PPP layer. We don't have time to process diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index 218f7b574ab3..67fc67cfd452 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c @@ -227,7 +227,7 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb) unsigned long flags; skb->dev=dev; - skb->mac.raw=skb->data; + skb_reset_mac_header(skb); if (dev->flags & IFF_RUNNING) { diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 692a23f9834d..7fe0a61091a6 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -2444,7 +2444,7 @@ static int add_airo_dev( struct net_device *dev ); static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr) { - memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); + memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); return ETH_ALEN; } diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 0e29ff762879..c2616e7b0059 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -590,20 +590,20 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr) { - memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); /* addr2 */ + memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ return ETH_ALEN; } int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr) { - if (*(u32 *)skb->mac.raw == LWNG_CAP_DID_BASE) { - memcpy(haddr, skb->mac.raw + - sizeof(struct linux_wlan_ng_prism_hdr) + 10, + const unsigned char *mac = skb_mac_header(skb); + + if (*(u32 *)mac == LWNG_CAP_DID_BASE) { + memcpy(haddr, mac + sizeof(struct linux_wlan_ng_prism_hdr) + 10, ETH_ALEN); /* addr2 */ - } else { /* (*(u32 *)skb->mac.raw == htonl(LWNG_CAPHDR_VERSION)) */ - memcpy(haddr, skb->mac.raw + - sizeof(struct linux_wlan_ng_cap_hdr) + 10, + } else { /* (*(u32 *)mac == htonl(LWNG_CAPHDR_VERSION)) */ + memcpy(haddr, mac + sizeof(struct linux_wlan_ng_cap_hdr) + 10, ETH_ALEN); /* addr2 */ } return ETH_ALEN; diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 
f1415bff527f..062286dc8e15 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ -689,7 +689,7 @@ static void orinoco_stat_gather(struct net_device *dev, /* Note : gcc will optimise the whole section away if * WIRELESS_SPY is not defined... - Jean II */ if (SPY_NUMBER(priv)) { - orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN, + orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN, desc->signal, desc->silence); } } diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index 69cb1471096b..2bf77b1ee531 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c @@ -2517,7 +2517,8 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) skb->protocol = eth_type_trans(skb, dev); #ifdef DEBUG_RX_INFO - wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read"); + wv_packet_info(skb_mac_header(skb), sksize, dev->name, + "wv_packet_read"); #endif /* DEBUG_RX_INFO */ /* Statistics-gathering and associated stuff. @@ -2553,7 +2554,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) /* Spying stuff */ #ifdef IW_WIRELESS_SPY - wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, + wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE, stats); #endif /* IW_WIRELESS_SPY */ #ifdef HISTOGRAM diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index 9351ee773314..67b867f837ca 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -2889,7 +2889,7 @@ wv_packet_read(struct net_device * dev, skb->protocol = eth_type_trans(skb, dev); #ifdef DEBUG_RX_INFO - wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read"); + wv_packet_info(skb_mac_header(skb), sksize, dev->name, "wv_packet_read"); #endif /* DEBUG_RX_INFO */ /* Statistics gathering & stuff associated. 
@@ -2923,7 +2923,7 @@ wv_packet_read(struct net_device * dev, #endif /* WAVELAN_ROAMING */ #ifdef WIRELESS_SPY - wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, stats); + wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE, stats); #endif /* WIRELESS_SPY */ #ifdef HISTOGRAM wl_his_gather(dev, stats); diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 7809a79feec7..6dd64d0c8d45 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -3525,8 +3525,8 @@ unpack_next: memcpy(skb_put(skb,len_of_data), privptr->p_mtc_envelope, len_of_data); - skb->mac.raw=skb->data; skb->dev=dev; + skb_reset_mac_header(skb); skb->protocol=htons(ETH_P_IP); skb->ip_summed=CHECKSUM_UNNECESSARY; privptr->stats.rx_packets++; diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index ab08f35cbc35..f6863fbcf334 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -112,7 +112,7 @@ struct ethhdr { static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) { - return (struct ethhdr *)skb->mac.raw; + return (struct ethhdr *)skb_mac_header(skb); } #ifdef CONFIG_SYSCTL diff --git a/include/linux/if_tr.h b/include/linux/if_tr.h index 2f94cf2c7abb..046e9d95ba9a 100644 --- a/include/linux/if_tr.h +++ b/include/linux/if_tr.h @@ -47,7 +47,7 @@ struct trh_hdr { static inline struct trh_hdr *tr_hdr(const struct sk_buff *skb) { - return (struct trh_hdr *)skb->mac.raw; + return (struct trh_hdr *)skb_mac_header(skb); } #ifdef CONFIG_SYSCTL extern struct ctl_table tr_table[]; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d103580c72d2..544490d9d0bd 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -51,7 +51,7 @@ struct vlan_ethhdr { static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) { - return (struct vlan_ethhdr *)skb->mac.raw; + return (struct vlan_ethhdr *)skb_mac_header(skb); } struct vlan_hdr { diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h index 07f044ff1a6b..a11b0c2017fd 100644 --- a/include/linux/netfilter_bridge/ebt_802_3.h +++ b/include/linux/netfilter_bridge/ebt_802_3.h @@ -54,7 +54,7 @@ struct ebt_802_3_hdr { static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb) { - return (struct ebt_802_3_hdr *)skb->mac.raw; + return (struct ebt_802_3_hdr *)skb_mac_header(skb); } #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 43ab6cbf8446..dff81af454b7 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -960,6 +960,16 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +static inline unsigned char *skb_mac_header(const struct sk_buff *skb) +{ + return skb->mac.raw; +} + +static inline int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac.raw != NULL; +} + static inline void skb_reset_mac_header(struct sk_buff *skb) { skb->mac.raw = skb->data; diff --git a/net/802/hippi.c b/net/802/hippi.c index d87190038edb..87ffc12b6891 100644 --- a/net/802/hippi.c +++ b/net/802/hippi.c @@ -132,7 +132,7 @@ __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev) */ skb->dev = dev; skb_reset_mac_header(skb); - hip = (struct hippi_hdr *)skb->mac.raw; + hip = (struct hippi_hdr *)skb_mac_header(skb); skb_pull(skb, HIPPI_HLEN); /* diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 32b82705b685..934f25993ce8 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1484,7 +1484,7 @@ static int ltalk_rcv(struct sk_buff 
*skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { /* Expand any short form frames */ - if (skb->mac.raw[2] == 1) { + if (skb_mac_header(skb)[2] == 1) { struct ddpehdr *ddp; /* Find our address */ struct atalk_addr *ap = atalk_find_dev_addr(dev); @@ -1510,8 +1510,8 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, * we write the network numbers ! */ - ddp->deh_dnode = skb->mac.raw[0]; /* From physical header */ - ddp->deh_snode = skb->mac.raw[1]; /* From physical header */ + ddp->deh_dnode = skb_mac_header(skb)[0]; /* From physical header */ + ddp->deh_snode = skb_mac_header(skb)[1]; /* From physical header */ ddp->deh_dnet = ap->s_net; /* Network number */ ddp->deh_snet = ap->s_net; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 62605dc5a2c8..c89e4f6f9025 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1645,9 +1645,10 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name; ax25_digi digi; ax25_address src; + const unsigned char *mac = skb_mac_header(skb); - ax25_addr_parse(skb->mac.raw+1, skb->data-skb->mac.raw-1, &src, NULL, &digi, NULL, NULL); - + ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, + &digi, NULL, NULL); sax->sax25_family = AF_AX25; /* We set this correctly, even though we may not let the application know the digi calls further down (because it diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index b1c2fa96c69e..97156c4abc8d 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -364,17 +364,20 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) case BNEP_COMPRESSED_SRC_ONLY: memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN); - memcpy(__skb_put(nskb, ETH_ALEN), skb->mac.raw, ETH_ALEN); + memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), ETH_ALEN); put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); break; case BNEP_COMPRESSED_DST_ONLY: - memcpy(__skb_put(nskb, ETH_ALEN), skb->mac.raw, ETH_ALEN); - memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source, ETH_ALEN + 2); + memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), + ETH_ALEN); + memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source, + ETH_ALEN + 2); break; case BNEP_GENERAL: - memcpy(__skb_put(nskb, ETH_ALEN * 2), skb->mac.raw, ETH_ALEN * 2); + memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb), + ETH_ALEN * 2); put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); break; } diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 5439a3c46c3e..1163c4f69899 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -753,7 +753,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, #ifdef CONFIG_NETFILTER_DEBUG /* Be very paranoid. This probably won't happen anymore, but let's * keep the check just to be sure... */ - if (skb->mac.raw < skb->head || skb->mac.raw + ETH_HLEN > skb->data) { + if (skb_mac_header(skb) < skb->head || + skb_mac_header(skb) + ETH_HLEN > skb->data) { printk(KERN_CRIT "br_netfilter: Argh!! 
br_nf_post_routing: " "bad mac.raw pointer.\n"); goto print_error; @@ -808,7 +809,7 @@ print_error: if (realoutdev) printk("[%s]", realoutdev->name); } - printk(" head:%p, raw:%p, data:%p\n", skb->head, skb->mac.raw, + printk(" head:%p, raw:%p, data:%p\n", skb->head, skb_mac_header(skb), skb->data); dump_stack(); return NF_ACCEPT; diff --git a/net/core/dev.c b/net/core/dev.c index 2fcaf5bc4a9c..560560fe3064 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1232,7 +1232,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) } rcu_read_unlock(); - __skb_push(skb, skb->data - skb->mac.raw); + __skb_push(skb, skb->data - skb_mac_header(skb)); return segs; } diff --git a/net/core/filter.c b/net/core/filter.c index 8d185a089c53..1cc128d05422 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -44,7 +44,7 @@ static void *__load_pointer(struct sk_buff *skb, int k) if (k >= SKF_NET_OFF) ptr = skb->nh.raw + k - SKF_NET_OFF; else if (k >= SKF_LL_OFF) - ptr = skb->mac.raw + k - SKF_LL_OFF; + ptr = skb_mac_header(skb) + k - SKF_LL_OFF; if (ptr >= skb->head && ptr < skb->tail) return ptr; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8f6ebd0d3693..1493c95f633e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1878,7 +1878,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) struct sk_buff *segs = NULL; struct sk_buff *tail = NULL; unsigned int mss = skb_shinfo(skb)->gso_size; - unsigned int doffset = skb->data - skb->mac.raw; + unsigned int doffset = skb->data - skb_mac_header(skb); unsigned int offset = doffset; unsigned int headroom; unsigned int len; diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index d9c37fd94228..c697971fe317 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c @@ -399,9 +399,9 @@ ipt_log_packet(unsigned int pf, /* MAC logging for input chain only. 
*/ printk("MAC="); if (skb->dev && skb->dev->hard_header_len - && skb->mac.raw != (void*)skb->nh.iph) { + && skb->mac.raw != skb->nh.raw) { int i; - unsigned char *p = skb->mac.raw; + const unsigned char *p = skb_mac_header(skb); for (i = 0; i < skb->dev->hard_header_len; i++,p++) printk("%02x%c", *p, i==skb->dev->hard_header_len - 1 diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 9718b666a380..fae2a34d23d0 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -251,9 +251,9 @@ static void ipt_ulog_packet(unsigned int hooknum, *(pm->prefix) = '\0'; if (in && in->hard_header_len > 0 - && skb->mac.raw != (void *) skb->nh.iph + && skb->mac.raw != skb->nh.raw && in->hard_header_len <= ULOG_MAC_LEN) { - memcpy(pm->mac, skb->mac.raw, in->hard_header_len); + memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len); pm->mac_len = in->hard_header_len; } else pm->mac_len = 0; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 29ee7be45aa6..486ab93127ce 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1698,9 +1698,9 @@ static void ip_handle_martian_source(struct net_device *dev, printk(KERN_WARNING "martian source %u.%u.%u.%u from " "%u.%u.%u.%u, on dev %s\n", NIPQUAD(daddr), NIPQUAD(saddr), dev->name); - if (dev->hard_header_len && skb->mac.raw) { + if (dev->hard_header_len && skb_mac_header_was_set(skb)) { int i; - unsigned char *p = skb->mac.raw; + const unsigned char *p = skb_mac_header(skb); printk(KERN_WARNING "ll header: "); for (i = 0; i < dev->hard_header_len; i++, p++) { printk("%02x", *p); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 1ec05bd673a7..f5e019cefc15 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3633,7 +3633,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, if (!nskb) return; - skb_set_mac_header(nskb, skb->mac.raw - skb->head); + skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); nskb->nh.raw = nskb->data + (skb->nh.raw - skb->head); nskb->h.raw = nskb->data + (skb->h.raw - skb->head); diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index f09055d3a768..8e123e30cf61 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -126,9 +126,9 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) skb->protocol = htons(ETH_P_IPV6); } #endif - old_mac = skb->mac.raw; + old_mac = skb_mac_header(skb); skb_set_mac_header(skb, -skb->mac_len); - memmove(skb->mac.raw, old_mac, skb->mac_len); + memmove(skb_mac_header(skb), old_mac, skb->mac_len); skb->nh.raw = skb->data; err = 0; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 053147a0027e..a3e3d9e2f44b 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -828,7 +828,8 @@ static void ndisc_recv_ns(struct sk_buff *skb) if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) { if (dad) { if (dev->type == ARPHRD_IEEE802_TR) { - unsigned char *sadr = skb->mac.raw; + const unsigned char *sadr; + sadr = skb_mac_header(skb); if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 && sadr[9] == dev->dev_addr[1] && sadr[10] == dev->dev_addr[2] && diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index afaa039d0b7b..fc9e51a77784 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c @@ -397,7 +397,7 @@ ip6t_log_packet(unsigned int pf, printk("MAC="); if (skb->dev && (len = skb->dev->hard_header_len) && skb->mac.raw != skb->nh.raw) { - unsigned char *p = skb->mac.raw; + const unsigned char *p = skb_mac_header(skb); 
int i; if (skb->dev->type == ARPHRD_SIT && @@ -412,7 +412,8 @@ ip6t_log_packet(unsigned int pf, printk(" "); if (skb->dev->type == ARPHRD_SIT) { - struct iphdr *iph = (struct iphdr *)skb->mac.raw; + const struct iphdr *iph = + (struct iphdr *)skb_mac_header(skb); printk("TUNNEL=%u.%u.%u.%u->%u.%u.%u.%u ", NIPQUAD(iph->saddr), NIPQUAD(iph->daddr)); diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c index 967bed71d4a8..c2676066a80f 100644 --- a/net/ipv6/netfilter/ip6t_eui64.c +++ b/net/ipv6/netfilter/ip6t_eui64.c @@ -32,8 +32,8 @@ match(const struct sk_buff *skb, unsigned char eui64[8]; int i = 0; - if (!(skb->mac.raw >= skb->head && - (skb->mac.raw + ETH_HLEN) <= skb->data) && + if (!(skb_mac_header(skb) >= skb->head && + (skb_mac_header(skb) + ETH_HLEN) <= skb->data) && offset != 0) { *hotdrop = 1; return 0; diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index 53cfe1a10ccd..79364b1e965a 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ b/net/ipv6/xfrm6_mode_beet.c @@ -70,9 +70,9 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) memmove(skb->data, skb->nh.raw, size); skb->nh.raw = skb->data; - old_mac = skb->mac.raw; + old_mac = skb_mac_header(skb); skb_set_mac_header(skb, -skb->mac_len); - memmove(skb->mac.raw, old_mac, skb->mac_len); + memmove(skb_mac_header(skb), old_mac, skb->mac_len); ip6h = skb->nh.ipv6h; ip6h->payload_len = htons(skb->len - size); diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index d2c560c181a1..5bb0677d3730 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -108,9 +108,9 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) ip6ip_ecn_decapsulate(skb); skb->protocol = htons(ETH_P_IP); } - old_mac = skb->mac.raw; + old_mac = skb_mac_header(skb); skb_set_mac_header(skb, -skb->mac_len); - memmove(skb->mac.raw, old_mac, skb->mac_len); + memmove(skb_mac_header(skb), old_mac, skb->mac_len); skb->nh.raw = skb->data; err = 0; diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c index d430d90d7b26..1d3a1d98b885 100644 --- a/net/netfilter/xt_mac.c +++ b/net/netfilter/xt_mac.c @@ -37,8 +37,8 @@ match(const struct sk_buff *skb, const struct xt_mac_info *info = matchinfo; /* Is mac pointer valid? */ - return (skb->mac.raw >= skb->head - && (skb->mac.raw + ETH_HLEN) <= skb->data + return (skb_mac_header(skb) >= skb->head && + (skb_mac_header(skb) + ETH_HLEN) <= skb->data /* If so, compare... */ && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr)) ^ info->invert)); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 6f8c72d2413b..73cb2d3e27d2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -284,7 +284,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct * Incoming packets have ll header pulled, * push it back. * - * For outgoing ones skb->data == skb->mac.raw + * For outgoing ones skb->data == skb_mac_header(skb) * so that this procedure is noop. */ @@ -303,7 +303,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct spkt = &PACKET_SKB_CB(skb)->sa.pkt; - skb_push(skb, skb->data-skb->mac.raw); + skb_push(skb, skb->data - skb_mac_header(skb)); /* * The SOCK_PACKET socket receives _all_ frames. @@ -488,7 +488,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet never delivered to user. 
*/ if (sk->sk_type != SOCK_DGRAM) - skb_push(skb, skb->data - skb->mac.raw); + skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb->nh.raw - skb->data); @@ -592,7 +592,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe if (dev->hard_header) { if (sk->sk_type != SOCK_DGRAM) - skb_push(skb, skb->data - skb->mac.raw); + skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb->nh.raw - skb->data); diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index f71ba9db611e..03a9db364538 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -99,8 +99,8 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev, if (likely(eb_ptr->bearer)) { if (likely(!dev->promiscuity) || - !memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) || - !memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) { + !memcmp(skb_mac_header(buf), dev->dev_addr, ETH_ALEN) || + !memcmp(skb_mac_header(buf), dev->broadcast, ETH_ALEN)) { size = msg_size((struct tipc_msg *)buf->data); skb_trim(buf, size); if (likely(buf->len == size)) { -- cgit v1.2.3-59-g8ed1b From 37e6636669b0b996681586facee8034f7f674f6a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 10 Mar 2007 15:34:36 -0300 Subject: [LLC]: Kill llc_set_pdu_hdr We'll have skb_reset_network_header soon. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/net/llc_pdu.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index aa33a477c3fb..4e620992c806 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -218,11 +218,6 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) return (struct llc_pdu_un *)skb->nh.raw; } -static inline void *llc_set_pdu_hdr(struct sk_buff *skb, void *ptr) -{ - return skb->nh.raw = ptr; -} - /** * llc_pdu_header_init - initializes pdu header * @skb: input skb that header must be set into it. @@ -237,7 +232,11 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, u8 ssap, u8 dsap, u8 cr) { const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4; - struct llc_pdu_un *pdu = llc_set_pdu_hdr(skb, skb_push(skb, hlen)); + struct llc_pdu_un *pdu; + + skb_push(skb, hlen); + skb->nh.raw = skb->data; + pdu = llc_pdu_un_hdr(skb); pdu->dsap = dsap; pdu->ssap = ssap; pdu->ssap |= cr; -- cgit v1.2.3-59-g8ed1b From 797659fb4a4a511649cd71028141c32ad1698a12 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 10 Mar 2007 15:56:08 -0300 Subject: [PPPOE]: Introduce pppoe_hdr() For consistency with all the other skb->nh.raw accessors. Also do some really obvious simplifications in pppoe_recvmsg, well the kfree_skb one is not so obvious, but free() and kfree() have the same behaviour (hint :-) ). Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/net/pppoe.c | 22 ++++++++-------------- include/linux/if_pppox.h | 7 +++++++ 2 files changed, 15 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index ebfa2967cd68..3080a44b23ab 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -347,7 +347,7 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) struct pppox_sock *relay_po = NULL; if (sk->sk_state & PPPOX_BOUND) { - struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw; + struct pppoe_hdr *ph = pppoe_hdr(skb); int len = ntohs(ph->length); skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); if (pskb_trim_rcsum(skb, len)) @@ -401,7 +401,7 @@ static int pppoe_rcv(struct sk_buff *skb, if (!(skb = skb_share_check(skb, GFP_ATOMIC))) goto out; - ph = (struct pppoe_hdr *) skb->nh.raw; + ph = pppoe_hdr(skb); po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (po != NULL) @@ -433,7 +433,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb, if (!(skb = skb_share_check(skb, GFP_ATOMIC))) goto out; - ph = (struct pppoe_hdr *) skb->nh.raw; + ph = pppoe_hdr(skb); if (ph->code != PADT_CODE) goto abort; @@ -931,8 +931,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, struct sock *sk = sock->sk; struct sk_buff *skb = NULL; int error = 0; - int len; - struct pppoe_hdr *ph = NULL; if (sk->sk_state & PPPOX_BOUND) { error = -EIO; @@ -949,19 +947,15 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, m->msg_namelen = 0; if (skb) { - error = 0; - ph = (struct pppoe_hdr *) skb->nh.raw; - len = ntohs(ph->length); + struct pppoe_hdr *ph = pppoe_hdr(skb); + const int len = ntohs(ph->length); error = memcpy_toiovec(m->msg_iov, (unsigned char *) &ph->tag[0], len); - if (error < 0) - goto do_skb_free; - error = len; + if (error == 0) + error = len; } -do_skb_free: - if (skb) - kfree_skb(skb); + kfree_skb(skb); end: return error; } diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index e33ee763c052..7044f8ab30a0 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -112,6 +112,13 @@ struct pppoe_hdr { } __attribute__ ((packed)); #ifdef __KERNEL__ +#include + +static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb) +{ + return (struct pppoe_hdr *)skb->nh.raw; +} + struct pppoe_opt { struct net_device *dev; /* device associated with socket*/ int ifindex; /* ifindex of device associated with socket */ -- cgit v1.2.3-59-g8ed1b From c1d2bbe1cd6c7bbdc6d532cefebb66c7efb789ce Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 10 Apr 2007 20:45:18 -0700 Subject: [SK_BUFF]: Introduce skb_reset_network_header(skb) For the common, open coded 'skb->nh.raw = skb->data' operation, so that we can later turn skb->nh.raw into a offset, reducing the size of struct sk_buff in 64bit land while possibly keeping it as a pointer on 32bit. This one touches just the most simple case, next will handle the slightly more "complex" cases. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/block/aoe/aoecmd.c | 2 +- drivers/net/cxgb3/sge.c | 3 ++- drivers/net/hamradio/bpqether.c | 2 +- drivers/net/loopback.c | 2 +- drivers/net/pppoe.c | 4 ++-- drivers/net/wan/hdlc_cisco.c | 2 +- drivers/net/wan/hdlc_fr.c | 2 +- drivers/net/wan/lmc/lmc_main.c | 4 ++-- drivers/net/wireless/hostap/hostap_80211_rx.c | 4 ++-- drivers/net/wireless/hostap/hostap_ap.c | 2 +- drivers/net/wireless/hostap/hostap_main.c | 2 +- include/linux/skbuff.h | 5 +++++ include/net/llc_pdu.h | 2 +- net/appletalk/aarp.c | 2 +- net/atm/mpc.c | 2 +- net/ax25/af_ax25.c | 2 +- net/ax25/ax25_ds_subr.c | 2 +- net/ax25/ax25_in.c | 8 ++++---- net/ax25/ax25_ip.c | 2 +- net/ax25/ax25_out.c | 2 +- net/ax25/ax25_subr.c | 4 ++-- net/core/dev.c | 5 +++-- net/core/netpoll.c | 5 +++-- net/decnet/dn_dev.c | 4 ++-- net/decnet/dn_neigh.c | 6 +++--- net/decnet/dn_route.c | 2 +- net/econet/af_econet.c | 2 +- net/ieee80211/ieee80211_rx.c | 2 +- net/ipv4/arp.c | 2 +- net/ipv4/ip_gre.c | 2 +- net/ipv4/ip_output.c | 4 ++-- net/ipv4/ipip.c | 4 ++-- net/ipv4/route.c | 2 +- net/ipv4/xfrm4_mode_tunnel.c | 2 +- net/ipv6/ip6_output.c | 4 ++-- net/ipv6/ip6_tunnel.c | 6 +++--- net/ipv6/sit.c | 4 ++-- net/ipv6/xfrm6_mode_beet.c | 4 ++-- net/ipv6/xfrm6_mode_tunnel.c | 4 ++-- net/ipv6/xfrm6_output.c | 2 +- net/irda/irlap_frame.c | 3 ++- net/iucv/af_iucv.c | 2 +- net/llc/llc_sap.c | 3 ++- net/netrom/nr_dev.c | 2 +- net/packet/af_packet.c | 6 +++--- net/tipc/eth_media.c | 2 +- net/x25/x25_dev.c | 2 +- 47 files changed, 78 insertions(+), 68 deletions(-) (limited to 'include') diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 74062dc4e90d..1a6aeac5a1c3 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -28,7 +28,7 @@ new_skb(ulong len) skb = alloc_skb(len, GFP_ATOMIC); if (skb) { skb_reset_mac_header(skb); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); skb->protocol = __constant_htons(ETH_P_AOE); skb->priority = 0; skb->next = skb->prev = NULL; diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index b5cf2a60834d..4dd712088bcf 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -1621,7 +1621,8 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, { rq->offload_pkts++; skb_reset_mac_header(skb); - skb->nh.raw = skb->h.raw = skb->data; + skb_reset_network_header(skb); + skb->h.raw = skb->data; if (rq->polling) { rx_gather[gather_idx++] = skb; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index d2542697e298..656f2789c9ba 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -282,7 +282,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) } skb->protocol = ax25_type_trans(skb, dev); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); bpq->stats.tx_packets++; bpq->stats.tx_bytes+=skb->len; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index a71d8e0a9b57..af476d2a513d 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -91,7 +91,7 @@ static void emulate_large_send_offload(struct sk_buff *skb) break; skb_reserve(nskb, 32); skb_set_mac_header(nskb, -ETH_HLEN); - nskb->nh.raw = nskb->data; + skb_reset_network_header(nskb); iph = nskb->nh.iph; memcpy(nskb->data, skb->nh.raw, doffset); if (skb_copy_bits(skb, diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 3080a44b23ab..e94790632d55 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ 
-799,7 +799,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, /* Reserve space for headers. */ skb_reserve(skb, dev->hard_header_len); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); skb->dev = dev; @@ -884,7 +884,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); skb2->protocol = __constant_htons(ETH_P_PPP_SES); - skb2->nh.raw = skb2->data; + skb_reset_network_header(skb2); skb2->dev = dev; diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index c9664fd8a917..00e0aaadabcc 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -124,7 +124,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, skb_put(skb, sizeof(struct cisco_packet)); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); dev_queue_xmit(skb); } diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 3240d10fc86d..b747228c7198 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -590,7 +590,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) skb_put(skb, i); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); dev_queue_xmit(skb); } diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 6d288839ddaa..d4851465c83b 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1668,7 +1668,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ skb->protocol = lmc_proto_type(sc, skb); skb->protocol = htons(ETH_P_WAN_PPP); skb_reset_mac_header(skb); -// skb->nh.raw = skb->data; + /* skb_reset_network_header(skb); */ skb->dev = dev; lmc_proto_netif(sc, skb); @@ -1706,7 +1706,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ nsb->protocol = lmc_proto_type(sc, skb); skb_reset_mac_header(nsb); -// nsb->nh.raw = nsb->data; + /* skb_reset_network_header(nsb); */ nsb->dev = dev; lmc_proto_netif(sc, nsb); } diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index e4082f9d766b..7b7c1ca8f1f4 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -1076,8 +1076,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, skb2->dev = dev; skb2->protocol = __constant_htons(ETH_P_802_3); skb_reset_mac_header(skb2); - skb2->nh.raw = skb2->data; - /* skb2->nh.raw = skb2->data + ETH_HLEN; */ + skb_reset_network_header(skb2); + /* skb2->nh.raw += ETH_HLEN; */ dev_queue_xmit(skb2); } diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index cc18f9686d27..797d950d5d61 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -983,7 +983,7 @@ static void prism2_send_mgmt(struct net_device *dev, skb->dev = dev; skb_reset_mac_header(skb); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); dev_queue_xmit(skb); } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index c2616e7b0059..1f9edd91565d 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -1064,7 +1064,7 @@ int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, skb->dev = dev; skb_reset_mac_header(skb); - skb->nh.raw = skb->data; + 
skb_reset_network_header(skb); dev_queue_xmit(skb); return 0; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index dff81af454b7..6440c78fe625 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -960,6 +960,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +static inline void skb_reset_network_header(struct sk_buff *skb) +{ + skb->nh.raw = skb->data; +} + static inline unsigned char *skb_mac_header(const struct sk_buff *skb) { return skb->mac.raw; diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 4e620992c806..778f75a40b4d 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -235,7 +235,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, struct llc_pdu_un *pdu; skb_push(skb, hlen); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); pdu = llc_pdu_un_hdr(skb); pdu->dsap = dsap; pdu->ssap = ssap; diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index d89d62f3702f..d4e5ba22e246 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c @@ -539,7 +539,7 @@ int aarp_send_ddp(struct net_device *dev, struct sk_buff *skb, int hash; struct aarp_entry *a; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); /* Check for LocalTalk first */ if (dev->type == ARPHRD_LOCALTLK) { diff --git a/net/atm/mpc.c b/net/atm/mpc.c index cb3c004ff022..bc15728fd847 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -713,7 +713,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) skb_push(new_skb, eg->ctrl_info.DH_length); /* add MAC header */ memcpy(new_skb->data, eg->ctrl_info.DLL_header, eg->ctrl_info.DH_length); new_skb->protocol = eth_type_trans(new_skb, dev); - new_skb->nh.raw = new_skb->data; + skb_reset_network_header(new_skb); eg->latest_ip_addr = new_skb->nh.iph->saddr; eg->packets_rcvd++; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index c89e4f6f9025..b1a4d60ce9a8 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1548,7 +1548,7 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock, goto out; } - skb->nh.raw = skb->data; + skb_reset_network_header(skb); /* Add the PID if one is not supplied by the user in the skb */ if (!ax25->pidincl) { diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c index 9569dd3fa466..a49773ff2b92 100644 --- a/net/ax25/ax25_ds_subr.c +++ b/net/ax25/ax25_ds_subr.c @@ -136,7 +136,7 @@ static void ax25_kiss_cmd(ax25_dev *ax25_dev, unsigned char cmd, unsigned char p if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL) return; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); p = skb_put(skb, 2); *p++ = cmd; diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 6d11b0633d5a..3b2aac670266 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -61,8 +61,8 @@ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) skb_reserve(skbn, AX25_MAX_HEADER_LEN); skbn->dev = ax25->ax25_dev->dev; + skb_reset_network_header(skbn); skbn->h.raw = skbn->data; - skbn->nh.raw = skbn->data; /* Copy data from the fragments */ while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) { @@ -123,7 +123,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) skb_pull(skb, 1); /* Remove PID */ skb_reset_mac_header(skb); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); skb->dev = ax25->ax25_dev->dev; skb->pkt_type = PACKET_HOST; skb->protocol = htons(ETH_P_IP); @@ -247,7 +247,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, case AX25_P_IP: skb_pull(skb,2); /* drop 
PID/CTRL */ skb->h.raw = skb->data; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); skb->dev = dev; skb->pkt_type = PACKET_HOST; skb->protocol = htons(ETH_P_IP); @@ -257,7 +257,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, case AX25_P_ARP: skb_pull(skb,2); skb->h.raw = skb->data; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); skb->dev = dev; skb->pkt_type = PACKET_HOST; skb->protocol = htons(ETH_P_ARP); diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 7f818bbcd1c5..4d4ef35e1782 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -171,7 +171,7 @@ int ax25_rebuild_header(struct sk_buff *skb) src_c = *(ax25_address *)(bp + 8); skb_pull(ourskb, AX25_HEADER_LEN - 1); /* Keep PID */ - ourskb->nh.raw = ourskb->data; + skb_reset_network_header(ourskb); ax25=ax25_send_frame( ourskb, diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index 223835092b7a..6e08dc8dee40 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c @@ -205,7 +205,7 @@ static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit) if (skb == NULL) return; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); if (ax25->modulus == AX25_MODULUS) { frame = skb_push(skb, 1); diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c index b6c577e3c914..5fe9b2a6697d 100644 --- a/net/ax25/ax25_subr.c +++ b/net/ax25/ax25_subr.c @@ -162,7 +162,7 @@ void ax25_send_control(ax25_cb *ax25, int frametype, int poll_bit, int type) skb_reserve(skb, ax25->ax25_dev->dev->hard_header_len); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); /* Assume a response - address structure for DTE */ if (ax25->modulus == AX25_MODULUS) { @@ -205,7 +205,7 @@ void ax25_return_dm(struct net_device *dev, ax25_address *src, ax25_address *des return; /* Next SABM will get DM'd */ skb_reserve(skb, dev->hard_header_len); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); ax25_digi_invert(digi, &retdigi); diff --git a/net/core/dev.c b/net/core/dev.c index 560560fe3064..1b0758254ba0 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1074,7 +1074,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) printk(KERN_CRIT "protocol %04x is " "buggy, dev %s\n", skb2->protocol, dev->name); - skb2->nh.raw = skb2->data; + skb_reset_network_header(skb2); } skb2->h.raw = skb2->nh.raw; @@ -1771,7 +1771,8 @@ int netif_receive_skb(struct sk_buff *skb) __get_cpu_var(netdev_rx_stat).total++; - skb->h.raw = skb->nh.raw = skb->data; + skb_reset_network_header(skb); + skb->h.raw = skb->data; skb->mac_len = skb->nh.raw - skb->mac.raw; pt_prev = NULL; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 0ad3896bbf62..b4d1cdd58f11 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -359,7 +359,8 @@ static void arp_reply(struct sk_buff *skb) (2 * sizeof(u32))))) return; - skb->h.raw = skb->nh.raw = skb->data; + skb_reset_network_header(skb); + skb->h.raw = skb->data; arp = skb->nh.arph; if ((arp->ar_hrd != htons(ARPHRD_ETHER) && @@ -389,7 +390,7 @@ static void arp_reply(struct sk_buff *skb) if (!send_skb) return; - send_skb->nh.raw = send_skb->data; + skb_reset_network_header(send_skb); arp = (struct arphdr *) skb_put(send_skb, size); send_skb->dev = skb->dev; send_skb->protocol = htons(ETH_P_ARP); diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 060d725e2942..95871a669dc4 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -913,7 +913,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr 
*ifa) pktlen = (__le16 *)skb_push(skb,2); *pktlen = dn_htons(skb->len - 2); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id); } @@ -1005,7 +1005,7 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa) pktlen = (__le16 *)skb_push(skb, 2); *pktlen = dn_htons(skb->len - 2); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); if (dn_am_i_a_router(dn, dn_db, ifa)) { struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index bf701cf5a386..4bf066c416e2 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c @@ -261,7 +261,7 @@ static int dn_long_output(struct sk_buff *skb) lp->s_class = 0; lp->pt = 0; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet); } @@ -300,7 +300,7 @@ static int dn_short_output(struct sk_buff *skb) sp->srcnode = cb->src; sp->forward = cb->hops & 0x3f; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet); } @@ -342,7 +342,7 @@ static int dn_phase3_output(struct sk_buff *skb) sp->srcnode = cb->src & dn_htons(0x03ff); sp->forward = cb->hops & 0x3f; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet); } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index ef94ca56d7bd..34079b7ba1d3 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -615,7 +615,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type flags = *skb->data; } - skb->nh.raw = skb->data; + skb_reset_network_header(skb); /* * Weed out future version DECnet diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 487f879f5a19..099543f5401f 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -345,7 +345,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, goto out_unlock; skb_reserve(skb, LL_RESERVED_SPACE(dev)); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); eb = (struct ec_cb *)&skb->cb; diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index f39bf7c41012..be5ffaf6e8a5 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -792,7 +792,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, skb2->dev = dev; skb2->protocol = __constant_htons(ETH_P_802_3); skb_reset_mac_header(skb2); - skb2->nh.raw = skb2->data; + skb_reset_network_header(skb2); /* skb2->nh.raw = skb2->data + ETH_HLEN; */ dev_queue_xmit(skb2); } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index e6e196cd3b8c..8c533ceb9709 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -578,7 +578,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, return NULL; skb_reserve(skb, LL_RESERVED_SPACE(dev)); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4)); skb->dev = dev; skb->protocol = htons(ETH_P_ARP); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 88f8aae873f4..ced2c4baf174 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -474,7 +474,7 @@ out: dst_release(skb2->dst); skb2->dst = NULL; skb_pull(skb2, skb->data - (u8*)eiph); - skb2->nh.raw = skb2->data; + skb_reset_network_header(skb2); /* Try to guess 
incoming interface */ memset(&fl, 0, sizeof(fl)); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index ddba857bd243..32f1a23a80f9 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -593,7 +593,7 @@ slow_path: ip_copy_metadata(skb2, skb); skb_reserve(skb2, ll_rs); skb_put(skb2, len + hlen); - skb2->nh.raw = skb2->data; + skb_reset_network_header(skb2); skb2->h.raw = skb2->data + hlen; /* @@ -722,7 +722,7 @@ static inline int ip_ufo_append_data(struct sock *sk, skb_put(skb,fragheaderlen + transhdrlen); /* initialize network header pointer */ - skb->nh.raw = skb->data; + skb_reset_network_header(skb); /* initialize protocol header pointer */ skb->h.raw = skb->data + fragheaderlen; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 3ec5ce0f5498..5f886c892861 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -405,7 +405,7 @@ out: dst_release(skb2->dst); skb2->dst = NULL; skb_pull(skb2, skb->data - (u8*)eiph); - skb2->nh.raw = skb2->data; + skb_reset_network_header(skb2); /* Try to guess incoming interface */ memset(&fl, 0, sizeof(fl)); @@ -487,7 +487,7 @@ static int ipip_rcv(struct sk_buff *skb) secpath_reset(skb); skb->mac.raw = skb->nh.raw; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); skb->protocol = htons(ETH_P_IP); skb->pkt_type = PACKET_HOST; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 486ab93127ce..d29861844903 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2748,7 +2748,7 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) through good chunk of routing engine. */ skb_reset_mac_header(skb); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ skb->nh.iph->protocol = IPPROTO_ICMP; diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 8e123e30cf61..ffc6005d1d55 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -129,7 +129,7 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) old_mac = skb_mac_header(skb); skb_set_mac_header(skb, -skb->mac_len); memmove(skb_mac_header(skb), old_mac, skb->mac_len); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); err = 0; out: diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a5f4562b5d29..4406546d3ce8 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -735,7 +735,7 @@ slow_path: ip6_copy_metadata(frag, skb); skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev)); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); - frag->nh.raw = frag->data; + skb_reset_network_header(frag); fh = (struct frag_hdr*)(frag->data + hlen); frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr); @@ -976,7 +976,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, skb_put(skb,fragheaderlen + transhdrlen); /* initialize network header pointer */ - skb->nh.raw = skb->data; + skb_reset_network_header(skb); /* initialize protocol header pointer */ skb->h.raw = skb->data + fragheaderlen; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index d8c84d8d7cf8..30df8e6c42cc 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -525,7 +525,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, dst_release(skb2->dst); skb2->dst = NULL; skb_pull(skb2, offset); - skb2->nh.raw = skb2->data; + skb_reset_network_header(skb2); eiph = skb2->nh.iph; /* Try to guess incoming interface */ @@ -599,7 +599,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 
dst_release(skb2->dst); skb2->dst = NULL; skb_pull(skb2, offset); - skb2->nh.raw = skb2->data; + skb_reset_network_header(skb2); /* Try to guess incoming interface */ rt = rt6_lookup(&skb2->nh.ipv6h->saddr, NULL, 0, 0); @@ -704,7 +704,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, } secpath_reset(skb); skb->mac.raw = skb->nh.raw; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); skb->protocol = htons(protocol); skb->pkt_type = PACKET_HOST; memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 08d6ed3396e4..0477728578fe 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -340,7 +340,7 @@ out: dst_release(skb2->dst); skb2->dst = NULL; skb_pull(skb2, skb->data - (u8*)iph6); - skb2->nh.raw = skb2->data; + skb_reset_network_header(skb2); /* Try to guess incoming interface */ rt6i = rt6_lookup(&iph6->saddr, NULL, NULL, 0); @@ -383,7 +383,7 @@ static int ipip6_rcv(struct sk_buff *skb) if ((tunnel = ipip6_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) { secpath_reset(skb); skb->mac.raw = skb->nh.raw; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IPV6); skb->pkt_type = PACKET_HOST; diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index 79364b1e965a..c015bfde2b1c 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ b/net/ipv6/xfrm6_mode_beet.c @@ -45,7 +45,7 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) skb->h.raw = skb->data + hdr_len; memmove(skb->data, iph, hdr_len); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); top_iph = skb->nh.ipv6h; skb->nh.raw = &top_iph->nexthdr; skb->h.ipv6h = top_iph + 1; @@ -68,7 +68,7 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) skb_push(skb, size); memmove(skb->data, skb->nh.raw, size); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); old_mac = skb_mac_header(skb); skb_set_mac_header(skb, -skb->mac_len); diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 5bb0677d3730..8ce5ef2d0b1c 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -53,7 +53,7 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) skb_push(skb, x->props.header_len); iph = skb->nh.ipv6h; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); top_iph = skb->nh.ipv6h; skb->nh.raw = &top_iph->nexthdr; skb->h.ipv6h = top_iph + 1; @@ -111,7 +111,7 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) old_mac = skb_mac_header(skb); skb_set_mac_header(skb, -skb->mac_len); memmove(skb_mac_header(skb), old_mac, skb->mac_len); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); err = 0; out: diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index c52e9d6c75ec..56364a5f676a 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -82,7 +82,7 @@ static int xfrm6_output_one(struct sk_buff *skb) spin_unlock_bh(&x->lock); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); if (!(skb->dst = dst_pop(dst))) { err = -EHOSTUNREACH; diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 1b7e2490e2e1..7c815de4a3bf 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -94,7 +94,8 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) /* Some common init stuff */ skb->dev = self->netdev; skb_reset_mac_header(skb); - skb->h.raw = skb->nh.raw = skb->data; + skb_reset_network_header(skb); + skb->h.raw = skb->data; 
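/*
 * Editorial sketch, not part of the patch above: the common open-coded
 * "skb->nh.raw = skb->data" receive-path setup, rewritten with the new
 * skb_reset_network_header() helper so that nh.raw can later be turned
 * into an offset. Mirrors the nr_rx_ip()/ax25_rx_iframe() conversions
 * in this series; the function name demo_rx_setup() and the ETH_P_IP
 * protocol choice are assumptions made for this example only.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static void demo_rx_setup(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb_reset_mac_header(skb);	/* was: skb->mac.raw = skb->data; */
	skb_reset_network_header(skb);	/* was: skb->nh.raw = skb->data;  */
	skb->pkt_type = PACKET_HOST;
	skb->protocol = htons(ETH_P_IP);
	netif_rx(skb);
}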
skb->protocol = htons(ETH_P_IRDA); skb->priority = TC_PRIO_BESTEFFORT; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index acc94214bde6..a485496059c6 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -954,7 +954,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) } skb->h.raw = skb->data; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); skb->len = msg->length; } diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 5fa31117e46b..e76bbbfb64bd 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -38,7 +38,8 @@ struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev) if (skb) { skb_reset_mac_header(skb); skb_reserve(skb, 50); - skb->nh.raw = skb->h.raw = skb->data; + skb_reset_network_header(skb); + skb->h.raw = skb->data; skb->protocol = htons(ETH_P_802_2); skb->dev = dev; if (sk != NULL) diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 17c3f1ef83e9..c7b5d930e732 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ -57,7 +57,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) /* Spoof incoming device */ skb->dev = dev; skb_reset_mac_header(skb); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); skb->pkt_type = PACKET_HOST; netif_rx(skb); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 73cb2d3e27d2..1225e751b3f1 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -401,14 +401,14 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, * notable one here. This should really be fixed at the driver level. */ skb_reserve(skb, LL_RESERVED_SPACE(dev)); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); /* Try to align data part correctly */ if (dev->hard_header) { skb->data -= dev->hard_header_len; skb->tail -= dev->hard_header_len; if (len < dev->hard_header_len) - skb->nh.raw = skb->data; + skb_reset_network_header(skb); } /* Returns -EFAULT on error */ @@ -768,7 +768,7 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, goto out_unlock; skb_reserve(skb, LL_RESERVED_SPACE(dev)); - skb->nh.raw = skb->data; + skb_reset_network_header(skb); if (dev->hard_header) { int res; diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 03a9db364538..67bb29b44d1b 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -73,7 +73,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, clone = skb_clone(buf, GFP_ATOMIC); if (clone) { - clone->nh.raw = clone->data; + skb_reset_network_header(clone); dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev; clone->dev = dev; dev->hard_header(clone, dev, ETH_P_TIPC, diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index c7221de98a95..94fd12f3a909 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -191,7 +191,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb) { unsigned char *dptr; - skb->nh.raw = skb->data; + skb_reset_network_header(skb); switch (nb->dev->type) { case ARPHRD_X25: -- cgit v1.2.3-59-g8ed1b From bbe735e4247dba32568a305553b010081c8dea99 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 10 Mar 2007 22:16:10 -0300 Subject: [SK_BUFF]: Introduce skb_network_offset() For the quite common 'skb->nh.raw - skb->data' sequence. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/isdn/i4l/isdn_net.c | 2 +- drivers/net/atl1/atl1_main.c | 2 +- drivers/net/chelsio/sge.c | 2 +- drivers/net/cxgb3/sge.c | 2 +- drivers/net/e1000/e1000_main.c | 2 +- drivers/net/gianfar.c | 2 +- drivers/net/ixgb/ixgb_main.c | 2 +- drivers/net/netxen/netxen_nic_hw.c | 2 +- include/linux/skbuff.h | 5 +++++ net/ax25/ax25_out.c | 4 ++-- net/core/neighbour.c | 6 +++--- net/ipv4/ip_output.c | 4 ++-- net/ipv6/icmp.c | 4 ++-- net/ipv6/ip6_output.c | 4 ++-- net/netfilter/nf_conntrack_core.c | 4 ++-- net/packet/af_packet.c | 8 ++++---- net/sched/sch_teql.c | 2 +- security/selinux/hooks.c | 4 ++-- 18 files changed, 33 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index fadb9291bc1b..0c2b3752e46e 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -1121,7 +1121,7 @@ isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev) if (!skb) return; if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { - int pullsize = (ulong)skb->nh.raw - (ulong)skb->data - ETH_HLEN; + const int pullsize = skb_network_offset(skb) - ETH_HLEN; if (pullsize > 0) { printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize); skb_pull(skb, pullsize); diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index e3f181602e4f..793a61b2140f 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c @@ -1300,7 +1300,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, ~csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr, 0, IPPROTO_TCP, 0); - ipofst = skb->nh.raw - skb->data; + ipofst = skb_network_offset(skb); if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 47fa8dcf7527..8cdee67d582f 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1865,7 +1865,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) ++st->tx_tso; - eth_type = skb->nh.raw - skb->data == ETH_HLEN ? + eth_type = skb_network_offset(skb) == ETH_HLEN ? CPL_ETH_II : CPL_ETH_II_VLAN; hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 4dd712088bcf..7e9e9db4fb97 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -897,7 +897,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, d->flit[2] = 0; cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); hdr->cntrl = htonl(cntrl); - eth_type = skb->nh.raw - skb->data == ETH_HLEN ? + eth_type = skb_network_offset(skb) == ETH_HLEN ? 
CPL_ETH_II : CPL_ETH_II_VLAN; tso_info |= V_LSO_ETH_TYPE(eth_type) | V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) | diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index b28a915bd980..86161011b539 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2910,7 +2910,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, 0); ipcse = 0; } - ipcss = skb->nh.raw - skb->data; + ipcss = skb_network_offset(skb); ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; tucss = skb->h.raw - skb->data; tucso = (void *)&(skb->h.th->check) - (void *)skb->data; diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 1d019195a391..c7a70933c759 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -952,7 +952,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) * frame (skb->data) and the start of the IP hdr. * l4os is the distance between the start of the * l3 hdr and the l4 hdr */ - fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN); + fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); fcb->l4os = (u16)(skb->h.raw - skb->nh.raw); fcb->flags = flags; diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index afc2ec72529e..cfb791bb45e2 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1195,7 +1195,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr, 0, IPPROTO_TCP, 0); - ipcss = skb->nh.raw - skb->data; + ipcss = skb_network_offset(skb); ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; ipcse = skb->h.raw - skb->data - 1; tucss = skb->h.raw - skb->data; diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 6537574a9cda..625e11ed6aae 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -386,7 +386,7 @@ void netxen_tso_check(struct netxen_adapter *adapter, } adapter->stats.xmitcsummed++; desc->tcp_hdr_offset = skb->h.raw - skb->data; - desc->ip_hdr_offset = skb->nh.raw - skb->data; + desc->ip_hdr_offset = skb_network_offset(skb); } int netxen_is_flash_supported(struct netxen_adapter *adapter) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6440c78fe625..47cc8b07c2b4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -965,6 +965,11 @@ static inline void skb_reset_network_header(struct sk_buff *skb) skb->nh.raw = skb->data; } +static inline int skb_network_offset(const struct sk_buff *skb) +{ + return skb->nh.raw - skb->data; +} + static inline unsigned char *skb_mac_header(const struct sk_buff *skb) { return skb->mac.raw; diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index 6e08dc8dee40..02dea851a11a 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c @@ -148,7 +148,7 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb) if (ka9qfrag == 1) { skb_reserve(skbn, frontlen + 2); - skbn->nh.raw = skbn->data + (skb->nh.raw - skb->data); + skbn->nh.raw = skbn->data + skb_network_offset(skb); memcpy(skb_put(skbn, len), skb->data, len); p = skb_push(skbn, 2); @@ -161,7 +161,7 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb) } } else { skb_reserve(skbn, frontlen + 1); - skbn->nh.raw = skbn->data + (skb->nh.raw - skb->data); + skbn->nh.raw = skbn->data + skb_network_offset(skb); memcpy(skb_put(skbn, len), skb->data, len); p = skb_push(skbn, 1); *p = AX25_P_TEXT; diff --git 
a/net/core/neighbour.c b/net/core/neighbour.c index 841e3f32cab1..c5653c512b43 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1125,7 +1125,7 @@ int neigh_compat_output(struct sk_buff *skb) { struct net_device *dev = skb->dev; - __skb_pull(skb, skb->nh.raw - skb->data); + __skb_pull(skb, skb_network_offset(skb)); if (dev->hard_header && dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, @@ -1147,7 +1147,7 @@ int neigh_resolve_output(struct sk_buff *skb) if (!dst || !(neigh = dst->neighbour)) goto discard; - __skb_pull(skb, skb->nh.raw - skb->data); + __skb_pull(skb, skb_network_offset(skb)); if (!neigh_event_send(neigh, skb)) { int err; @@ -1190,7 +1190,7 @@ int neigh_connected_output(struct sk_buff *skb) struct neighbour *neigh = dst->neighbour; struct net_device *dev = neigh->dev; - __skb_pull(skb, skb->nh.raw - skb->data); + __skb_pull(skb, skb_network_offset(skb)); read_lock_bh(&neigh->lock); err = dev->hard_header(skb, dev, ntohs(skb->protocol), diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 99cd90c22310..669f5d97c6eb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -96,7 +96,7 @@ __inline__ void ip_send_check(struct iphdr *iph) static int ip_dev_loopback_xmit(struct sk_buff *newskb) { skb_reset_mac_header(newskb); - __skb_pull(newskb, newskb->nh.raw - newskb->data); + __skb_pull(newskb, skb_network_offset(newskb)); newskb->pkt_type = PACKET_LOOPBACK; newskb->ip_summed = CHECKSUM_UNNECESSARY; BUG_TRAP(newskb->dst); @@ -1199,7 +1199,7 @@ int ip_push_pending_frames(struct sock *sk) /* move skb->data to ip header from ext header */ if (skb->data < skb->nh.raw) - __skb_pull(skb, skb->nh.raw - skb->data); + __skb_pull(skb, skb_network_offset(skb)); while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw); *tail_skb = tmp_skb; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index a91dfbce8433..aa4a0a59ffac 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -206,7 +206,7 @@ static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset) { u8 _optval, *op; - offset += skb->nh.raw - skb->data; + offset += skb_network_offset(skb); op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval); if (op == NULL) return 1; @@ -431,7 +431,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, tclass = 0; msg.skb = skb; - msg.offset = skb->nh.raw - skb->data; + msg.offset = skb_network_offset(skb); msg.type = type; len = skb->len - msg.offset; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 8a7b5c760147..47d00210cba1 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -89,7 +89,7 @@ static inline int ip6_output_finish(struct sk_buff *skb) static int ip6_dev_loopback_xmit(struct sk_buff *newskb) { skb_reset_mac_header(newskb); - __skb_pull(newskb, newskb->nh.raw - newskb->data); + __skb_pull(newskb, skb_network_offset(newskb)); newskb->pkt_type = PACKET_LOOPBACK; newskb->ip_summed = CHECKSUM_UNNECESSARY; BUG_TRAP(newskb->dst); @@ -1330,7 +1330,7 @@ int ip6_push_pending_frames(struct sock *sk) /* move skb->data to ip header from ext header */ if (skb->data < skb->nh.raw) - __skb_pull(skb, skb->nh.raw - skb->data); + __skb_pull(skb, skb_network_offset(skb)); while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw); *tail_skb = tmp_skb; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index b3a70eb6d42a..7694c51f1251 100644 --- 
a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -768,7 +768,7 @@ resolve_normal_ct(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; - if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data), + if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, protonum, &tuple, l3proto, l4proto)) { DEBUGP("resolve_normal_ct: Can't get tuple\n"); @@ -960,7 +960,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, if (do_acct) { ct->counters[CTINFO2DIR(ctinfo)].packets++; ct->counters[CTINFO2DIR(ctinfo)].bytes += - skb->len - (unsigned int)(skb->nh.raw - skb->data); + skb->len - skb_network_offset(skb); if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1225e751b3f1..a059cc7be672 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -491,7 +491,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ - skb_pull(skb, skb->nh.raw - skb->data); + skb_pull(skb, skb_network_offset(skb)); } } @@ -595,7 +595,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ - skb_pull(skb, skb->nh.raw - skb->data); + skb_pull(skb, skb_network_offset(skb)); } } @@ -613,7 +613,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe if (sk->sk_type == SOCK_DGRAM) { macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16; } else { - unsigned maclen = skb->nh.raw - skb->data; + unsigned maclen = skb_network_offset(skb); netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 
16 : maclen)); macoff = netoff - maclen; } @@ -1145,7 +1145,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, aux.tp_len = PACKET_SKB_CB(skb)->origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; - aux.tp_net = skb->nh.raw - skb->data; + aux.tp_net = skb_network_offset(skb); put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); } diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 587123c61af9..d24914db7861 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -323,7 +323,7 @@ restart: nores = 1; break; } - __skb_pull(skb, skb->nh.raw - skb->data); + __skb_pull(skb, skb_network_offset(skb)); } while ((q = NEXT_SLAVE(q)) != start); if (nores && skb_res == NULL) { diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index d41e24d6ae41..addb58501057 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2944,7 +2944,7 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb, int offset, ihlen, ret = -EINVAL; struct iphdr _iph, *ih; - offset = skb->nh.raw - skb->data; + offset = skb_network_offset(skb); ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); if (ih == NULL) goto out; @@ -3026,7 +3026,7 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb, int ret = -EINVAL, offset; struct ipv6hdr _ipv6h, *ip6; - offset = skb->nh.raw - skb->data; + offset = skb_network_offset(skb); ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); if (ip6 == NULL) goto out; -- cgit v1.2.3-59-g8ed1b From d56f90a7c96da5187f0cdf07ee7434fe6aa78bbc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 10 Apr 2007 20:50:43 -0700 Subject: [SK_BUFF]: Introduce skb_network_header() For the places where we need a pointer to the network header, it is still legal to touch skb->nh.raw directly if just adding to, subtracting from or setting it to another layer header. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/isdn/i4l/isdn_net.c | 5 ++- drivers/net/bonding/bond_alb.c | 2 +- drivers/net/loopback.c | 7 ++-- drivers/net/pasemi_mac.c | 6 ++- drivers/s390/net/qeth_main.c | 6 ++- include/linux/if_pppox.h | 2 +- include/linux/skbuff.h | 5 +++ include/net/cipso_ipv4.h | 2 +- include/net/inet_ecn.h | 6 ++- include/net/llc_pdu.h | 4 +- include/net/pkt_cls.h | 2 +- net/bridge/br_netfilter.c | 12 +++--- net/core/dev.c | 9 +++-- net/core/filter.c | 2 +- net/dccp/ipv6.c | 8 ++-- net/decnet/dn_route.c | 4 +- net/ipv4/af_inet.c | 2 +- net/ipv4/ah4.c | 5 ++- net/ipv4/esp4.c | 7 ++-- net/ipv4/icmp.c | 4 +- net/ipv4/ip_fragment.c | 2 +- net/ipv4/ip_options.c | 12 +++--- net/ipv4/ip_output.c | 6 +-- net/ipv4/ip_sockglue.c | 8 ++-- net/ipv4/ipmr.c | 2 +- net/ipv4/ipvs/ip_vs_app.c | 4 +- net/ipv4/ipvs/ip_vs_core.c | 3 +- net/ipv4/netfilter/arpt_mangle.c | 2 +- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 9 ++--- net/ipv4/raw.c | 2 +- net/ipv4/tcp_input.c | 2 +- net/ipv4/xfrm4_input.c | 2 +- net/ipv4/xfrm4_mode_beet.c | 4 +- net/ipv4/xfrm4_mode_transport.c | 4 +- net/ipv4/xfrm4_policy.c | 2 +- net/ipv6/af_inet6.c | 3 +- net/ipv6/ah6.c | 12 +++--- net/ipv6/datagram.c | 31 ++++++++------ net/ipv6/esp6.c | 4 +- net/ipv6/exthdrs.c | 56 +++++++++++++++----------- net/ipv6/icmp.c | 3 +- net/ipv6/ip6_input.c | 4 +- net/ipv6/ip6_output.c | 23 ++++++----- net/ipv6/ip6_tunnel.c | 5 ++- net/ipv6/ipcomp6.c | 4 +- net/ipv6/mip6.c | 29 +++++++------ net/ipv6/netfilter/nf_conntrack_reasm.c | 19 +++++---- net/ipv6/raw.c | 5 ++- net/ipv6/reassembly.c | 25 ++++++++---- net/ipv6/tcp_ipv6.c | 8 +++- net/ipv6/xfrm6_input.c | 6 +-- net/ipv6/xfrm6_mode_beet.c | 2 +- net/ipv6/xfrm6_mode_transport.c | 6 ++- net/ipv6/xfrm6_mode_tunnel.c | 8 ++-- net/ipv6/xfrm6_policy.c | 16 ++++---- net/netfilter/xt_TCPMSS.c | 4 +- net/sched/act_pedit.c | 2 +- net/sched/cls_u32.c | 2 +- net/sched/em_u32.c | 2 +- 59 files changed, 258 insertions(+), 185 deletions(-) (limited to 'include') diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 0c2b3752e46e..cd3b1fa4a414 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -872,7 +872,8 @@ typedef struct { static void isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) { - u_char *p = skb->nh.raw; /* hopefully, this was set correctly */ + /* hopefully, this was set correctly */ + const u_char *p = skb_network_header(skb); unsigned short proto = ntohs(skb->protocol); int data_ofs; ip_ports *ipp; @@ -880,7 +881,7 @@ isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) addinfo[0] = '\0'; /* This check stolen from 2.1.72 dev_queue_xmit_nit() */ - if (skb->nh.raw < skb->data || skb->nh.raw >= skb->tail) { + if (p < skb->data || p >= skb->tail) { /* fall back to old isdn_net_log_packet method() */ char * buf = skb->data; diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 5c2a12c2b997..86cfcb3f8131 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -106,7 +106,7 @@ struct arp_pkt { static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) { - return (struct arp_pkt *)skb->nh.raw; + return (struct arp_pkt *)skb_network_header(skb); } /* Forward declaration */ diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index af476d2a513d..9265c27b13b2 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -76,7 +76,8 @@ static DEFINE_PER_CPU(struct pcpu_lstats, pcpu_lstats); static void emulate_large_send_offload(struct sk_buff *skb) { struct iphdr *iph 
= skb->nh.iph; - struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4)); + struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) + + (iph->ihl * 4)); unsigned int doffset = (iph->ihl + th->doff) * 4; unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; unsigned int offset = 0; @@ -93,7 +94,7 @@ static void emulate_large_send_offload(struct sk_buff *skb) skb_set_mac_header(nskb, -ETH_HLEN); skb_reset_network_header(nskb); iph = nskb->nh.iph; - memcpy(nskb->data, skb->nh.raw, doffset); + memcpy(nskb->data, skb_network_header(skb), doffset); if (skb_copy_bits(skb, doffset + offset, nskb->data + doffset, @@ -108,7 +109,7 @@ static void emulate_large_send_offload(struct sk_buff *skb) memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); nskb->pkt_type = skb->pkt_type; - th = (struct tcphdr*)(nskb->nh.raw + iph->ihl*4); + th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4); iph->tot_len = htons(frag_size + doffset); iph->id = htons(id); iph->check = 0; diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 3f4213f3d5de..82218720bc3e 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -729,16 +729,18 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; if (skb->ip_summed == CHECKSUM_PARTIAL) { + const unsigned char *nh = skb_network_header(skb); + switch (skb->nh.iph->protocol) { case IPPROTO_TCP: dflags |= XCT_MACTX_CSUM_TCP; dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); - dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); + dflags |= XCT_MACTX_IPO(nh - skb->data); break; case IPPROTO_UDP: dflags |= XCT_MACTX_CSUM_UDP; dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); - dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); + dflags |= XCT_MACTX_IPO(nh - skb->data); break; } } diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index c0ee6d94ea38..0ff29e0628b5 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -3778,9 +3778,11 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) } /* try something else */ if (skb->protocol == ETH_P_IPV6) - return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0; + return (skb_network_header(skb)[24] == 0xff) ? + RTN_MULTICAST : 0; else if (skb->protocol == ETH_P_IP) - return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0; + return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ? + RTN_MULTICAST : 0; /* ... 
*/ if (!memcmp(skb->data, skb->dev->broadcast, 6)) return RTN_BROADCAST; diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 7044f8ab30a0..29d6579ff1a0 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -116,7 +116,7 @@ struct pppoe_hdr { static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb) { - return (struct pppoe_hdr *)skb->nh.raw; + return (struct pppoe_hdr *)skb_network_header(skb); } struct pppoe_opt { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 47cc8b07c2b4..76d30f34b986 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -960,6 +960,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +static inline unsigned char *skb_network_header(const struct sk_buff *skb) +{ + return skb->nh.raw; +} + static inline void skb_reset_network_header(struct sk_buff *skb) { skb->nh.raw = skb->data; diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h index 4c9522c5178f..4f90f5554fac 100644 --- a/include/net/cipso_ipv4.h +++ b/include/net/cipso_ipv4.h @@ -120,7 +120,7 @@ extern int cipso_v4_rbm_strictvalid; */ #define CIPSO_V4_OPTEXIST(x) (IPCB(x)->opt.cipso != 0) -#define CIPSO_V4_OPTPTR(x) ((x)->nh.raw + IPCB(x)->opt.cipso) +#define CIPSO_V4_OPTPTR(x) (skb_network_header(x) + IPCB(x)->opt.cipso) /* * DOI List Functions diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 10117c8503e8..b9ed3898e368 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -114,12 +114,14 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) { switch (skb->protocol) { case __constant_htons(ETH_P_IP): - if (skb->nh.raw + sizeof(struct iphdr) <= skb->tail) + if (skb_network_header(skb) + sizeof(struct iphdr) <= + skb->tail) return IP_ECN_set_ce(skb->nh.iph); break; case __constant_htons(ETH_P_IPV6): - if (skb->nh.raw + sizeof(struct ipv6hdr) <= skb->tail) + if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= + skb->tail) return IP6_ECN_set_ce(skb->nh.ipv6h); break; } diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 778f75a40b4d..4a8f58b17e43 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -203,7 +203,7 @@ struct llc_pdu_sn { static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) { - return (struct llc_pdu_sn *)skb->nh.raw; + return (struct llc_pdu_sn *)skb_network_header(skb); } /* Un-numbered PDU format (3 bytes in length) */ @@ -215,7 +215,7 @@ struct llc_pdu_un { static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) { - return (struct llc_pdu_un *)skb->nh.raw; + return (struct llc_pdu_un *)skb_network_header(skb); } /** diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 02647fe3d74b..8a6b0e7bded5 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -326,7 +326,7 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer) case TCF_LAYER_LINK: return skb->data; case TCF_LAYER_NETWORK: - return skb->nh.raw; + return skb_network_header(skb); case TCF_LAYER_TRANSPORT: return skb->h.raw; } diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 1163c4f69899..8a56d8963025 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -374,7 +374,8 @@ static int check_hbh_len(struct sk_buff *skb) { unsigned char *raw = (u8 *) (skb->nh.ipv6h + 1); u32 pkt_len; - int off = raw - skb->nh.raw; + const unsigned char *nh = skb_network_header(skb); + int off = raw - nh; int len = (raw[1] + 1) << 3; if ((raw + len) - skb->data 
> skb_headlen(skb)) @@ -384,9 +385,9 @@ static int check_hbh_len(struct sk_buff *skb) len -= 2; while (len > 0) { - int optlen = skb->nh.raw[off + 1] + 2; + int optlen = nh[off + 1] + 2; - switch (skb->nh.raw[off]) { + switch (nh[off]) { case IPV6_TLV_PAD0: optlen = 1; break; @@ -395,9 +396,9 @@ static int check_hbh_len(struct sk_buff *skb) break; case IPV6_TLV_JUMBO: - if (skb->nh.raw[off + 1] != 4 || (off & 3) != 2) + if (nh[off + 1] != 4 || (off & 3) != 2) goto bad; - pkt_len = ntohl(*(__be32 *) (skb->nh.raw + off + 2)); + pkt_len = ntohl(*(__be32 *) (nh + off + 2)); if (pkt_len <= IPV6_MAXPLEN || skb->nh.ipv6h->payload_len) goto bad; @@ -406,6 +407,7 @@ static int check_hbh_len(struct sk_buff *skb) if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto bad; + nh = skb_network_header(skb); break; default: if (optlen > len) diff --git a/net/core/dev.c b/net/core/dev.c index 1b0758254ba0..54ffe9db9b02 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1068,8 +1068,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) */ skb_reset_mac_header(skb2); - if (skb2->nh.raw < skb2->data || - skb2->nh.raw > skb2->tail) { + if (skb_network_header(skb2) < skb2->data || + skb_network_header(skb2) > skb2->tail) { if (net_ratelimit()) printk(KERN_CRIT "protocol %04x is " "buggy, dev %s\n", @@ -1207,7 +1207,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) BUG_ON(skb_shinfo(skb)->frag_list); skb_reset_mac_header(skb); - skb->mac_len = skb->nh.raw - skb->data; + skb->mac_len = skb->nh.raw - skb->mac.raw; __skb_pull(skb, skb->mac_len); if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { @@ -1224,7 +1224,8 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) segs = ERR_PTR(err); if (err || skb_gso_ok(skb, features)) break; - __skb_push(skb, skb->data - skb->nh.raw); + __skb_push(skb, (skb->data - + skb_network_header(skb))); } segs = ptype->gso_segment(skb, features); break; diff --git a/net/core/filter.c b/net/core/filter.c index 1cc128d05422..d2358a5e6339 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -42,7 +42,7 @@ static void *__load_pointer(struct sk_buff *skb, int k) u8 *ptr = NULL; if (k >= SKF_NET_OFF) - ptr = skb->nh.raw + k - SKF_NET_OFF; + ptr = skb_network_header(skb) + k - SKF_NET_OFF; else if (k >= SKF_LL_OFF) ptr = skb_mac_header(skb) + k - SKF_LL_OFF; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 7f51e8db3967..627d0c3c51cf 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -261,8 +261,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, if (rxopt->srcrt) opt = ipv6_invert_rthdr(sk, - (struct ipv6_rt_hdr *)(pktopts->nh.raw + - rxopt->srcrt)); + (struct ipv6_rt_hdr *)(skb_network_header(pktopts) + + rxopt->srcrt)); } if (opt != NULL && opt->srcrt != NULL) { @@ -573,8 +573,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, if (rxopt->srcrt) opt = ipv6_invert_rthdr(sk, - (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw + - rxopt->srcrt)); + (struct ipv6_rt_hdr *)(skb_network_header(ireq6->pktopts) + + rxopt->srcrt)); } if (dst == NULL) { diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 34079b7ba1d3..32a7db36c9e5 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -386,7 +386,7 @@ static int dn_return_short(struct sk_buff *skb) __le16 tmp; /* Add back headers */ - skb_push(skb, skb->data - skb->nh.raw); + skb_push(skb, skb->data - skb_network_header(skb)); if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; @@ 
-425,7 +425,7 @@ static int dn_return_long(struct sk_buff *skb) unsigned char tmp[ETH_ALEN]; /* Add back all headers */ - skb_push(skb, skb->data - skb->nh.raw); + skb_push(skb, skb->data - skb_network_header(skb)); if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 6e5575b0abef..ab552a6098f9 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1184,7 +1184,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) iph->id = htons(id++); iph->tot_len = htons(skb->len - skb->mac_len); iph->check = 0; - iph->check = ip_fast_csum(skb->nh.raw, iph->ihl); + iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); } while ((skb = skb->next)); out: diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 7194eb40b6d0..95ddbbd1552a 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -154,7 +154,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) ah = (struct ip_auth_hdr*)skb->data; iph = skb->nh.iph; - ihl = skb->data - skb->nh.raw; + ihl = skb->data - skb_network_header(skb); memcpy(work_buf, iph, ihl); iph->ttl = 0; @@ -181,7 +181,8 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) } } ((struct iphdr*)work_buf)->protocol = ah->nexthdr; - skb->h.raw = memcpy(skb->nh.raw += ah_hlen, work_buf, ihl); + skb->nh.raw += ah_hlen; + skb->h.raw = memcpy(skb_network_header(skb), work_buf, ihl); __skb_pull(skb, ah_hlen + ihl); return 0; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 31041127eeb8..222d21e5bbeb 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -57,9 +57,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; pskb_put(skb, trailer, clen - skb->len); - __skb_push(skb, skb->data - skb->nh.raw); + __skb_push(skb, skb->data - skb_network_header(skb)); top_iph = skb->nh.iph; - esph = (struct ip_esp_hdr *)(skb->nh.raw + top_iph->ihl*4); + esph = (struct ip_esp_hdr *)(skb_network_header(skb) + + top_iph->ihl * 4); top_iph->tot_len = htons(skb->len + alen); *(u8*)(trailer->tail - 1) = top_iph->protocol; @@ -222,7 +223,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) if (x->encap) { struct xfrm_encap_tmpl *encap = x->encap; - struct udphdr *uh = (void *)(skb->nh.raw + ihl); + struct udphdr *uh = (void *)(skb_network_header(skb) + ihl); /* * 1) if the NAT-T peer's IP or port changed then diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 4b7a0d946a0d..ff124d40c585 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -484,7 +484,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) u8 _inner_type, *itp; itp = skb_header_pointer(skb_in, - skb_in->nh.raw + + skb_network_header(skb_in) + (iph->ihl << 2) + offsetof(struct icmphdr, type) - @@ -536,7 +536,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; - icmp_param.offset = skb_in->nh.raw - skb_in->data; + icmp_param.offset = skb_network_offset(skb_in); icmp_out_count(icmp_param.data.icmph.type); inet_sk(icmp_socket->sk)->tos = tos; ipc.addr = iph->saddr; diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 3dfd7581cfc6..268a6c7347f2 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -658,7 +658,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev) } skb_shinfo(head)->frag_list = head->next; - 
skb_push(head, head->data - head->nh.raw); + skb_push(head, head->data - skb_network_header(head)); atomic_sub(head->truesize, &ip_frag_mem); for (fp=head->next; fp; fp = fp->next) { diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index f906a80d5a87..f7e9db612565 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -40,7 +40,7 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt, __be32 daddr, struct rtable *rt, int is_frag) { - unsigned char * iph = skb->nh.raw; + unsigned char *iph = skb_network_header(skb); memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options)); memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen); @@ -104,7 +104,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) return 0; } - sptr = skb->nh.raw; + sptr = skb_network_header(skb); dptr = dopt->__data; if (skb->dst) @@ -217,7 +217,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) void ip_options_fragment(struct sk_buff * skb) { - unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr); + unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr); struct ip_options * opt = &(IPCB(skb)->opt); int l = opt->optlen; int optlen; @@ -264,7 +264,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb) if (!opt) { opt = &(IPCB(skb)->opt); - iph = skb->nh.raw; + iph = skb_network_header(skb); opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr); optptr = iph + sizeof(struct iphdr); opt->is_data = 0; @@ -563,7 +563,7 @@ void ip_forward_options(struct sk_buff *skb) struct ip_options * opt = &(IPCB(skb)->opt); unsigned char * optptr; struct rtable *rt = (struct rtable*)skb->dst; - unsigned char *raw = skb->nh.raw; + unsigned char *raw = skb_network_header(skb); if (opt->rr_needaddr) { optptr = (unsigned char *)raw + opt->rr; @@ -609,7 +609,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) int srrspace, srrptr; __be32 nexthop; struct iphdr *iph = skb->nh.iph; - unsigned char * optptr = skb->nh.raw + opt->srr; + unsigned char *optptr = skb_network_header(skb) + opt->srr; struct rtable *rt = (struct rtable*)skb->dst; struct rtable *rt2; int err; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 669f5d97c6eb..eae228469627 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -503,7 +503,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) frag->h.raw = frag->data; __skb_push(frag, hlen); skb_reset_network_header(frag); - memcpy(frag->nh.raw, iph, hlen); + memcpy(skb_network_header(frag), iph, hlen); iph = frag->nh.iph; iph->tot_len = htons(frag->len); ip_copy_metadata(frag, skb); @@ -607,7 +607,7 @@ slow_path: * Copy the packet header into the new buffer. */ - memcpy(skb2->nh.raw, skb->data, hlen); + memcpy(skb_network_header(skb2), skb->data, hlen); /* * Copy a block of the IP datagram. 
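For reference while reading the remaining conversions, the three accessors this series relies on are reproduced below exactly as the include/linux/skbuff.h hunks above define them. With the current sk_buff layout they are trivial wrappers around skb->nh.raw; the point of the mechanical changes in the rest of the files is simply to route that pointer arithmetic through one name.

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->nh.raw = skb->data;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb->nh.raw - skb->data;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->nh.raw;
}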
@@ -1198,7 +1198,7 @@ int ip_push_pending_frames(struct sock *sk) tail_skb = &(skb_shinfo(skb)->frag_list); /* move skb->data to ip header from ext header */ - if (skb->data < skb->nh.raw) + if (skb->data < skb_network_header(skb)) __skb_pull(skb, skb_network_offset(skb)); while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f8ab654b6a35..70888e1ef6b7 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -273,7 +273,8 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, serr->ee.ee_pad = 0; serr->ee.ee_info = info; serr->ee.ee_data = 0; - serr->addr_offset = (u8*)&(((struct iphdr*)(skb->h.icmph+1))->daddr) - skb->nh.raw; + serr->addr_offset = (u8 *)&(((struct iphdr *)(skb->h.icmph + 1))->daddr) - + skb_network_header(skb); serr->port = port; skb->h.raw = payload; @@ -309,7 +310,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf serr->ee.ee_pad = 0; serr->ee.ee_info = info; serr->ee.ee_data = 0; - serr->addr_offset = (u8*)&iph->daddr - skb->nh.raw; + serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); serr->port = port; skb->h.raw = skb->tail; @@ -355,7 +356,8 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) sin = (struct sockaddr_in *)msg->msg_name; if (sin) { sin->sin_family = AF_INET; - sin->sin_addr.s_addr = *(__be32*)(skb->nh.raw + serr->addr_offset); + sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + + serr->addr_offset); sin->sin_port = serr->port; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 51528d3cc334..4a8d99bca441 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -563,7 +563,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) */ msg = (struct igmpmsg*)skb_push(skb, sizeof(struct iphdr)); skb->nh.raw = skb->h.raw = (u8*)msg; - memcpy(msg, pkt->nh.raw, sizeof(struct iphdr)); + memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); msg->im_msgtype = IGMPMSG_WHOLEPKT; msg->im_mbz = 0; msg->im_vif = reg_vif_num; diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index 22e104c6a493..f29d3a27eec6 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c @@ -338,7 +338,7 @@ static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb, if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th))) return 0; - th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset); + th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset); /* * Remember seq number in case this pkt gets resized @@ -413,7 +413,7 @@ static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb, if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th))) return 0; - th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset); + th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset); /* * Remember seq number in case this pkt gets resized diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index 858686d616a2..5d54dd2ce12f 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c @@ -559,7 +559,8 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, { struct iphdr *iph = skb->nh.iph; unsigned int icmp_offset = iph->ihl*4; - struct icmphdr *icmph = (struct icmphdr *)(skb->nh.raw + icmp_offset); + struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) + + icmp_offset); struct 
iphdr *ciph = (struct iphdr *)(icmph + 1); if (inout) { diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index 709db4d3f48f..af1c8593eb19 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c @@ -31,7 +31,7 @@ target(struct sk_buff **pskb, } arp = (*pskb)->nh.arph; - arpptr = (*pskb)->nh.raw + sizeof(*arp); + arpptr = skb_network_header(*pskb) + sizeof(*arp); pln = arp->ar_pln; hln = arp->ar_hln; /* We assume that pln and hln were checked in the match */ diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 8f3e92d20df8..7cebbff0b0c3 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -105,7 +105,7 @@ ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff, return -NF_DROP; } - *dataoff = (*pskb)->nh.raw - (*pskb)->data + (*pskb)->nh.iph->ihl*4; + *dataoff = skb_network_offset(*pskb) + (*pskb)->nh.iph->ihl * 4; *protonum = (*pskb)->nh.iph->protocol; return NF_ACCEPT; @@ -151,10 +151,9 @@ static unsigned int ipv4_conntrack_help(unsigned int hooknum, if (!help || !help->helper) return NF_ACCEPT; - return help->helper->help(pskb, - (*pskb)->nh.raw - (*pskb)->data - + (*pskb)->nh.iph->ihl*4, - ct, ctinfo); + return help->helper->help(pskb, (skb_network_offset(*pskb) + + (*pskb)->nh.iph->ihl * 4), + ct, ctinfo); } static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 67e5e3c035c1..a3d02fdfc066 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -256,7 +256,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb) } nf_reset(skb); - skb_push(skb, skb->data - skb->nh.raw); + skb_push(skb, skb->data - skb_network_header(skb)); raw_rcv_skb(sk, skb); return 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f5e019cefc15..00190835cea1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3634,7 +3634,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, return; skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); - nskb->nh.raw = nskb->data + (skb->nh.raw - skb->head); + nskb->nh.raw = nskb->data + (skb_network_header(skb) - skb->head); nskb->h.raw = nskb->data + (skb->h.raw - skb->head); skb_reserve(nskb, header); diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 78e80deb7e89..d89969c502dd 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -146,7 +146,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) return 0; } else { #ifdef CONFIG_NETFILTER - __skb_push(skb, skb->data - skb->nh.raw); + __skb_push(skb, skb->data - skb_network_header(skb)); skb->nh.iph->tot_len = htons(skb->len); ip_send_check(skb->nh.iph); diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c index eaaf3565f3b2..505fca034a1f 100644 --- a/net/ipv4/xfrm4_mode_beet.c +++ b/net/ipv4/xfrm4_mode_beet.c @@ -98,7 +98,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb) } skb->nh.raw = skb->data + (phlen - sizeof(*iph)); - memmove(skb->nh.raw, iph, sizeof(*iph)); + memmove(skb_network_header(skb), iph, sizeof(*iph)); skb->h.raw = skb->data + (phlen + optlen); skb->data = skb->h.raw; @@ -112,7 +112,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb) else iph->protocol = protocol; iph->check = 0; - iph->check = ip_fast_csum(skb->nh.raw, iph->ihl); + iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); err = 0; out: return 
err; diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index a820dde2c862..b198087c073e 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -34,7 +34,7 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) skb_push(skb, x->props.header_len); skb_reset_network_header(skb); - memmove(skb->nh.raw, iph, ihl); + memmove(skb_network_header(skb), iph, ihl); return 0; } @@ -51,7 +51,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) int ihl = skb->data - skb->h.raw; if (skb->h.raw != skb->nh.raw) { - memmove(skb->h.raw, skb->nh.raw, ihl); + memmove(skb->h.raw, skb_network_header(skb), ihl); skb->nh.raw = skb->h.raw; } skb->nh.iph->tot_len = htons(skb->len + ihl); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 98a833ce1114..fbb1d3decf02 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -210,7 +210,7 @@ static void _decode_session4(struct sk_buff *skb, struct flowi *fl) { struct iphdr *iph = skb->nh.iph; - u8 *xprth = skb->nh.raw + iph->ihl*4; + u8 *xprth = skb_network_header(skb) + iph->ihl * 4; memset(fl, 0, sizeof(struct flowi)); if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) { diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 2ff070417955..7b917f856e1c 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -693,7 +693,8 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) if (np->rxopt.all) { if ((opt->hop && (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) || - ((IPV6_FLOWINFO_MASK & *(__be32*)skb->nh.raw) && + ((IPV6_FLOWINFO_MASK & + *(__be32 *)skb_network_header(skb)) && np->rxopt.bits.rxflow) || (opt->srcrt && (np->rxopt.bits.srcrt || np->rxopt.bits.osrcrt)) || diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index dc68b7269c3c..1c914386982f 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -238,8 +238,8 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) top_iph = (struct ipv6hdr *)skb->data; top_iph->payload_len = htons(skb->len - sizeof(*top_iph)); - nexthdr = *skb->nh.raw; - *skb->nh.raw = IPPROTO_AH; + nexthdr = *skb_network_header(skb); + *skb_network_header(skb) = IPPROTO_AH; /* When there are no extension headers, we only need to save the first * 8 bytes of the base IP header. 
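A recurring idiom in the ah6, esp6, ipcomp6 and mip6 hunks of this patch is reading and rewriting a single protocol byte through the network-header pointer. That works because the IPv6 xfrm output path has already aimed skb->nh.raw at the nexthdr field to be patched (the xfrm6 tunnel-mode code shown earlier in this series sets skb->nh.raw = &top_iph->nexthdr), so the conversion is purely mechanical. Restating the ah6_output change above as a before/after sketch:

/* before: dereference the raw pointer directly */
nexthdr = *skb->nh.raw;
*skb->nh.raw = IPPROTO_AH;

/* after: the same byte, read and written through the accessor */
nexthdr = *skb_network_header(skb);
*skb_network_header(skb) = IPPROTO_AH;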
@@ -341,7 +341,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) goto out; - hdr_len = skb->data - skb->nh.raw; + hdr_len = skb->data - skb_network_header(skb); ah = (struct ipv6_auth_hdr*)skb->data; ahp = x->data; nexthdr = ah->nexthdr; @@ -354,7 +354,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) if (!pskb_may_pull(skb, ah_hlen)) goto out; - tmp_hdr = kmemdup(skb->nh.raw, hdr_len, GFP_ATOMIC); + tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC); if (!tmp_hdr) goto out; if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len, XFRM_POLICY_IN)) @@ -382,7 +382,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) } } - skb->h.raw = memcpy(skb->nh.raw += ah_hlen, tmp_hdr, hdr_len); + skb->nh.raw += ah_hlen; + memcpy(skb_network_header(skb), tmp_hdr, hdr_len); + skb->h.raw = skb->nh.raw; __skb_pull(skb, ah_hlen + hdr_len); kfree(tmp_hdr); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 31a20f17c854..7a86db6163ee 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -227,7 +227,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, serr->ee.ee_pad = 0; serr->ee.ee_info = info; serr->ee.ee_data = 0; - serr->addr_offset = (u8*)&(((struct ipv6hdr*)(icmph+1))->daddr) - skb->nh.raw; + serr->addr_offset = (u8 *)&(((struct ipv6hdr *)(icmph + 1))->daddr) - + skb_network_header(skb); serr->port = port; skb->h.raw = payload; @@ -264,7 +265,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info) serr->ee.ee_pad = 0; serr->ee.ee_info = info; serr->ee.ee_data = 0; - serr->addr_offset = (u8*)&iph->daddr - skb->nh.raw; + serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); serr->port = fl->fl_ip_dport; skb->h.raw = skb->tail; @@ -310,21 +311,24 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) sin = (struct sockaddr_in6 *)msg->msg_name; if (sin) { + const unsigned char *nh = skb_network_header(skb); sin->sin6_family = AF_INET6; sin->sin6_flowinfo = 0; sin->sin6_port = serr->port; sin->sin6_scope_id = 0; if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { ipv6_addr_copy(&sin->sin6_addr, - (struct in6_addr *)(skb->nh.raw + serr->addr_offset)); + (struct in6_addr *)(nh + serr->addr_offset)); if (np->sndflow) - sin->sin6_flowinfo = *(__be32*)(skb->nh.raw + serr->addr_offset - 24) & IPV6_FLOWINFO_MASK; + sin->sin6_flowinfo = + (*(__be32 *)(nh + serr->addr_offset - 24) & + IPV6_FLOWINFO_MASK); if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin->sin6_scope_id = IP6CB(skb)->iif; } else { ipv6_addr_set(&sin->sin6_addr, 0, 0, htonl(0xffff), - *(__be32*)(skb->nh.raw + serr->addr_offset)); + *(__be32 *)(nh + serr->addr_offset)); } } @@ -382,6 +386,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet6_skb_parm *opt = IP6CB(skb); + unsigned char *nh = skb_network_header(skb); if (np->rxopt.bits.rxinfo) { struct in6_pktinfo src_info; @@ -401,14 +406,14 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass); } - if (np->rxopt.bits.rxflow && (*(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK)) { - __be32 flowinfo = *(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK; + if (np->rxopt.bits.rxflow && (*(__be32 *)nh & IPV6_FLOWINFO_MASK)) { + __be32 flowinfo = *(__be32 *)nh & IPV6_FLOWINFO_MASK; put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo); } 
/* HbH is allowed only once */ if (np->rxopt.bits.hopopts && opt->hop) { - u8 *ptr = skb->nh.raw + opt->hop; + u8 *ptr = nh + opt->hop; put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr); } @@ -428,7 +433,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) while (off <= opt->lastopt) { unsigned len; - u8 *ptr = skb->nh.raw + off; + u8 *ptr = nh + off; switch(nexthdr) { case IPPROTO_DSTOPTS: @@ -470,19 +475,19 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim); } if (np->rxopt.bits.ohopopts && opt->hop) { - u8 *ptr = skb->nh.raw + opt->hop; + u8 *ptr = nh + opt->hop; put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr); } if (np->rxopt.bits.odstopts && opt->dst0) { - u8 *ptr = skb->nh.raw + opt->dst0; + u8 *ptr = nh + opt->dst0; put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr); } if (np->rxopt.bits.osrcrt && opt->srcrt) { - struct ipv6_rt_hdr *rthdr = (struct ipv6_rt_hdr *)(skb->nh.raw + opt->srcrt); + struct ipv6_rt_hdr *rthdr = (struct ipv6_rt_hdr *)(nh + opt->srcrt); put_cmsg(msg, SOL_IPV6, IPV6_2292RTHDR, (rthdr->hdrlen+1) << 3, rthdr); } if (np->rxopt.bits.odstopts && opt->dst1) { - u8 *ptr = skb->nh.raw + opt->dst1; + u8 *ptr = nh + opt->dst1; put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr); } return 0; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 363e63ffecca..6e6b57ac8013 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -92,8 +92,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len); esph = (struct ipv6_esp_hdr *)skb->h.raw; top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph)); - *(u8*)(trailer->tail - 1) = *skb->nh.raw; - *skb->nh.raw = IPPROTO_ESP; + *(u8 *)(trailer->tail - 1) = *skb_network_header(skb); + *skb_network_header(skb) = IPPROTO_ESP; esph->spi = x->id.spi; esph->seq_no = htonl(++x->replay.oseq); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index fce5abde554f..9ebf120ba6d3 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -50,13 +50,14 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) { - int packet_len = skb->tail - skb->nh.raw; + const unsigned char *nh = skb_network_header(skb); + int packet_len = skb->tail - nh; struct ipv6_opt_hdr *hdr; int len; if (offset + 2 > packet_len) goto bad; - hdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); + hdr = (struct ipv6_opt_hdr *)(nh + offset); len = ((hdr->hdrlen + 1) << 3); if (offset + len > packet_len) @@ -66,7 +67,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) len -= 2; while (len > 0) { - int opttype = skb->nh.raw[offset]; + int opttype = nh[offset]; int optlen; if (opttype == type) @@ -77,7 +78,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) optlen = 1; break; default: - optlen = skb->nh.raw[offset + 1] + 2; + optlen = nh[offset + 1] + 2; if (optlen > len) goto bad; break; @@ -113,7 +114,7 @@ static int ip6_tlvopt_unknown(struct sk_buff **skbp, int optoff) { struct sk_buff *skb = *skbp; - switch ((skb->nh.raw[optoff] & 0xC0) >> 6) { + switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { case 0: /* ignore */ return 1; @@ -141,6 +142,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp) { struct sk_buff *skb = *skbp; struct tlvtype_proc *curr; + const unsigned char *nh = skb_network_header(skb); int off = skb->h.raw - skb->nh.raw; int len = 
((skb->h.raw[1]+1)<<3); @@ -151,9 +153,9 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp) len -= 2; while (len > 0) { - int optlen = skb->nh.raw[off+1]+2; + int optlen = nh[off + 1] + 2; - switch (skb->nh.raw[off]) { + switch (nh[off]) { case IPV6_TLV_PAD0: optlen = 1; break; @@ -165,7 +167,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp) if (optlen > len) goto bad; for (curr=procs; curr->type >= 0; curr++) { - if (curr->type == skb->nh.raw[off]) { + if (curr->type == nh[off]) { /* type specific length/alignment checks will be performed in the func(). */ @@ -211,7 +213,7 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff) opt->dsthao = opt->dst1; opt->dst1 = 0; - hao = (struct ipv6_destopt_hao *)(skb->nh.raw + optoff); + hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); if (hao->length != 16) { LIMIT_NETDEBUG( @@ -244,8 +246,9 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff) /* update all variable using below by copied skbuff */ *skbp = skb = skb2; - hao = (struct ipv6_destopt_hao *)(skb2->nh.raw + optoff); - ipv6h = (struct ipv6hdr *)skb2->nh.raw; + hao = (struct ipv6_destopt_hao *)(skb_network_header(skb2) + + optoff); + ipv6h = skb2->nh.ipv6h; } if (skb->ip_summed == CHECKSUM_COMPLETE) @@ -406,7 +409,8 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp) default: IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); - icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw); + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, + (&hdr->type) - skb_network_header(skb)); return -1; } @@ -443,7 +447,7 @@ looped_back: skb->h.raw += (hdr->hdrlen + 1) << 3; opt->dst0 = opt->dst1; opt->dst1 = 0; - opt->nhoff = (&hdr->nexthdr) - skb->nh.raw; + opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); return 1; } @@ -452,7 +456,9 @@ looped_back: if (hdr->hdrlen & 0x01) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); - icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw); + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, + ((&hdr->hdrlen) - + skb_network_header(skb))); return -1; } break; @@ -479,7 +485,9 @@ looped_back: if (hdr->segments_left > n) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); - icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw); + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, + ((&hdr->segments_left) - + skb_network_header(skb))); return -1; } @@ -547,7 +555,7 @@ looped_back: dst_release(xchg(&skb->dst, NULL)); ip6_route_input(skb); if (skb->dst->error) { - skb_push(skb, skb->data - skb->nh.raw); + skb_push(skb, skb->data - skb_network_header(skb)); dst_input(skb); return -1; } @@ -565,7 +573,7 @@ looped_back: goto looped_back; } - skb_push(skb, skb->data - skb->nh.raw); + skb_push(skb, skb->data - skb_network_header(skb)); dst_input(skb); return -1; } @@ -656,13 +664,14 @@ EXPORT_SYMBOL_GPL(ipv6_invert_rthdr); static int ipv6_hop_ra(struct sk_buff **skbp, int optoff) { struct sk_buff *skb = *skbp; + const unsigned char *nh = skb_network_header(skb); - if (skb->nh.raw[optoff+1] == 2) { + if (nh[optoff + 1] == 2) { IP6CB(skb)->ra = optoff; return 1; } LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", - skb->nh.raw[optoff+1]); + nh[optoff + 1]); kfree_skb(skb); return 0; } @@ -672,17 +681,18 @@ static int ipv6_hop_ra(struct sk_buff **skbp, int optoff) static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff) { struct sk_buff *skb = *skbp; + const unsigned char *nh = 
skb_network_header(skb); u32 pkt_len; - if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) { + if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", - skb->nh.raw[optoff+1]); + nh[optoff+1]); IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); goto drop; } - pkt_len = ntohl(*(__be32*)(skb->nh.raw+optoff+2)); + pkt_len = ntohl(*(__be32 *)(nh + optoff + 2)); if (pkt_len <= IPV6_MAXPLEN) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); @@ -727,7 +737,7 @@ int ipv6_parse_hopopts(struct sk_buff **skbp) struct inet6_skb_parm *opt = IP6CB(skb); /* - * skb->nh.raw is equal to skb->data, and + * skb_network_header(skb) is equal to skb->data, and * skb->h.raw - skb->nh.raw is always equal to * sizeof(struct ipv6hdr) by definition of * hop-by-hop options. diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index aa4a0a59ffac..e5293b34229f 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -284,7 +284,8 @@ static void mip6_addr_swap(struct sk_buff *skb) if (opt->dsthao) { off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO); if (likely(off >= 0)) { - hao = (struct ipv6_destopt_hao *)(skb->nh.raw + off); + hao = (struct ipv6_destopt_hao *) + (skb_network_header(skb) + off); ipv6_addr_copy(&tmp, &iph->saddr); ipv6_addr_copy(&iph->saddr, &hao->addr); ipv6_addr_copy(&hao->addr, &tmp); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 61e7a6c8141d..aecc74da0721 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -163,7 +163,7 @@ resubmit: if (!pskb_pull(skb, skb->h.raw - skb->data)) goto discard; nhoff = IP6CB(skb)->nhoff; - nexthdr = skb->nh.raw[nhoff]; + nexthdr = skb_network_header(skb)[nhoff]; raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]); if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) @@ -181,7 +181,7 @@ resubmit: indefinitely. */ nf_reset(skb); - skb_postpull_rcsum(skb, skb->nh.raw, + skb_postpull_rcsum(skb, skb_network_header(skb), skb->h.raw - skb->nh.raw); hdr = skb->nh.ipv6h; if (ipv6_addr_is_multicast(&hdr->daddr) && diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 47d00210cba1..f1dfcc319717 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -323,10 +323,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb) if (nexthdr == IPPROTO_ICMPV6) { struct icmp6hdr *icmp6; - if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data)) + if (!pskb_may_pull(skb, (skb_network_header(skb) + + offset + 1 - skb->data))) return 0; - icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset); + icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset); switch (icmp6->icmp6_type) { case NDISC_ROUTER_SOLICITATION: @@ -392,7 +393,7 @@ int ip6_forward(struct sk_buff *skb) * that different fragments will go along one path. 
--ANK */ if (opt->ra) { - u8 *ptr = skb->nh.raw + opt->ra; + u8 *ptr = skb_network_header(skb) + opt->ra; if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3])) return 0; } @@ -527,7 +528,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1); - unsigned int packet_len = skb->tail - skb->nh.raw; + unsigned int packet_len = skb->tail - skb_network_header(skb); int found_rhdr = 0; *nexthdr = &skb->nh.ipv6h->nexthdr; @@ -554,7 +555,8 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) offset += ipv6_optlen(exthdr); *nexthdr = &exthdr->nexthdr; - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + + offset); } return offset; @@ -620,7 +622,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) /* BUILD HEADER */ *prevhdr = NEXTHDR_FRAGMENT; - tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC); + tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); if (!tmp_hdr) { IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); return -ENOMEM; @@ -630,7 +632,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); __skb_push(skb, hlen); skb_reset_network_header(skb); - memcpy(skb->nh.raw, tmp_hdr, hlen); + memcpy(skb_network_header(skb), tmp_hdr, hlen); ipv6_select_ident(skb, fh); fh->nexthdr = nexthdr; @@ -654,7 +656,8 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); __skb_push(frag, hlen); skb_reset_network_header(frag); - memcpy(frag->nh.raw, tmp_hdr, hlen); + memcpy(skb_network_header(frag), tmp_hdr, + hlen); offset += skb->len - hlen - sizeof(struct frag_hdr); fh->nexthdr = nexthdr; fh->reserved = 0; @@ -753,7 +756,7 @@ slow_path: /* * Copy the packet header into the new buffer. */ - memcpy(frag->nh.raw, skb->data, hlen); + memcpy(skb_network_header(frag), skb->data, hlen); /* * Build fragment header. 
@@ -1329,7 +1332,7 @@ int ip6_push_pending_frames(struct sock *sk) tail_skb = &(skb_shinfo(skb)->frag_list); /* move skb->data to ip header from ext header */ - if (skb->data < skb->nh.raw) + if (skb->data < skb_network_header(skb)) __skb_pull(skb, skb_network_offset(skb)); while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a1e4f39c6793..aafbdfa8d785 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -995,9 +995,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) return -1; - if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) { + offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb)); + if (offset > 0) { struct ipv6_tlv_tnl_enc_lim *tel; - tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset]; + tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; if (tel->encap_limit == 0) { icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD, offset + 2, skb->dev); diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 5724ba9f75de..3e71d1691b7d 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -166,10 +166,10 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb) top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); ipch = (struct ipv6_comp_hdr *)start; - ipch->nexthdr = *skb->nh.raw; + ipch->nexthdr = *skb_network_header(skb); ipch->flags = 0; ipch->cpi = htons((u16 )ntohl(x->id.spi)); - *skb->nh.raw = IPPROTO_COMP; + *skb_network_header(skb) = IPPROTO_COMP; out_ok: return 0; diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 0afcabdd8ed6..bb4033553f3b 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -99,14 +99,16 @@ int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); - mip6_param_prob(skb, 0, (&mh->ip6mh_hdrlen) - skb->nh.raw); + mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) - + skb_network_header(skb))); return -1; } if (mh->ip6mh_proto != IPPROTO_NONE) { LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", mh->ip6mh_proto); - mip6_param_prob(skb, 0, (&mh->ip6mh_proto) - skb->nh.raw); + mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) - + skb_network_header(skb))); return -1; } @@ -152,8 +154,8 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb) iph = (struct ipv6hdr *)skb->data; iph->payload_len = htons(skb->len - sizeof(*iph)); - nexthdr = *skb->nh.raw; - *skb->nh.raw = IPPROTO_DSTOPTS; + nexthdr = *skb_network_header(skb); + *skb_network_header(skb) = IPPROTO_DSTOPTS; dstopt = (struct ipv6_destopt_hdr *)skb->h.raw; dstopt->nexthdr = nexthdr; @@ -215,7 +217,8 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, struct if (likely(opt->dsthao)) { offset = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO); if (likely(offset >= 0)) - hao = (struct ipv6_destopt_hao *)(skb->nh.raw + offset); + hao = (struct ipv6_destopt_hao *) + (skb_network_header(skb) + offset); } skb_get_timestamp(skb, &stamp); @@ -254,7 +257,8 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, { u16 offset = sizeof(struct ipv6hdr); struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1); - unsigned int packet_len = skb->tail - skb->nh.raw; + const unsigned char *nh = 
skb_network_header(skb); + unsigned int packet_len = skb->tail - nh; int found_rhdr = 0; *nexthdr = &skb->nh.ipv6h->nexthdr; @@ -288,7 +292,7 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, offset += ipv6_optlen(exthdr); *nexthdr = &exthdr->nexthdr; - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); + exthdr = (struct ipv6_opt_hdr *)(nh + offset); } return offset; @@ -361,8 +365,8 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb) iph = (struct ipv6hdr *)skb->data; iph->payload_len = htons(skb->len - sizeof(*iph)); - nexthdr = *skb->nh.raw; - *skb->nh.raw = IPPROTO_ROUTING; + nexthdr = *skb_network_header(skb); + *skb_network_header(skb) = IPPROTO_ROUTING; rt2 = (struct rt2_hdr *)skb->h.raw; rt2->rt_hdr.nexthdr = nexthdr; @@ -384,7 +388,8 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb, { u16 offset = sizeof(struct ipv6hdr); struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1); - unsigned int packet_len = skb->tail - skb->nh.raw; + const unsigned char *nh = skb_network_header(skb); + unsigned int packet_len = skb->tail - nh; int found_rhdr = 0; *nexthdr = &skb->nh.ipv6h->nexthdr; @@ -397,7 +402,7 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb, case NEXTHDR_ROUTING: if (offset + 3 <= packet_len) { struct ipv6_rt_hdr *rt; - rt = (struct ipv6_rt_hdr *)(skb->nh.raw + offset); + rt = (struct ipv6_rt_hdr *)(nh + offset); if (rt->type != 0) return offset; } @@ -417,7 +422,7 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb, offset += ipv6_optlen(exthdr); *nexthdr = &exthdr->nexthdr; - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); + exthdr = (struct ipv6_opt_hdr *)(nh + offset); } return offset; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index c311b9a12ca6..bc1d09584008 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -408,11 +408,12 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, return -1; } - if (skb->ip_summed == CHECKSUM_COMPLETE) + if (skb->ip_summed == CHECKSUM_COMPLETE) { + const unsigned char *nh = skb_network_header(skb); skb->csum = csum_sub(skb->csum, - csum_partial(skb->nh.raw, - (u8*)(fhdr + 1) - skb->nh.raw, + csum_partial(nh, (u8 *)(fhdr + 1) - nh, 0)); + } /* Is this the final fragment? */ if (!(fhdr->frag_off & htons(IP6_MF))) { @@ -583,7 +584,9 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0); /* Unfragmented part is taken from the first segment. */ - payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr); + payload_len = ((head->data - skb_network_header(head)) - + sizeof(struct ipv6hdr) + fq->len - + sizeof(struct frag_hdr)); if (payload_len > IPV6_MAXPLEN) { DEBUGP("payload len is too large.\n"); goto out_oversize; @@ -624,7 +627,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) /* We have to remove fragment header from datagram and to relocate * header in order to calculate ICV correctly. 
*/ - head->nh.raw[fq->nhoffset] = head->h.raw[0]; + skb_network_header(head)[fq->nhoffset] = head->h.raw[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); head->mac.raw += sizeof(struct frag_hdr); @@ -632,7 +635,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) skb_shinfo(head)->frag_list = head->next; head->h.raw = head->data; - skb_push(head, head->data - head->nh.raw); + skb_push(head, head->data - skb_network_header(head)); atomic_sub(head->truesize, &nf_ct_frag6_mem); for (fp=head->next; fp; fp = fp->next) { @@ -653,7 +656,9 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) /* Yes, and fold redundant checksum back. 8) */ if (head->ip_summed == CHECKSUM_COMPLETE) - head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum); + head->csum = csum_partial(skb_network_header(head), + head->h.raw - head->nh.raw, + head->csum); fq->fragments = NULL; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 5f26645195dc..9b2bcde73f19 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -361,7 +361,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) skb->ip_summed = CHECKSUM_UNNECESSARY; if (skb->ip_summed == CHECKSUM_COMPLETE) { - skb_postpull_rcsum(skb, skb->nh.raw, + skb_postpull_rcsum(skb, skb_network_header(skb), skb->h.raw - skb->nh.raw); if (!csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, @@ -488,7 +488,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, goto out; offset = rp->offset; - total_len = inet_sk(sk)->cork.length - (skb->nh.raw - skb->data); + total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) - + skb->data); if (offset >= total_len - 1) { err = -EINVAL; ip6_flush_pending_frames(sk); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 1dde449379fb..f85e49acb91a 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -436,13 +436,18 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, if ((unsigned int)end > IPV6_MAXPLEN) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); - icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw); + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, + ((u8 *)&fhdr->frag_off - + skb_network_header(skb))); return; } - if (skb->ip_summed == CHECKSUM_COMPLETE) + if (skb->ip_summed == CHECKSUM_COMPLETE) { + const unsigned char *nh = skb_network_header(skb); skb->csum = csum_sub(skb->csum, - csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0)); + csum_partial(nh, (u8 *)(fhdr + 1) - nh, + 0)); + } /* Is this the final fragment? */ if (!(fhdr->frag_off & htons(IP6_MF))) { @@ -605,7 +610,9 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, BUG_TRAP(FRAG6_CB(head)->offset == 0); /* Unfragmented part is taken from the first segment. */ - payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr); + payload_len = ((head->data - skb_network_header(head)) - + sizeof(struct ipv6hdr) + fq->len - + sizeof(struct frag_hdr)); if (payload_len > IPV6_MAXPLEN) goto out_oversize; @@ -639,7 +646,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, /* We have to remove fragment header from datagram and to relocate * header in order to calculate ICV correctly. 
*/ nhoff = fq->nhoffset; - head->nh.raw[nhoff] = head->h.raw[0]; + skb_network_header(head)[nhoff] = head->h.raw[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); head->mac.raw += sizeof(struct frag_hdr); @@ -647,7 +654,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, skb_shinfo(head)->frag_list = head->next; head->h.raw = head->data; - skb_push(head, head->data - head->nh.raw); + skb_push(head, head->data - skb_network_header(head)); atomic_sub(head->truesize, &ip6_frag_mem); for (fp=head->next; fp; fp = fp->next) { @@ -671,7 +678,9 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, /* Yes, and fold redundant checksum back. 8) */ if (head->ip_summed == CHECKSUM_COMPLETE) - head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum); + head->csum = csum_partial(skb_network_header(head), + head->h.raw - head->nh.raw, + head->csum); rcu_read_lock(); IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS); @@ -725,7 +734,7 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) skb->h.raw += sizeof(struct frag_hdr); IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS); - IP6CB(skb)->nhoff = (u8*)fhdr - skb->nh.raw; + IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); return 1; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 92f99927d12d..80a52ab1e384 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -486,7 +486,9 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, struct sk_buff *pktopts = treq->pktopts; struct inet6_skb_parm *rxopt = IP6CB(pktopts); if (rxopt->srcrt) - opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt)); + opt = ipv6_invert_rthdr(sk, + (struct ipv6_rt_hdr *)(skb_network_header(pktopts) + + rxopt->srcrt)); } if (opt && opt->srcrt) { @@ -1389,7 +1391,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, opt == NULL && treq->pktopts) { struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts); if (rxopt->srcrt) - opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt)); + opt = ipv6_invert_rthdr(sk, + (struct ipv6_rt_hdr *)(skb_network_header(treq->pktopts) + + rxopt->srcrt)); } if (dst == NULL) { diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 33a1b9200431..5c929f886129 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -28,7 +28,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi) unsigned int nhoff; nhoff = IP6CB(skb)->nhoff; - nexthdr = skb->nh.raw[nhoff]; + nexthdr = skb_network_header(skb)[nhoff]; seq = 0; if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) @@ -58,7 +58,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi) if (nexthdr <= 0) goto drop_unlock; - skb->nh.raw[nhoff] = nexthdr; + skb_network_header(skb)[nhoff] = nexthdr; if (x->props.replay_window) xfrm_replay_advance(x, seq); @@ -113,7 +113,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi) } else { #ifdef CONFIG_NETFILTER skb->nh.ipv6h->payload_len = htons(skb->len); - __skb_push(skb, skb->data - skb->nh.raw); + __skb_push(skb, skb->data - skb_network_header(skb)); NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL, ip6_rcv_finish); diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index c015bfde2b1c..247e2d5d2acf 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ b/net/ipv6/xfrm6_mode_beet.c @@ -67,7 +67,7 @@ static int 
xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) goto out; skb_push(skb, size); - memmove(skb->data, skb->nh.raw, size); + memmove(skb->data, skb_network_header(skb), size); skb_reset_network_header(skb); old_mac = skb_mac_header(skb); diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index 3a4b39b12bad..ace0bbf4f25d 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c @@ -53,8 +53,10 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) { int ihl = skb->data - skb->h.raw; - if (skb->h.raw != skb->nh.raw) - skb->nh.raw = memmove(skb->h.raw, skb->nh.raw, ihl); + if (skb->h.raw != skb->nh.raw) { + memmove(skb->h.raw, skb_network_header(skb), ihl); + skb->nh.raw = skb->h.raw; + } skb->nh.ipv6h->payload_len = htons(skb->len + ihl - sizeof(struct ipv6hdr)); skb->h.raw = skb->data; diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 8ce5ef2d0b1c..498f17b5c42f 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -87,9 +87,10 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) { int err = -EINVAL; const unsigned char *old_mac; + const unsigned char *nh = skb_network_header(skb); - if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6 - && skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPIP) + if (nh[IP6CB(skb)->nhoff] != IPPROTO_IPV6 && + nh[IP6CB(skb)->nhoff] != IPPROTO_IPIP) goto out; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto out; @@ -98,7 +99,8 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) goto out; - if (skb->nh.raw[IP6CB(skb)->nhoff] == IPPROTO_IPV6) { + nh = skb_network_header(skb); + if (nh[IP6CB(skb)->nhoff] == IPPROTO_IPV6) { if (x->props.flags & XFRM_STATE_DECAP_DSCP) ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h); if (!(x->props.flags & XFRM_STATE_NOECN)) diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index d8a585bd2cb4..cb5a723d4cb4 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -273,14 +273,16 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl) u16 offset = skb->h.raw - skb->nh.raw; struct ipv6hdr *hdr = skb->nh.ipv6h; struct ipv6_opt_hdr *exthdr; - u8 nexthdr = skb->nh.raw[IP6CB(skb)->nhoff]; + const unsigned char *nh = skb_network_header(skb); + u8 nexthdr = nh[IP6CB(skb)->nhoff]; memset(fl, 0, sizeof(struct flowi)); ipv6_addr_copy(&fl->fl6_dst, &hdr->daddr); ipv6_addr_copy(&fl->fl6_src, &hdr->saddr); - while (pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data)) { - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); + while (pskb_may_pull(skb, nh + offset + 1 - skb->data)) { + nh = skb_network_header(skb); + exthdr = (struct ipv6_opt_hdr *)(nh + offset); switch (nexthdr) { case NEXTHDR_ROUTING: @@ -288,7 +290,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl) case NEXTHDR_DEST: offset += ipv6_optlen(exthdr); nexthdr = exthdr->nexthdr; - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); + exthdr = (struct ipv6_opt_hdr *)(nh + offset); break; case IPPROTO_UDP: @@ -296,7 +298,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl) case IPPROTO_TCP: case IPPROTO_SCTP: case IPPROTO_DCCP: - if (pskb_may_pull(skb, skb->nh.raw + offset + 4 - skb->data)) { + if (pskb_may_pull(skb, nh + offset + 4 - skb->data)) { __be16 *ports = (__be16 *)exthdr; fl->fl_ip_sport = ports[0]; @@ -306,7 +308,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl) return; case 
IPPROTO_ICMPV6: - if (pskb_may_pull(skb, skb->nh.raw + offset + 2 - skb->data)) { + if (pskb_may_pull(skb, nh + offset + 2 - skb->data)) { u8 *icmp = (u8 *)exthdr; fl->fl_icmp_type = icmp[0]; @@ -317,7 +319,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl) #ifdef CONFIG_IPV6_MIP6 case IPPROTO_MH: - if (pskb_may_pull(skb, skb->nh.raw + offset + 3 - skb->data)) { + if (pskb_may_pull(skb, nh + offset + 3 - skb->data)) { struct ip6_mh *mh; mh = (struct ip6_mh *)exthdr; diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index db7e38c08de2..afc0c60e19d5 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -54,7 +54,7 @@ tcpmss_mangle_packet(struct sk_buff **pskb, return -1; tcplen = (*pskb)->len - tcphoff; - tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff); + tcph = (struct tcphdr *)(skb_network_header(*pskb) + tcphoff); /* Since it passed flags test in tcp match, we know it is is not a fragment, and has data >= tcp header length. SYN @@ -113,7 +113,7 @@ tcpmss_mangle_packet(struct sk_buff **pskb, return -1; kfree_skb(*pskb); *pskb = newskb; - tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff); + tcph = (struct tcphdr *)(skb_network_header(*pskb) + tcphoff); } skb_put((*pskb), TCPOLEN_MSS); diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 3d6a2fcc9ce4..20813eee8af4 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -136,7 +136,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, } } - pptr = skb->nh.raw; + pptr = skb_network_header(skb); spin_lock(&p->tcf_lock); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 0bcb16928d25..695b34051b9f 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -119,7 +119,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re } stack[TC_U32_MAXDEPTH]; struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; - u8 *ptr = skb->nh.raw; + u8 *ptr = skb_network_header(skb); struct tc_u_knode *n; int sdepth = 0; int off2 = 0; diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c index cd0600c67969..0a2a7fe08de3 100644 --- a/net/sched/em_u32.c +++ b/net/sched/em_u32.c @@ -22,7 +22,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em, struct tcf_pkt_info *info) { struct tc_u32_key *key = (struct tc_u32_key *) em->data; - unsigned char *ptr = skb->nh.raw; + const unsigned char *ptr = skb_network_header(skb); if (info) { if (info->ptr) -- cgit v1.2.3-59-g8ed1b From c14d2450cb7fe1786e2ec325172baf66922bf597 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 11 Mar 2007 22:39:41 -0300 Subject: [SK_BUFF]: Introduce skb_set_network_header For the cases where the network header is being set to an offset from skb->data. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 5 +++++ net/ax25/ax25_out.c | 6 ++++-- net/ipv4/ip_output.c | 4 ++-- net/ipv4/tcp_input.c | 3 ++- net/ipv6/ip6_output.c | 4 ++-- 5 files changed, 15 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 76d30f34b986..870438fba93f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -970,6 +970,11 @@ static inline void skb_reset_network_header(struct sk_buff *skb) skb->nh.raw = skb->data; } +static inline void skb_set_network_header(struct sk_buff *skb, const int offset) +{ + skb->nh.raw = skb->data + offset; +} + static inline int skb_network_offset(const struct sk_buff *skb) { return skb->nh.raw - skb->data; diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index 02dea851a11a..e66953ce53e7 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c @@ -148,7 +148,8 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb) if (ka9qfrag == 1) { skb_reserve(skbn, frontlen + 2); - skbn->nh.raw = skbn->data + skb_network_offset(skb); + skb_set_network_header(skbn, + skb_network_offset(skb)); memcpy(skb_put(skbn, len), skb->data, len); p = skb_push(skbn, 2); @@ -161,7 +162,8 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb) } } else { skb_reserve(skbn, frontlen + 1); - skbn->nh.raw = skbn->data + skb_network_offset(skb); + skb_set_network_header(skbn, + skb_network_offset(skb)); memcpy(skb_put(skbn, len), skb->data, len); p = skb_push(skbn, 1); *p = AX25_P_TEXT; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index eae228469627..15de9d43950e 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -928,9 +928,9 @@ alloc_new_skb: * Find where to start putting bytes. */ data = skb_put(skb, fraglen); - skb->nh.raw = data + exthdrlen; + skb_set_network_header(skb, exthdrlen); + skb->h.raw = skb->nh.raw + fragheaderlen; data += fragheaderlen; - skb->h.raw = data + exthdrlen; if (fraggap) { skb->csum = skb_copy_and_csum_bits( diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 00190835cea1..5da823a32250 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3634,7 +3634,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, return; skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); - nskb->nh.raw = nskb->data + (skb_network_header(skb) - skb->head); + skb_set_network_header(nskb, + skb_network_header(skb) - skb->head); nskb->h.raw = nskb->data + (skb->h.raw - skb->head); skb_reserve(nskb, header); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f1dfcc319717..bd25825c0ccd 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1206,9 +1206,9 @@ alloc_new_skb: * Find where to start putting bytes */ data = skb_put(skb, fraglen); - skb->nh.raw = data + exthdrlen; + skb_set_network_header(skb, exthdrlen); data += fragheaderlen; - skb->h.raw = data + exthdrlen; + skb->h.raw = skb->nh.raw + fragheaderlen; if (fraggap) { skb->csum = skb_copy_and_csum_bits( -- cgit v1.2.3-59-g8ed1b From c9bdd4b5257406b0608385d19c40b5511decf4f6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 12 Mar 2007 20:09:15 -0300 Subject: [IP]: Introduce ip_hdrlen() For the common sequence "skb->nh.iph->ihl * 4", removing a good number of open coded skb->nh.iph uses, now to go after the rest... 
Just out of curiosity, here are the idioms found to get the same result: skb->nh.iph->ihl << 2 skb->nh.iph->ihl<<2 skb->nh.iph->ihl * 4 skb->nh.iph->ihl*4 (skb->nh.iph)->ihl * sizeof(u32) Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- drivers/net/bnx2.c | 2 +- drivers/net/ehea/ehea_main.c | 4 ++-- drivers/net/netxen/netxen_nic_hw.c | 8 +++++--- drivers/net/netxen/netxen_nic_main.c | 6 +++--- drivers/net/sky2.c | 3 ++- drivers/net/tg3.c | 6 +++--- drivers/s390/net/qeth_eddp.c | 13 ++++++++----- include/net/ip.h | 7 ++++++- net/ipv4/ip_fragment.c | 4 ++-- net/ipv4/ip_input.c | 4 +--- net/ipv4/ipmr.c | 2 +- net/ipv4/ipvs/ip_vs_app.c | 4 ++-- net/ipv4/ipvs/ip_vs_core.c | 3 +-- net/ipv4/ipvs/ip_vs_proto_tcp.c | 12 +++++------- net/ipv4/ipvs/ip_vs_proto_udp.c | 12 ++++++------ net/ipv4/netfilter/ip_conntrack_amanda.c | 2 +- net/ipv4/netfilter/ip_conntrack_core.c | 3 +-- net/ipv4/netfilter/ip_conntrack_ftp.c | 4 ++-- net/ipv4/netfilter/ip_conntrack_helper_h323.c | 9 ++++----- net/ipv4/netfilter/ip_conntrack_helper_pptp.c | 4 ++-- net/ipv4/netfilter/ip_conntrack_irc.c | 4 ++-- net/ipv4/netfilter/ip_conntrack_proto_icmp.c | 8 ++++---- net/ipv4/netfilter/ip_conntrack_proto_sctp.c | 2 +- net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 2 +- net/ipv4/netfilter/ip_conntrack_sip.c | 2 +- net/ipv4/netfilter/ip_conntrack_standalone.c | 2 +- net/ipv4/netfilter/ip_conntrack_tftp.c | 2 +- net/ipv4/netfilter/ip_nat_core.c | 11 +++++------ net/ipv4/netfilter/ip_nat_helper.c | 8 ++++---- net/ipv4/netfilter/ip_nat_helper_h323.c | 8 ++++---- net/ipv4/netfilter/ip_nat_sip.c | 10 +++++----- net/ipv4/netfilter/ip_nat_standalone.c | 7 +++---- net/ipv4/netfilter/ip_tables.c | 2 +- net/ipv4/netfilter/ipt_ECN.c | 7 ++++--- net/ipv4/netfilter/ipt_REJECT.c | 19 +++++++++---------- net/ipv4/netfilter/ipt_ecn.c | 4 ++-- net/ipv4/netfilter/iptable_filter.c | 3 ++- net/ipv4/netfilter/iptable_mangle.c | 3 ++- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 8 ++++---- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 6 +++--- net/ipv4/netfilter/nf_nat_core.c | 14 +++++++------- net/ipv4/netfilter/nf_nat_h323.c | 8 ++++---- net/ipv4/netfilter/nf_nat_helper.c | 12 ++++++------ net/ipv4/netfilter/nf_nat_sip.c | 11 ++++++----- net/ipv4/netfilter/nf_nat_standalone.c | 7 +++---- net/ipv6/netfilter/ip6table_filter.c | 2 +- net/ipv6/netfilter/ip6table_mangle.c | 2 +- 47 files changed, 145 insertions(+), 141 deletions(-) (limited to 'include') diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index e85f5ec48f96..b8091c55d441 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -4527,7 +4527,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->h.th->doff > 5) { tcp_opt_len = (skb->h.th->doff - 5) << 2; } - ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr); + ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); skb->nh.iph->check = 0; skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 0e4042bc0a48..b1c90a4fe31e 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -1263,7 +1263,7 @@ static inline void write_ip_start_end(struct ehea_swqe *swqe, const struct sk_buff *skb) { swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data)); - swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1); + swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1); } static inline void write_tcp_offset_end(struct ehea_swqe *swqe, @@ -1300,7 +1300,7 @@ static void 
write_swqe2_TSO(struct sk_buff *skb, /* copy only eth/ip/tcp headers to immediate data and * the rest of skb->data to sg1entry */ - headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4); + headersize = ETH_HLEN + ip_hdrlen(skb) + (skb->h.th->doff * 4); skb_data_size = skb->len - skb->data_len; diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 625e11ed6aae..b2f5032937e3 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -35,6 +35,8 @@ #include "netxen_nic_hw.h" #include "netxen_nic_phan_reg.h" +#include + /* PCI Windowing for DDR regions. */ #define ADDR_IN_RANGE(addr, low, high) \ @@ -371,9 +373,9 @@ void netxen_tso_check(struct netxen_adapter *adapter, struct cmd_desc_type0 *desc, struct sk_buff *skb) { if (desc->mss) { - desc->total_hdr_length = sizeof(struct ethhdr) + - ((skb->nh.iph)->ihl * sizeof(u32)) + - ((skb->h.th)->doff * sizeof(u32)); + desc->total_hdr_length = (sizeof(struct ethhdr) + + ip_hdrlen(skb) + + skb->h.th->doff * 4); netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->nh.iph->protocol == IPPROTO_TCP) { diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 7d2525e76abb..b548a30e5c8e 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -41,6 +41,7 @@ #include #include +#include MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); MODULE_LICENSE("GPL"); @@ -778,9 +779,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (skb_shinfo(skb)->gso_size > 0) { no_of_desc++; - if (((skb->nh.iph)->ihl * sizeof(u32)) + - ((skb->h.th)->doff * sizeof(u32)) + - sizeof(struct ethhdr) > + if ((ip_hdrlen(skb) + skb->h.th->doff * 4 + + sizeof(struct ethhdr)) > (sizeof(struct cmd_desc_type0) - 2)) { no_of_desc++; } diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index ac36152c68bf..51e994f26a84 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -1392,7 +1393,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) mss = skb_shinfo(skb)->gso_size; if (mss != 0) { mss += ((skb->h.th->doff - 5) * 4); /* TCP options */ - mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); + mss += ip_hdrlen(skb) + sizeof(struct tcphdr); mss += ETH_HLEN; if (mss != sky2->tx_last_mss) { diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 256969e1300c..62a3bba0097d 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -40,6 +40,7 @@ #include #include +#include #include #include @@ -3909,8 +3910,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) mss |= (skb_headlen(skb) - ETH_HLEN) << 9; else { tcp_opt_len = ((skb->h.th->doff - 5) * 4); - ip_tcp_len = (skb->nh.iph->ihl * 4) + - sizeof(struct tcphdr); + ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); skb->nh.iph->check = 0; skb->nh.iph->tot_len = htons(mss + ip_tcp_len + @@ -4064,7 +4064,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) } tcp_opt_len = ((skb->h.th->doff - 5) * 4); - ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); + ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); hdr_len = ip_tcp_len + tcp_opt_len; if (unlikely((ETH_HLEN + hdr_len) > 80) && diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 893125403c68..1574247abaa1 100644 --- 
a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -473,9 +473,11 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, QETH_DBF_TEXT(trace, 5, "eddpficx"); /* create our segmentation headers and copy original headers */ if (skb->protocol == htons(ETH_P_IP)) - eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph, - skb->nh.iph->ihl*4, - (u8 *)skb->h.th, skb->h.th->doff*4); + eddp = qeth_eddp_create_eddp_data(qhdr, + skb_network_header(skb), + ip_hdrlen(skb), + skb->h.raw, + skb->h.th->doff * 4); else eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h, sizeof(struct ipv6hdr), @@ -590,8 +592,9 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, QETH_DBF_TEXT(trace, 5, "creddpct"); if (skb->protocol == htons(ETH_P_IP)) ctx = qeth_eddp_create_context_generic(card, skb, - sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 + - skb->h.th->doff*4); + (sizeof(struct qeth_hdr) + + ip_hdrlen(skb) + + skb->h.th->doff * 4)); else if (skb->protocol == htons(ETH_P_IPV6)) ctx = qeth_eddp_create_context_generic(card, skb, sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + diff --git a/include/net/ip.h b/include/net/ip.h index e79c3e3aa4f6..6f7ba32b199d 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -43,6 +44,11 @@ struct inet_skb_parm #define IPSKB_REROUTED 16 }; +static inline unsigned int ip_hdrlen(const struct sk_buff *skb) +{ + return skb->nh.iph->ihl * 4; +} + struct ipcm_cookie { __be32 addr; @@ -74,7 +80,6 @@ struct msghdr; struct net_device; struct packet_type; struct rtable; -struct sk_buff; struct sockaddr; extern void ip_mc_dropsocket(struct sock *); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 268a6c7347f2..af120b2d5331 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -483,7 +483,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) flags = offset & ~IP_OFFSET; offset &= IP_OFFSET; offset <<= 3; /* offset is in 8-byte chunks */ - ihl = skb->nh.iph->ihl * 4; + ihl = ip_hdrlen(skb); /* Determine the position of this fragment. */ end = offset + skb->len - ihl; @@ -624,7 +624,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev) BUG_TRAP(FRAG_CB(head)->offset == 0); /* Allocate a new buffer for the datagram. */ - ihlen = head->nh.iph->ihl*4; + ihlen = ip_hdrlen(head); len = ihlen + qp->len; if (len > 65535) diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index f38e97647ac0..2ee132b330fd 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -198,9 +198,7 @@ int ip_call_ra_chain(struct sk_buff *skb) static inline int ip_local_deliver_finish(struct sk_buff *skb) { - int ihl = skb->nh.iph->ihl*4; - - __skb_pull(skb, ihl); + __skb_pull(skb, ip_hdrlen(skb)); /* Point into the IP datagram, just past the header. 
*/ skb->h.raw = skb->data; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index aba3ff0bec97..54b7543190f1 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -539,7 +539,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) { struct sk_buff *skb; - int ihl = pkt->nh.iph->ihl<<2; + const int ihl = ip_hdrlen(pkt); struct igmphdr *igmp; struct igmpmsg *msg; int ret; diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index f29d3a27eec6..e5beab28cd0f 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c @@ -331,7 +331,7 @@ static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb, struct ip_vs_app *app) { int diff; - unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4; + const unsigned int tcp_offset = ip_hdrlen(*pskb); struct tcphdr *th; __u32 seq; @@ -406,7 +406,7 @@ static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb, struct ip_vs_app *app) { int diff; - unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4; + const unsigned int tcp_offset = ip_hdrlen(*pskb); struct tcphdr *th; __u32 seq; diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index 5d54dd2ce12f..7893c00a91fe 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c @@ -713,8 +713,7 @@ static inline int is_tcp_reset(const struct sk_buff *skb) { struct tcphdr _tcph, *th; - th = skb_header_pointer(skb, skb->nh.iph->ihl * 4, - sizeof(_tcph), &_tcph); + th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); if (th == NULL) return 0; return th->rst; diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c index 16a9ebee2fe6..e65382da713e 100644 --- a/net/ipv4/ipvs/ip_vs_proto_tcp.c +++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c @@ -76,8 +76,7 @@ tcp_conn_schedule(struct sk_buff *skb, struct ip_vs_service *svc; struct tcphdr _tcph, *th; - th = skb_header_pointer(skb, skb->nh.iph->ihl*4, - sizeof(_tcph), &_tcph); + th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); if (th == NULL) { *verdict = NF_DROP; return 0; @@ -127,7 +126,7 @@ tcp_snat_handler(struct sk_buff **pskb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp) { struct tcphdr *tcph; - unsigned int tcphoff = (*pskb)->nh.iph->ihl * 4; + const unsigned int tcphoff = ip_hdrlen(*pskb); /* csum_check requires unshared skb */ if (!ip_vs_make_skb_writable(pskb, tcphoff+sizeof(*tcph))) @@ -175,7 +174,7 @@ tcp_dnat_handler(struct sk_buff **pskb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp) { struct tcphdr *tcph; - unsigned int tcphoff = (*pskb)->nh.iph->ihl * 4; + const unsigned int tcphoff = ip_hdrlen(*pskb); /* csum_check requires unshared skb */ if (!ip_vs_make_skb_writable(pskb, tcphoff+sizeof(*tcph))) @@ -224,7 +223,7 @@ tcp_dnat_handler(struct sk_buff **pskb, static int tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) { - unsigned int tcphoff = skb->nh.iph->ihl*4; + const unsigned int tcphoff = ip_hdrlen(skb); switch (skb->ip_summed) { case CHECKSUM_NONE: @@ -467,8 +466,7 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction, { struct tcphdr _tcph, *th; - th = skb_header_pointer(skb, skb->nh.iph->ihl*4, - sizeof(_tcph), &_tcph); + th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); if (th == NULL) return 0; diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c index 03f0a414cfa4..2cd950638923 100644 --- a/net/ipv4/ipvs/ip_vs_proto_udp.c +++ 
b/net/ipv4/ipvs/ip_vs_proto_udp.c @@ -22,7 +22,7 @@ #include #include - +#include static struct ip_vs_conn * udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, @@ -56,7 +56,7 @@ udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp; __be16 _ports[2], *pptr; - pptr = skb_header_pointer(skb, skb->nh.iph->ihl*4, + pptr = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ports), _ports); if (pptr == NULL) return NULL; @@ -82,7 +82,7 @@ udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_service *svc; struct udphdr _udph, *uh; - uh = skb_header_pointer(skb, skb->nh.iph->ihl*4, + uh = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_udph), &_udph); if (uh == NULL) { *verdict = NF_DROP; @@ -133,7 +133,7 @@ udp_snat_handler(struct sk_buff **pskb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp) { struct udphdr *udph; - unsigned int udphoff = (*pskb)->nh.iph->ihl * 4; + const unsigned int udphoff = ip_hdrlen(*pskb); /* csum_check requires unshared skb */ if (!ip_vs_make_skb_writable(pskb, udphoff+sizeof(*udph))) @@ -187,7 +187,7 @@ udp_dnat_handler(struct sk_buff **pskb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp) { struct udphdr *udph; - unsigned int udphoff = (*pskb)->nh.iph->ihl * 4; + unsigned int udphoff = ip_hdrlen(*pskb); /* csum_check requires unshared skb */ if (!ip_vs_make_skb_writable(pskb, udphoff+sizeof(*udph))) @@ -239,7 +239,7 @@ static int udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) { struct udphdr _udph, *uh; - unsigned int udphoff = skb->nh.iph->ihl*4; + const unsigned int udphoff = ip_hdrlen(skb); uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph); if (uh == NULL) diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c index 4f561f52c83a..c40762c67d0e 100644 --- a/net/ipv4/netfilter/ip_conntrack_amanda.c +++ b/net/ipv4/netfilter/ip_conntrack_amanda.c @@ -103,7 +103,7 @@ static int help(struct sk_buff **pskb, ip_ct_refresh(ct, *pskb, master_timeout * HZ); /* No data? */ - dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); if (dataoff >= (*pskb)->len) { if (net_ratelimit()) printk("amanda_help: skblen = %u\n", (*pskb)->len); diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index 23b99ae2cc37..8c013d9f6907 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c @@ -750,8 +750,7 @@ resolve_normal_ct(struct sk_buff *skb, IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0); - if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4, - &tuple,proto)) + if (!ip_ct_get_tuple(skb->nh.iph, skb, ip_hdrlen(skb), &tuple,proto)) return NULL; /* look for tuple match */ diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c index 1faa68ab9432..92389987e789 100644 --- a/net/ipv4/netfilter/ip_conntrack_ftp.c +++ b/net/ipv4/netfilter/ip_conntrack_ftp.c @@ -319,12 +319,12 @@ static int help(struct sk_buff **pskb, return NF_ACCEPT; } - th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl*4, + th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_tcph), &_tcph); if (th == NULL) return NF_ACCEPT; - dataoff = (*pskb)->nh.iph->ihl*4 + th->doff*4; + dataoff = ip_hdrlen(*pskb) + th->doff * 4; /* No data? 
*/ if (dataoff >= (*pskb)->len) { DEBUGP("ftp: pskblen = %u\n", (*pskb)->len); diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c index 53eb365ccc7e..5d638149b0e0 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c @@ -115,13 +115,13 @@ static int get_tpkt_data(struct sk_buff **pskb, struct ip_conntrack *ct, int tpktoff; /* Get TCP header */ - th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4, + th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_tcph), &_tcph); if (th == NULL) return 0; /* Get TCP data offset */ - tcpdataoff = (*pskb)->nh.iph->ihl * 4 + th->doff * 4; + tcpdataoff = ip_hdrlen(*pskb) + th->doff * 4; /* Get TCP data length */ tcpdatalen = (*pskb)->len - tcpdataoff; @@ -1185,11 +1185,10 @@ static unsigned char *get_udp_data(struct sk_buff **pskb, int *datalen) struct udphdr _uh, *uh; int dataoff; - uh = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4, sizeof(_uh), - &_uh); + uh = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_uh), &_uh); if (uh == NULL) return NULL; - dataoff = (*pskb)->nh.iph->ihl * 4 + sizeof(_uh); + dataoff = ip_hdrlen(*pskb) + sizeof(_uh); if (dataoff >= (*pskb)->len) return NULL; *datalen = (*pskb)->len - dataoff; diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c index 2b760c5cf709..f5ab8e4b97cb 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c @@ -543,7 +543,7 @@ conntrack_pptp_help(struct sk_buff **pskb, struct pptp_pkt_hdr _pptph, *pptph; struct PptpControlHeader _ctlh, *ctlh; union pptp_ctrl_union _pptpReq, *pptpReq; - unsigned int tcplen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4; + unsigned int tcplen = (*pskb)->len - ip_hdrlen(*pskb); unsigned int datalen, reqlen, nexthdr_off; int oldsstate, oldcstate; int ret; @@ -556,7 +556,7 @@ conntrack_pptp_help(struct sk_buff **pskb, return NF_ACCEPT; } - nexthdr_off = (*pskb)->nh.iph->ihl*4; + nexthdr_off = ip_hdrlen(*pskb); tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph); BUG_ON(!tcph); nexthdr_off += tcph->doff * 4; diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c index 053e591f407a..ee99abe482e3 100644 --- a/net/ipv4/netfilter/ip_conntrack_irc.c +++ b/net/ipv4/netfilter/ip_conntrack_irc.c @@ -130,13 +130,13 @@ static int help(struct sk_buff **pskb, } /* Not a full tcp header? */ - th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl*4, + th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_tcph), &_tcph); if (th == NULL) return NF_ACCEPT; /* No data? */ - dataoff = (*pskb)->nh.iph->ihl*4 + th->doff*4; + dataoff = ip_hdrlen(*pskb) + th->doff * 4; if (dataoff >= (*pskb)->len) return NF_ACCEPT; diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c index ad70c81a21e0..e253f3ee52d0 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c @@ -149,7 +149,7 @@ icmp_error_message(struct sk_buff *skb, IP_NF_ASSERT(skb->nfct == NULL); /* Not enough header? 
*/ - inside = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_in), &_in); + inside = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_in), &_in); if (inside == NULL) return -NF_ACCEPT; @@ -161,7 +161,7 @@ icmp_error_message(struct sk_buff *skb, } innerproto = ip_conntrack_proto_find_get(inside->ip.protocol); - dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp) + inside->ip.ihl*4; + dataoff = ip_hdrlen(skb) + sizeof(inside->icmp) + inside->ip.ihl * 4; /* Are they talking about one of our connections? */ if (!ip_ct_get_tuple(&inside->ip, skb, dataoff, &origtuple, innerproto)) { DEBUGP("icmp_error: ! get_tuple p=%u", inside->ip.protocol); @@ -214,7 +214,7 @@ icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, struct icmphdr _ih, *icmph; /* Not enough header? */ - icmph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_ih), &_ih); + icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih); if (icmph == NULL) { if (LOG_INVALID(IPPROTO_ICMP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, @@ -224,7 +224,7 @@ icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, /* See ip_conntrack_proto_tcp.c */ if (ip_conntrack_checksum && hooknum == NF_IP_PRE_ROUTING && - nf_ip_checksum(skb, hooknum, skb->nh.iph->ihl * 4, 0)) { + nf_ip_checksum(skb, hooknum, ip_hdrlen(skb), 0)) { if (LOG_INVALID(IPPROTO_ICMP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, "ip_ct_icmp: bad ICMP checksum "); diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c index e6942992b2f6..e29c436144b3 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c @@ -206,7 +206,7 @@ static int sctp_print_conntrack(struct seq_file *s, } #define for_each_sctp_chunk(skb, sch, _sch, offset, count) \ -for (offset = skb->nh.iph->ihl * 4 + sizeof(sctp_sctphdr_t), count = 0; \ +for (offset = ip_hdrlen(skb) + sizeof(sctp_sctphdr_t), count = 0; \ offset < skb->len && \ (sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch)); \ offset += (ntohs(sch->length) + 3) & ~3, count++) diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c index 7ff11977eb4d..fce3a3c69815 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c @@ -771,7 +771,7 @@ void ip_conntrack_tcp_update(struct sk_buff *skb, enum ip_conntrack_dir dir) { struct iphdr *iph = skb->nh.iph; - struct tcphdr *tcph = (void *)skb->nh.iph + skb->nh.iph->ihl*4; + struct tcphdr *tcph = (void *)skb->nh.iph + ip_hdrlen(skb); __u32 end; #ifdef DEBUGP_VARS struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir]; diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c index c59a962c1f61..7363e2a5cea4 100644 --- a/net/ipv4/netfilter/ip_conntrack_sip.c +++ b/net/ipv4/netfilter/ip_conntrack_sip.c @@ -402,7 +402,7 @@ static int sip_help(struct sk_buff **pskb, typeof(ip_nat_sip_hook) ip_nat_sip; /* No Data ? 
*/ - dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); if (dataoff >= (*pskb)->len) { DEBUGP("skb->len = %u\n", (*pskb)->len); return NF_ACCEPT; diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index 56b2f7546d1e..92609a4dcd74 100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c @@ -458,7 +458,7 @@ static unsigned int ip_conntrack_local(unsigned int hooknum, { /* root is playing with raw sockets. */ if ((*pskb)->len < sizeof(struct iphdr) - || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) { + || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { if (net_ratelimit()) printk("ipt_hook: happy cracking.\n"); return NF_ACCEPT; diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c index 76e175e7a972..afc6809a3888 100644 --- a/net/ipv4/netfilter/ip_conntrack_tftp.c +++ b/net/ipv4/netfilter/ip_conntrack_tftp.c @@ -53,7 +53,7 @@ static int tftp_help(struct sk_buff **pskb, typeof(ip_nat_tftp_hook) ip_nat_tftp; tfh = skb_header_pointer(*pskb, - (*pskb)->nh.iph->ihl*4+sizeof(struct udphdr), + ip_hdrlen(*pskb) + sizeof(struct udphdr), sizeof(_tftph), &_tftph); if (tfh == NULL) return NF_ACCEPT; diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c index 40737fdbe9a7..cf46930606f2 100644 --- a/net/ipv4/netfilter/ip_nat_core.c +++ b/net/ipv4/netfilter/ip_nat_core.c @@ -422,7 +422,7 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct, } *inside; struct ip_conntrack_protocol *proto; struct ip_conntrack_tuple inner, target; - int hdrlen = (*pskb)->nh.iph->ihl * 4; + int hdrlen = ip_hdrlen(*pskb); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); unsigned long statusbit; enum ip_nat_manip_type manip = HOOK2MANIP(hooknum); @@ -430,7 +430,7 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct, if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) return 0; - inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; + inside = (void *)(*pskb)->data + ip_hdrlen(*pskb); /* We're actually going to mangle it beyond trivial checksum adjustment, so make sure the current checksum is correct. */ @@ -458,7 +458,7 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct, /* rcu_read_lock()ed by nf_hook_slow */ proto = __ip_conntrack_proto_find(inside->ip.protocol); - if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 + + if (!ip_ct_get_tuple(&inside->ip, *pskb, ip_hdrlen(*pskb) + sizeof(struct icmphdr) + inside->ip.ihl*4, &inner, proto)) return 0; @@ -469,15 +469,14 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct, packet: PREROUTING (DST manip), routing produces ICMP, goes through POSTROUTING (which must correct the DST manip). */ if (!manip_pkt(inside->ip.protocol, pskb, - (*pskb)->nh.iph->ihl*4 - + sizeof(inside->icmp), + ip_hdrlen(*pskb) + sizeof(inside->icmp), &ct->tuplehash[!dir].tuple, !manip)) return 0; if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { /* Reloading "inside" here since manip_pkt inner. 
*/ - inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; + inside = (void *)(*pskb)->data + ip_hdrlen(*pskb); inside->icmp.checksum = 0; inside->icmp.checksum = csum_fold(skb_checksum(*pskb, hdrlen, (*pskb)->len - hdrlen, diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c index dc778cfef58b..25624e558562 100644 --- a/net/ipv4/netfilter/ip_nat_helper.c +++ b/net/ipv4/netfilter/ip_nat_helper.c @@ -322,8 +322,8 @@ ip_nat_sack_adjust(struct sk_buff **pskb, { unsigned int dir, optoff, optend; - optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr); - optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4; + optoff = ip_hdrlen(*pskb) + sizeof(struct tcphdr); + optend = ip_hdrlen(*pskb) + tcph->doff * 4; if (!skb_make_writable(pskb, optend)) return 0; @@ -374,10 +374,10 @@ ip_nat_seq_adjust(struct sk_buff **pskb, this_way = &ct->nat.info.seq[dir]; other_way = &ct->nat.info.seq[!dir]; - if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph))) + if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) return 0; - tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; + tcph = (void *)(*pskb)->data + ip_hdrlen(*pskb); if (after(ntohl(tcph->seq), this_way->correction_pos)) newseq = htonl(ntohl(tcph->seq) + this_way->offset_after); else diff --git a/net/ipv4/netfilter/ip_nat_helper_h323.c b/net/ipv4/netfilter/ip_nat_helper_h323.c index bdc99ef6159e..8b1e3388bd08 100644 --- a/net/ipv4/netfilter/ip_nat_helper_h323.c +++ b/net/ipv4/netfilter/ip_nat_helper_h323.c @@ -57,11 +57,11 @@ static int set_addr(struct sk_buff **pskb, } /* Relocate data pointer */ - th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4, + th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_tcph), &_tcph); if (th == NULL) return -1; - *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 + + *data = (*pskb)->data + ip_hdrlen(*pskb) + th->doff * 4 + dataoff; } else { if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, @@ -75,8 +75,8 @@ static int set_addr(struct sk_buff **pskb, /* ip_nat_mangle_udp_packet uses skb_make_writable() to copy * or pull everything in a linear buffer, so we can safely * use the skb pointers now */ - *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 + - sizeof(struct udphdr); + *data = ((*pskb)->data + ip_hdrlen(*pskb) + + sizeof(struct udphdr)); } return 0; diff --git a/net/ipv4/netfilter/ip_nat_sip.c b/net/ipv4/netfilter/ip_nat_sip.c index 325c5a9dc2ef..84953601762d 100644 --- a/net/ipv4/netfilter/ip_nat_sip.c +++ b/net/ipv4/netfilter/ip_nat_sip.c @@ -90,7 +90,7 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, matchoff, matchlen, addr, addrlen)) return 0; - *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr); return 1; } @@ -104,7 +104,7 @@ static unsigned int ip_nat_sip(struct sk_buff **pskb, struct addr_map map; int dataoff, datalen; - dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); datalen = (*pskb)->len - dataoff; if (datalen < sizeof("SIP/2.0") - 1) return NF_DROP; @@ -153,7 +153,7 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb, return 0; /* We need to reload this. Thanks Patrick. 
*/ - *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr); return 1; } @@ -166,7 +166,7 @@ static int mangle_content_len(struct sk_buff **pskb, char buffer[sizeof("65536")]; int bufflen; - dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); /* Get actual SDP lenght */ if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, @@ -199,7 +199,7 @@ static unsigned int mangle_sdp(struct sk_buff **pskb, char buffer[sizeof("nnn.nnn.nnn.nnn")]; unsigned int dataoff, bufflen; - dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); /* Mangle owner and contact info. */ bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index 6bcfdf6dfcc9..dbaaf78ff9a3 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c @@ -112,8 +112,7 @@ ip_nat_fn(unsigned int hooknum, if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { struct icmphdr _hdr, *hp; - hp = skb_header_pointer(*pskb, - (*pskb)->nh.iph->ihl*4, + hp = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_hdr), &_hdr); if (hp != NULL && hp->type == ICMP_REDIRECT) @@ -211,7 +210,7 @@ ip_nat_out(unsigned int hooknum, /* root is playing with raw sockets. */ if ((*pskb)->len < sizeof(struct iphdr) - || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) + || ip_hdrlen(*pskb) < sizeof(struct iphdr)) return NF_ACCEPT; ret = ip_nat_fn(hooknum, pskb, in, out, okfn); @@ -244,7 +243,7 @@ ip_nat_local_fn(unsigned int hooknum, /* root is playing with raw sockets. */ if ((*pskb)->len < sizeof(struct iphdr) - || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) + || ip_hdrlen(*pskb) < sizeof(struct iphdr)) return NF_ACCEPT; ret = ip_nat_fn(hooknum, pskb, in, out, okfn); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 50cc4b92e284..f66966650212 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -198,7 +198,7 @@ int do_match(struct ipt_entry_match *m, { /* Stop iteration if it doesn't match */ if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data, - offset, skb->nh.iph->ihl*4, hotdrop)) + offset, ip_hdrlen(skb), hotdrop)) return 1; else return 0; diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index 4f565633631d..44daf9e1da35 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -52,7 +53,7 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) __be16 oldval; /* Not enought header? 
*/ - tcph = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl*4, + tcph = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_tcph), &_tcph); if (!tcph) return 0; @@ -63,9 +64,9 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) tcph->cwr == einfo->proto.tcp.cwr))) return 1; - if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph))) + if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) return 0; - tcph = (void *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl*4; + tcph = (void *)(*pskb)->nh.iph + ip_hdrlen(*pskb); oldval = ((__be16 *)tcph)[6]; if (einfo->operation & IPT_ECN_OP_SET_ECE) diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 80f739e21824..01c04f0e5c91 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c @@ -43,7 +43,6 @@ MODULE_DESCRIPTION("iptables REJECT target module"); static void send_reset(struct sk_buff *oldskb, int hook) { struct sk_buff *nskb; - struct iphdr *iph = oldskb->nh.iph; struct tcphdr _otcph, *oth, *tcph; __be16 tmp_port; __be32 tmp_addr; @@ -54,7 +53,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) if (oldskb->nh.iph->frag_off & htons(IP_OFFSET)) return; - oth = skb_header_pointer(oldskb, oldskb->nh.iph->ihl * 4, + oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), sizeof(_otcph), &_otcph); if (oth == NULL) return; @@ -64,7 +63,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) return; /* Check checksum */ - if (nf_ip_checksum(oldskb, hook, iph->ihl * 4, IPPROTO_TCP)) + if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) return; /* We need a linear, writeable skb. We also need to expand @@ -84,7 +83,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) skb_shinfo(nskb)->gso_segs = 0; skb_shinfo(nskb)->gso_type = 0; - tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl); + tcph = (struct tcphdr *)(skb_network_header(nskb) + ip_hdrlen(nskb)); /* Swap source and dest */ tmp_addr = nskb->nh.iph->saddr; @@ -96,7 +95,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) /* Truncate to length (no data) */ tcph->doff = sizeof(struct tcphdr)/4; - skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr)); + skb_trim(nskb, ip_hdrlen(nskb) + sizeof(struct tcphdr)); nskb->nh.iph->tot_len = htons(nskb->len); if (tcph->ack) { @@ -105,9 +104,9 @@ static void send_reset(struct sk_buff *oldskb, int hook) tcph->ack_seq = 0; } else { needs_ack = 1; - tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin - + oldskb->len - oldskb->nh.iph->ihl*4 - - (oth->doff<<2)); + tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + + oldskb->len - ip_hdrlen(oldskb) - + (oth->doff << 2)); tcph->seq = 0; } @@ -149,7 +148,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) /* Adjust IP checksum */ nskb->nh.iph->check = 0; - nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, + nskb->nh.iph->check = ip_fast_csum(skb_network_header(nskb), nskb->nh.iph->ihl); /* "Never happens" */ @@ -182,7 +181,7 @@ static unsigned int reject(struct sk_buff **pskb, /* Our naive response construction doesn't deal with IP options, and probably shouldn't try. */ - if ((*pskb)->nh.iph->ihl<<2 != sizeof(struct iphdr)) + if (ip_hdrlen(*pskb) != sizeof(struct iphdr)) return NF_DROP; /* WARNING: This code causes reentry within iptables. 
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c index 37508b2cfea6..b8ade3cc7757 100644 --- a/net/ipv4/netfilter/ipt_ecn.c +++ b/net/ipv4/netfilter/ipt_ecn.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -38,8 +39,7 @@ static inline int match_tcp(const struct sk_buff *skb, /* In practice, TCP match does this, so can't fail. But let's * be good citizens. */ - th = skb_header_pointer(skb, skb->nh.iph->ihl * 4, - sizeof(_tcph), &_tcph); + th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); if (th == NULL) { *hotdrop = 0; return 0; diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index d1d61e97b976..42728909eba0 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c @@ -13,6 +13,7 @@ #include #include #include +#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team "); @@ -102,7 +103,7 @@ ipt_local_out_hook(unsigned int hook, { /* root is playing with raw sockets. */ if ((*pskb)->len < sizeof(struct iphdr) - || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) { + || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { if (net_ratelimit()) printk("ipt_hook: happy cracking.\n"); return NF_ACCEPT; diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index 98b66ef0c714..6cc3245f676a 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c @@ -17,6 +17,7 @@ #include #include #include +#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team "); @@ -136,7 +137,7 @@ ipt_local_hook(unsigned int hook, /* root is playing with raw sockets. */ if ((*pskb)->len < sizeof(struct iphdr) - || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) { + || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { if (net_ratelimit()) printk("ipt_hook: happy cracking.\n"); return NF_ACCEPT; diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 7cebbff0b0c3..fa14eb77f9b6 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -105,7 +105,7 @@ ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff, return -NF_DROP; } - *dataoff = skb_network_offset(*pskb) + (*pskb)->nh.iph->ihl * 4; + *dataoff = skb_network_offset(*pskb) + ip_hdrlen(*pskb); *protonum = (*pskb)->nh.iph->protocol; return NF_ACCEPT; @@ -151,8 +151,8 @@ static unsigned int ipv4_conntrack_help(unsigned int hooknum, if (!help || !help->helper) return NF_ACCEPT; - return help->helper->help(pskb, (skb_network_offset(*pskb) + - (*pskb)->nh.iph->ihl * 4), + return help->helper->help(pskb, + skb_network_offset(*pskb) + ip_hdrlen(*pskb), ct, ctinfo); } @@ -198,7 +198,7 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum, { /* root is playing with raw sockets. */ if ((*pskb)->len < sizeof(struct iphdr) - || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) { + || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { if (net_ratelimit()) printk("ipt_hook: happy cracking.\n"); return NF_ACCEPT; diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 5fd1e5363c1a..e090e929e6e2 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -158,7 +158,7 @@ icmp_error_message(struct sk_buff *skb, NF_CT_ASSERT(skb->nfct == NULL); /* Not enough header? 
*/ - inside = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_in), &_in); + inside = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_in), &_in); if (inside == NULL) return -NF_ACCEPT; @@ -172,7 +172,7 @@ icmp_error_message(struct sk_buff *skb, /* rcu_read_lock()ed by nf_hook_slow */ innerproto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); - dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp); + dataoff = ip_hdrlen(skb) + sizeof(inside->icmp); /* Are they talking about one of our connections? */ if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET, inside->ip.protocol, &origtuple, @@ -227,7 +227,7 @@ icmp_error(struct sk_buff *skb, unsigned int dataoff, struct icmphdr _ih, *icmph; /* Not enough header? */ - icmph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_ih), &_ih); + icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih); if (icmph == NULL) { if (LOG_INVALID(IPPROTO_ICMP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 452e9d326684..ea02f00d2dac 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -431,7 +431,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, } *inside; struct nf_conntrack_l4proto *l4proto; struct nf_conntrack_tuple inner, target; - int hdrlen = (*pskb)->nh.iph->ihl * 4; + int hdrlen = ip_hdrlen(*pskb); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); unsigned long statusbit; enum nf_nat_manip_type manip = HOOK2MANIP(hooknum); @@ -439,7 +439,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) return 0; - inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; + inside = (void *)(*pskb)->data + ip_hdrlen(*pskb); /* We're actually going to mangle it beyond trivial checksum adjustment, so make sure the current checksum is correct. */ @@ -469,9 +469,9 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); if (!nf_ct_get_tuple(*pskb, - (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr), - (*pskb)->nh.iph->ihl*4 + - sizeof(struct icmphdr) + inside->ip.ihl*4, + ip_hdrlen(*pskb) + sizeof(struct icmphdr), + (ip_hdrlen(*pskb) + + sizeof(struct icmphdr) + inside->ip.ihl * 4), (u_int16_t)AF_INET, inside->ip.protocol, &inner, l3proto, l4proto)) @@ -483,14 +483,14 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, packet: PREROUTING (DST manip), routing produces ICMP, goes through POSTROUTING (which must correct the DST manip). */ if (!manip_pkt(inside->ip.protocol, pskb, - (*pskb)->nh.iph->ihl*4 + sizeof(inside->icmp), + ip_hdrlen(*pskb) + sizeof(inside->icmp), &ct->tuplehash[!dir].tuple, !manip)) return 0; if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { /* Reloading "inside" here since manip_pkt inner. 
*/ - inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; + inside = (void *)(*pskb)->data + ip_hdrlen(*pskb); inside->icmp.checksum = 0; inside->icmp.checksum = csum_fold(skb_checksum(*pskb, hdrlen, diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index 9cbf3f9be13b..2eb3832db3a4 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -55,11 +55,11 @@ static int set_addr(struct sk_buff **pskb, } /* Relocate data pointer */ - th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4, + th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_tcph), &_tcph); if (th == NULL) return -1; - *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 + + *data = (*pskb)->data + ip_hdrlen(*pskb) + th->doff * 4 + dataoff; } else { if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, @@ -73,8 +73,8 @@ static int set_addr(struct sk_buff **pskb, /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy * or pull everything in a linear buffer, so we can safely * use the skb pointers now */ - *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 + - sizeof(struct udphdr); + *data = ((*pskb)->data + ip_hdrlen(*pskb) + + sizeof(struct udphdr)); } return 0; diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index 49a90c39ffce..723302afd840 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c @@ -190,7 +190,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb, (int)rep_len - (int)match_len, ct, ctinfo); /* Tell TCP window tracking about seq change */ - nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4, + nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb), ct, CTINFO2DIR(ctinfo)); } return 1; @@ -318,8 +318,8 @@ nf_nat_sack_adjust(struct sk_buff **pskb, unsigned int dir, optoff, optend; struct nf_conn_nat *nat = nfct_nat(ct); - optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr); - optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4; + optoff = ip_hdrlen(*pskb) + sizeof(struct tcphdr); + optend = ip_hdrlen(*pskb) + tcph->doff * 4; if (!skb_make_writable(pskb, optend)) return 0; @@ -371,10 +371,10 @@ nf_nat_seq_adjust(struct sk_buff **pskb, this_way = &nat->info.seq[dir]; other_way = &nat->info.seq[!dir]; - if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph))) + if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) return 0; - tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; + tcph = (void *)(*pskb)->data + ip_hdrlen(*pskb); if (after(ntohl(tcph->seq), this_way->correction_pos)) newseq = htonl(ntohl(tcph->seq) + this_way->offset_after); else @@ -399,7 +399,7 @@ nf_nat_seq_adjust(struct sk_buff **pskb, if (!nf_nat_sack_adjust(pskb, tcph, ct, ctinfo)) return 0; - nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4, ct, dir); + nf_conntrack_tcp_update(*pskb, ip_hdrlen(*pskb), ct, dir); return 1; } diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index b12cd7c314ca..bfd88e4e0685 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -92,7 +93,7 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, matchoff, matchlen, addr, addrlen)) return 0; - *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr); return 1; } @@ -106,7 +107,7 @@ static unsigned int ip_nat_sip(struct sk_buff 
**pskb, struct addr_map map; int dataoff, datalen; - dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); datalen = (*pskb)->len - dataoff; if (datalen < sizeof("SIP/2.0") - 1) return NF_DROP; @@ -155,7 +156,7 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb, return 0; /* We need to reload this. Thanks Patrick. */ - *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr); return 1; } @@ -168,7 +169,7 @@ static int mangle_content_len(struct sk_buff **pskb, char buffer[sizeof("65536")]; int bufflen; - dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); /* Get actual SDP lenght */ if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, @@ -200,7 +201,7 @@ static unsigned int mangle_sdp(struct sk_buff **pskb, char buffer[sizeof("nnn.nnn.nnn.nnn")]; unsigned int dataoff, bufflen; - dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); + dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); /* Mangle owner and contact info. */ bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index 15aa3db8cb33..61ca272165a1 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c @@ -101,8 +101,7 @@ nf_nat_fn(unsigned int hooknum, if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { struct icmphdr _hdr, *hp; - hp = skb_header_pointer(*pskb, - (*pskb)->nh.iph->ihl*4, + hp = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_hdr), &_hdr); if (hp != NULL && hp->type == ICMP_REDIRECT) @@ -203,7 +202,7 @@ nf_nat_out(unsigned int hooknum, /* root is playing with raw sockets. */ if ((*pskb)->len < sizeof(struct iphdr) || - (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) + ip_hdrlen(*pskb) < sizeof(struct iphdr)) return NF_ACCEPT; ret = nf_nat_fn(hooknum, pskb, in, out, okfn); @@ -236,7 +235,7 @@ nf_nat_local_fn(unsigned int hooknum, /* root is playing with raw sockets. */ if ((*pskb)->len < sizeof(struct iphdr) || - (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) + ip_hdrlen(*pskb) < sizeof(struct iphdr)) return NF_ACCEPT; ret = nf_nat_fn(hooknum, pskb, in, out, okfn); diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index 112a21d0c6da..76f0cf66f95c 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c @@ -102,7 +102,7 @@ ip6t_local_out_hook(unsigned int hook, #if 0 /* root is playing with raw sockets. */ if ((*pskb)->len < sizeof(struct iphdr) - || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) { + || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { if (net_ratelimit()) printk("ip6t_hook: happy cracking.\n"); return NF_ACCEPT; diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 0c468d35a937..da2c1994539b 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -138,7 +138,7 @@ ip6t_local_hook(unsigned int hook, #if 0 /* root is playing with raw sockets. 
*/ if ((*pskb)->len < sizeof(struct iphdr) - || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) { + || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { if (net_ratelimit()) printk("ip6t_hook: happy cracking.\n"); return NF_ACCEPT; -- cgit v1.2.3-59-g8ed1b From eddc9ec53be2ecdbf4efe0efd4a83052594f0ac0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 20 Apr 2007 22:47:35 -0700 Subject: [SK_BUFF]: Introduce ip_hdr(), remove skb->nh.iph Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- drivers/ieee1394/eth1394.c | 2 +- drivers/net/8139cp.c | 4 +- drivers/net/atl1/atl1_main.c | 15 +++--- drivers/net/bnx2.c | 18 ++++---- drivers/net/bonding/bond_alb.c | 17 ++++--- drivers/net/bonding/bond_main.c | 2 +- drivers/net/chelsio/sge.c | 4 +- drivers/net/cxgb3/sge.c | 2 +- drivers/net/e1000/e1000_main.c | 16 +++---- drivers/net/ehea/ehea_main.c | 20 ++++---- drivers/net/gianfar.c | 2 +- drivers/net/ioc3-eth.c | 4 +- drivers/net/ixgb/ixgb_main.c | 12 +++-- drivers/net/loopback.c | 6 +-- drivers/net/mv643xx_eth.c | 4 +- drivers/net/netxen/netxen_nic_hw.c | 4 +- drivers/net/ns83820.c | 4 +- drivers/net/pasemi_mac.c | 2 +- drivers/net/r8169.c | 2 +- drivers/net/sky2.c | 2 +- drivers/net/spider_net.c | 2 +- drivers/net/tg3.c | 30 ++++++------ drivers/net/via-velocity.c | 2 +- drivers/s390/net/qeth_main.c | 15 +++--- drivers/s390/net/qeth_tso.h | 12 ++--- include/linux/ip.h | 9 ++++ include/linux/skbuff.h | 1 - include/net/inet_ecn.h | 2 +- include/net/ip.h | 2 +- include/net/pkt_cls.h | 4 +- net/atm/mpc.c | 2 +- net/bridge/br_netfilter.c | 10 ++-- net/core/netpoll.c | 2 +- net/core/pktgen.c | 2 +- net/dccp/ipv4.c | 39 ++++++++-------- net/econet/af_econet.c | 4 +- net/ieee80211/ieee80211_tx.c | 2 +- net/ipv4/af_inet.c | 10 ++-- net/ipv4/ah4.c | 4 +- net/ipv4/arp.c | 6 +-- net/ipv4/cipso_ipv4.c | 2 +- net/ipv4/esp4.c | 4 +- net/ipv4/icmp.c | 10 ++-- net/ipv4/igmp.c | 14 +++--- net/ipv4/ip_forward.c | 4 +- net/ipv4/ip_fragment.c | 7 ++- net/ipv4/ip_gre.c | 10 ++-- net/ipv4/ip_input.c | 18 ++++---- net/ipv4/ip_options.c | 14 +++--- net/ipv4/ip_output.c | 20 ++++---- net/ipv4/ip_sockglue.c | 13 +++--- net/ipv4/ipcomp.c | 50 ++++++++------------ net/ipv4/ipconfig.c | 6 +-- net/ipv4/ipip.c | 15 +++--- net/ipv4/ipmr.c | 55 +++++++++++----------- net/ipv4/ipvs/ip_vs_app.c | 4 +- net/ipv4/ipvs/ip_vs_core.c | 38 +++++++-------- net/ipv4/ipvs/ip_vs_dh.c | 2 +- net/ipv4/ipvs/ip_vs_ftp.c | 4 +- net/ipv4/ipvs/ip_vs_lblc.c | 2 +- net/ipv4/ipvs/ip_vs_lblcr.c | 2 +- net/ipv4/ipvs/ip_vs_proto_tcp.c | 12 ++--- net/ipv4/ipvs/ip_vs_proto_udp.c | 14 +++--- net/ipv4/ipvs/ip_vs_sh.c | 2 +- net/ipv4/ipvs/ip_vs_xmit.c | 24 +++++----- net/ipv4/netfilter.c | 8 ++-- net/ipv4/netfilter/ip_conntrack_core.c | 20 ++++---- net/ipv4/netfilter/ip_conntrack_helper_h323.c | 12 ++--- net/ipv4/netfilter/ip_conntrack_netbios_ns.c | 2 +- net/ipv4/netfilter/ip_conntrack_proto_sctp.c | 4 +- net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 16 +++---- net/ipv4/netfilter/ip_conntrack_proto_udp.c | 8 ++-- net/ipv4/netfilter/ip_conntrack_standalone.c | 2 +- net/ipv4/netfilter/ip_nat_helper.c | 12 ++--- net/ipv4/netfilter/ip_nat_helper_h323.c | 2 +- net/ipv4/netfilter/ip_nat_rule.c | 2 +- net/ipv4/netfilter/ip_nat_snmp_basic.c | 4 +- net/ipv4/netfilter/ip_nat_standalone.c | 10 ++-- net/ipv4/netfilter/ip_tables.c | 4 +- net/ipv4/netfilter/ipt_CLUSTERIP.c | 4 +- net/ipv4/netfilter/ipt_ECN.c | 8 ++-- net/ipv4/netfilter/ipt_NETMAP.c | 4 +- net/ipv4/netfilter/ipt_REJECT.c | 26 +++++------ net/ipv4/netfilter/ipt_TOS.c | 4 +- 
net/ipv4/netfilter/ipt_TTL.c | 2 +- net/ipv4/netfilter/ipt_addrtype.c | 2 +- net/ipv4/netfilter/ipt_ecn.c | 4 +- net/ipv4/netfilter/ipt_iprange.c | 2 +- net/ipv4/netfilter/ipt_recent.c | 6 +-- net/ipv4/netfilter/ipt_tos.c | 2 +- net/ipv4/netfilter/ipt_ttl.c | 9 ++-- net/ipv4/netfilter/iptable_mangle.c | 25 ++++++---- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 10 ++-- net/ipv4/netfilter/nf_nat_h323.c | 2 +- net/ipv4/netfilter/nf_nat_helper.c | 12 ++--- net/ipv4/netfilter/nf_nat_rule.c | 2 +- net/ipv4/netfilter/nf_nat_snmp_basic.c | 4 +- net/ipv4/netfilter/nf_nat_standalone.c | 11 ++--- net/ipv4/raw.c | 4 +- net/ipv4/route.c | 6 +-- net/ipv4/syncookies.c | 8 ++-- net/ipv4/tcp_ipv4.c | 64 +++++++++++++------------- net/ipv4/udp.c | 17 +++---- net/ipv4/xfrm4_input.c | 21 +++++---- net/ipv4/xfrm4_mode_beet.c | 10 ++-- net/ipv4/xfrm4_mode_transport.c | 11 ++--- net/ipv4/xfrm4_mode_tunnel.c | 10 ++-- net/ipv4/xfrm4_output.c | 3 +- net/ipv4/xfrm4_policy.c | 2 +- net/ipv4/xfrm4_tunnel.c | 3 +- net/ipv6/datagram.c | 3 +- net/ipv6/ip6_tunnel.c | 8 ++-- net/ipv6/sit.c | 4 +- net/ipv6/udp.c | 2 +- net/netfilter/nf_conntrack_netbios_ns.c | 2 +- net/netfilter/xt_DSCP.c | 4 +- net/netfilter/xt_TCPMSS.c | 4 +- net/netfilter/xt_dscp.c | 2 +- net/netfilter/xt_hashlimit.c | 6 +-- net/netfilter/xt_length.c | 2 +- net/netfilter/xt_pkttype.c | 2 +- net/rxrpc/connection.c | 2 +- net/rxrpc/transport.c | 4 +- net/sched/cls_rsvp.h | 2 +- net/sched/sch_atm.c | 4 +- net/sched/sch_dsmark.c | 4 +- net/sched/sch_sfq.c | 2 +- net/sctp/input.c | 2 +- net/sctp/ipv6.c | 4 +- net/sctp/protocol.c | 8 ++-- net/sctp/sm_make_chunk.c | 4 +- net/sctp/sm_statefuns.c | 2 +- 132 files changed, 565 insertions(+), 564 deletions(-) (limited to 'include') diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index db2346f4d207..a364003ba47f 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -1668,7 +1668,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev) if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 || proto == htons(ETH_P_ARP) || (proto == htons(ETH_P_IP) && - IN_MULTICAST(ntohl(skb->nh.iph->daddr)))) { + IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) { tx_type = ETH1394_GASP; dest_node = LOCAL_BUS | ALL_NODES; max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD; diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 2f704cb06e7b..e8c9f27817b0 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -806,7 +806,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) if (mss) flags |= LargeSend | ((mss & MSSMask) << MSSShift); else if (skb->ip_summed == CHECKSUM_PARTIAL) { - const struct iphdr *ip = skb->nh.iph; + const struct iphdr *ip = ip_hdr(skb); if (ip->protocol == IPPROTO_TCP) flags |= IPCS | TCPCS; else if (ip->protocol == IPPROTO_UDP) @@ -825,7 +825,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) u32 first_len, first_eor; dma_addr_t first_mapping; int frag, first_entry = entry; - const struct iphdr *ip = skb->nh.iph; + const struct iphdr *ip = ip_hdr(skb); /* We must give this initial chunk to the device last. * Otherwise we could race with the device. 
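
Both conversions in this series reduce to the same accessor idiom: instead of dereferencing the skb->nh.iph union member directly, callers go through ip_hdr() and ip_hdrlen(), which derive the IP header from the network-header pointer. The short sketch below illustrates that idiom; only ip_hdr() (added in include/linux/ip.h) and ip_hdrlen() (include/net/ip.h, reworked on top of it) come from these patches, while the surrounding function and its name are hypothetical illustration.

    #include <linux/skbuff.h>
    #include <linux/ip.h>
    #include <linux/tcp.h>
    #include <linux/in.h>
    #include <net/ip.h>

    /*
     * Illustration only: shows the accessor idiom used throughout this
     * series; not taken from any one call site in the patches.
     */
    static int example_is_tcp_syn(const struct sk_buff *skb)
    {
            const struct iphdr *iph = ip_hdr(skb);    /* was: skb->nh.iph */
            const struct tcphdr *th;

            if (iph->protocol != IPPROTO_TCP)
                    return 0;

            /* transport header follows the variable-length IP header */
            th = (const struct tcphdr *)(skb_network_header(skb) +
                                         ip_hdrlen(skb));
            return th->syn;
    }
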
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index 793a61b2140f..d2be79a30f8a 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c @@ -1294,17 +1294,18 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, } if (skb->protocol == ntohs(ETH_P_IP)) { - skb->nh.iph->tot_len = 0; - skb->nh.iph->check = 0; - skb->h.th->check = - ~csum_tcpudp_magic(skb->nh.iph->saddr, - skb->nh.iph->daddr, 0, - IPPROTO_TCP, 0); + struct iphdr *iph = ip_hdr(skb); + + iph->tot_len = 0; + iph->check = 0; + skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, 0); ipofst = skb_network_offset(skb); if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; - tso->tsopl |= (skb->nh.iph->ihl & + tso->tsopl |= (iph->ihl & CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; tso->tsopl |= ((skb->h.th->doff << 2) & TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index b8091c55d441..eb0c4f1d4483 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -4513,6 +4513,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) if ((mss = skb_shinfo(skb)->gso_size) && (skb->len > (bp->dev->mtu + ETH_HLEN))) { u32 tcp_opt_len, ip_tcp_len; + struct iphdr *iph; if (skb_header_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { @@ -4529,16 +4530,15 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) } ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); - skb->nh.iph->check = 0; - skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); - skb->h.th->check = - ~csum_tcpudp_magic(skb->nh.iph->saddr, - skb->nh.iph->daddr, - 0, IPPROTO_TCP, 0); + iph = ip_hdr(skb); + iph->check = 0; + iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); + skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); - if (tcp_opt_len || (skb->nh.iph->ihl > 5)) { - vlan_tag_flags |= ((skb->nh.iph->ihl - 5) + - (tcp_opt_len >> 2)) << 8; + if (tcp_opt_len || (iph->ihl > 5)) { + vlan_tag_flags |= ((iph->ihl - 5) + + (tcp_opt_len >> 2)) << 8; } } else diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 86cfcb3f8131..8555afa574a4 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -112,7 +112,7 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) /* Forward declaration */ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); -static inline u8 _simple_hash(u8 *hash_start, int hash_size) +static inline u8 _simple_hash(const u8 *hash_start, int hash_size) { int i; u8 hash = 0; @@ -1268,7 +1268,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) int hash_size = 0; int do_tx_balance = 1; u32 hash_index = 0; - u8 *hash_start = NULL; + const u8 *hash_start = NULL; int res = 1; skb_reset_mac_header(skb); @@ -1285,15 +1285,18 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) } switch (ntohs(skb->protocol)) { - case ETH_P_IP: + case ETH_P_IP: { + const struct iphdr *iph = ip_hdr(skb); + if ((memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) || - (skb->nh.iph->daddr == ip_bcast) || - (skb->nh.iph->protocol == IPPROTO_IGMP)) { + (iph->daddr == ip_bcast) || + (iph->protocol == IPPROTO_IGMP)) { do_tx_balance = 0; break; } - hash_start = (char*)&(skb->nh.iph->daddr); - hash_size = sizeof(skb->nh.iph->daddr); + hash_start = (char *)&(iph->daddr); + hash_size = sizeof(iph->daddr); + } break; 
case ETH_P_IPV6: if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e4724d874e7c..7f11388893fc 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3476,7 +3476,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, struct net_device *bond_dev, int count) { struct ethhdr *data = (struct ethhdr *)skb->data; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); u16 *layer4hdr = (u16 *)((u32 *)iph + iph->ihl); int layer4_xor = 0; diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 8cdee67d582f..c357f45a16c3 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1871,7 +1871,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); hdr->opcode = CPL_TX_PKT_LSO; hdr->ip_csum_dis = hdr->l4_csum_dis = 0; - hdr->ip_hdr_words = skb->nh.iph->ihl; + hdr->ip_hdr_words = ip_hdr(skb)->ihl; hdr->tcp_hdr_words = skb->h.th->doff; hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, skb_shinfo(skb)->gso_size)); @@ -1912,7 +1912,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!(adapter->flags & UDP_CSUM_CAPABLE) && skb->ip_summed == CHECKSUM_PARTIAL && - skb->nh.iph->protocol == IPPROTO_UDP) { + ip_hdr(skb)->protocol == IPPROTO_UDP) { if (unlikely(skb_checksum_help(skb))) { pr_debug("%s: unable to do udp checksum\n", dev->name); dev_kfree_skb_any(skb); diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 7e9e9db4fb97..892e5dcafa04 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -900,7 +900,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, eth_type = skb_network_offset(skb) == ETH_HLEN ? 
CPL_ETH_II : CPL_ETH_II_VLAN; tso_info |= V_LSO_ETH_TYPE(eth_type) | - V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) | + V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | V_LSO_TCPHDR_WORDS(skb->h.th->doff); hdr->lso_info = htonl(tso_info); flits = 3; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 86161011b539..c324866c9789 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2890,14 +2890,12 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); mss = skb_shinfo(skb)->gso_size; if (skb->protocol == htons(ETH_P_IP)) { - skb->nh.iph->tot_len = 0; - skb->nh.iph->check = 0; - skb->h.th->check = - ~csum_tcpudp_magic(skb->nh.iph->saddr, - skb->nh.iph->daddr, - 0, - IPPROTO_TCP, - 0); + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, 0); cmd_length = E1000_TXD_CMD_IP; ipcse = skb->h.raw - skb->data - 1; } else if (skb->protocol == htons(ETH_P_IPV6)) { @@ -2911,7 +2909,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, ipcse = 0; } ipcss = skb_network_offset(skb); - ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; tucss = skb->h.raw - skb->data; tucso = (void *)&(skb->h.th->check) - (void *)skb->data; tucse = 0; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index b1c90a4fe31e..0dc701e611e5 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -1262,7 +1262,7 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) static inline void write_ip_start_end(struct ehea_swqe *swqe, const struct sk_buff *skb) { - swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data)); + swqe->ip_start = skb_network_offset(skb); swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1); } @@ -1688,6 +1688,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, struct ehea_swqe *swqe, u32 lkey) { if (skb->protocol == htons(ETH_P_IP)) { + const struct iphdr *iph = ip_hdr(skb); /* IPv4 */ swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IP_CHECKSUM @@ -1697,15 +1698,15 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, write_ip_start_end(swqe, skb); - if (skb->nh.iph->protocol == IPPROTO_UDP) { - if ((skb->nh.iph->frag_off & IP_MF) || - (skb->nh.iph->frag_off & IP_OFFSET)) + if (iph->protocol == IPPROTO_UDP) { + if ((iph->frag_off & IP_MF) || + (iph->frag_off & IP_OFFSET)) /* IP fragment, so don't change cs */ swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; else write_udp_offset_end(swqe, skb); - } else if (skb->nh.iph->protocol == IPPROTO_TCP) { + } else if (iph->protocol == IPPROTO_TCP) { write_tcp_offset_end(swqe, skb); } @@ -1731,10 +1732,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, int i; if (skb->protocol == htons(ETH_P_IP)) { + const struct iphdr *iph = ip_hdr(skb); /* IPv4 */ write_ip_start_end(swqe, skb); - if (skb->nh.iph->protocol == IPPROTO_TCP) { + if (iph->protocol == IPPROTO_TCP) { swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IP_CHECKSUM | EHEA_SWQE_TCP_CHECKSUM @@ -1742,9 +1744,9 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, write_tcp_offset_end(swqe, skb); - } else if (skb->nh.iph->protocol == IPPROTO_UDP) { - if ((skb->nh.iph->frag_off & IP_MF) || - (skb->nh.iph->frag_off & IP_OFFSET)) + } else if (iph->protocol 
== IPPROTO_UDP) { + if ((iph->frag_off & IP_MF) || + (iph->frag_off & IP_OFFSET)) /* IP fragment, so don't change cs */ swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT; diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index c7a70933c759..c9abc96a0919 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -942,7 +942,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) /* Tell the controller what the protocol is */ /* And provide the already calculated phcs */ - if (skb->nh.iph->protocol == IPPROTO_UDP) { + if (ip_hdr(skb)->protocol == IPPROTO_UDP) { flags |= TXFCB_UDP; fcb->phcs = skb->h.uh->check; } else diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index ea07aa5ba51b..d375e786b4b3 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -1393,9 +1393,9 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) * manually. */ if (skb->ip_summed == CHECKSUM_PARTIAL) { - int proto = ntohs(skb->nh.iph->protocol); + const struct iphdr *ih = ip_hdr(skb); + const int proto = ntohs(ih->protocol); unsigned int csoff; - struct iphdr *ih = skb->nh.iph; uint32_t csum, ehsum; uint16_t *eh; diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index cfb791bb45e2..bba4dcaf92e9 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1182,6 +1182,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) if (likely(skb_is_gso(skb))) { struct ixgb_buffer *buffer_info; + struct iphdr *iph; + if (skb_header_cloned(skb)) { err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) @@ -1190,13 +1192,13 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); mss = skb_shinfo(skb)->gso_size; - skb->nh.iph->tot_len = 0; - skb->nh.iph->check = 0; - skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, - skb->nh.iph->daddr, + iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); ipcss = skb_network_offset(skb); - ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; + ipcso = (void *)&(iph->check) - (void *)skb->data; ipcse = skb->h.raw - skb->data - 1; tucss = skb->h.raw - skb->data; tucso = (void *)&(skb->h.th->check) - (void *)skb->data; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 9265c27b13b2..20b5cb101368 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -75,7 +75,7 @@ static DEFINE_PER_CPU(struct pcpu_lstats, pcpu_lstats); #ifdef LOOPBACK_TSO static void emulate_large_send_offload(struct sk_buff *skb) { - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) + (iph->ihl * 4)); unsigned int doffset = (iph->ihl + th->doff) * 4; @@ -93,7 +93,7 @@ static void emulate_large_send_offload(struct sk_buff *skb) skb_reserve(nskb, 32); skb_set_mac_header(nskb, -ETH_HLEN); skb_reset_network_header(nskb); - iph = nskb->nh.iph; + iph = ip_hdr(nskb); memcpy(nskb->data, skb_network_header(skb), doffset); if (skb_copy_bits(skb, doffset + offset, @@ -145,7 +145,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef LOOPBACK_TSO if (skb_is_gso(skb)) { BUG_ON(skb->protocol != htons(ETH_P_IP)); - BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); + BUG_ON(ip_hdr(skb)->protocol != IPPROTO_TCP); emulate_large_send_offload(skb); return 0; diff --git a/drivers/net/mv643xx_eth.c 
b/drivers/net/mv643xx_eth.c index cd9369a285e2..6b39a268ec29 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -1161,9 +1161,9 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | ETH_GEN_IP_V_4_CHECKSUM | - skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; + ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT; - switch (skb->nh.iph->protocol) { + switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: cmd_sts |= ETH_UDP_FRAME; desc->l4i_chk = skb->h.uh->check; diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index b2f5032937e3..28d68c3550ef 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -378,9 +378,9 @@ void netxen_tso_check(struct netxen_adapter *adapter, skb->h.th->doff * 4); netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (skb->nh.iph->protocol == IPPROTO_TCP) { + if (ip_hdr(skb)->protocol == IPPROTO_TCP) { netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); - } else if (skb->nh.iph->protocol == IPPROTO_UDP) { + } else if (ip_hdr(skb)->protocol == IPPROTO_UDP) { netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT); } else { return; diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 747988b12ecd..6a32338623f1 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -1156,9 +1156,9 @@ again: extsts = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { extsts |= EXTSTS_IPPKT; - if (IPPROTO_TCP == skb->nh.iph->protocol) + if (IPPROTO_TCP == ip_hdr(skb)->protocol) extsts |= EXTSTS_TCPPKT; - else if (IPPROTO_UDP == skb->nh.iph->protocol) + else if (IPPROTO_UDP == ip_hdr(skb)->protocol) extsts |= EXTSTS_UDPPKT; } diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 82218720bc3e..1d8129986cc5 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -731,7 +731,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) if (skb->ip_summed == CHECKSUM_PARTIAL) { const unsigned char *nh = skb_network_header(skb); - switch (skb->nh.iph->protocol) { + switch (ip_hdr(skb)->protocol) { case IPPROTO_TCP: dflags |= XCT_MACTX_CSUM_TCP; dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 34280f94e9ff..45876a854f00 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -2284,7 +2284,7 @@ static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev) return LargeSend | ((mss & MSSMask) << MSSShift); } if (skb->ip_summed == CHECKSUM_PARTIAL) { - const struct iphdr *ip = skb->nh.iph; + const struct iphdr *ip = ip_hdr(skb); if (ip->protocol == IPPROTO_TCP) return IPCS | TCPCS; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 51e994f26a84..a37bb205f3d3 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1428,7 +1428,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) tcpsum |= offset + skb->csum_offset; /* sum write */ ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; - if (skb->nh.iph->protocol == IPPROTO_UDP) + if (ip_hdr(skb)->protocol == IPPROTO_UDP) ctrl |= UDPTCP; if (tcpsum != sky2->tx_tcpsum) { diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index f7e0ac7f789a..230da14b1b68 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -720,7 +720,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, spin_unlock_irqrestore(&chain->lock, flags); if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == 
CHECKSUM_PARTIAL) - switch (skb->nh.iph->protocol) { + switch (ip_hdr(skb)->protocol) { case IPPROTO_TCP: hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; break; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 62a3bba0097d..76a31afe20de 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -3909,12 +3909,13 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) mss |= (skb_headlen(skb) - ETH_HLEN) << 9; else { + struct iphdr *iph = ip_hdr(skb); + tcp_opt_len = ((skb->h.th->doff - 5) * 4); ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); - skb->nh.iph->check = 0; - skb->nh.iph->tot_len = htons(mss + ip_tcp_len + - tcp_opt_len); + iph->check = 0; + iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); mss |= (ip_tcp_len + tcp_opt_len) << 9; } @@ -4055,6 +4056,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) mss = 0; if (skb->len > (tp->dev->mtu + ETH_HLEN) && (mss = skb_shinfo(skb)->gso_size) != 0) { + struct iphdr *iph; int tcp_opt_len, ip_tcp_len, hdr_len; if (skb_header_cloned(skb) && @@ -4074,34 +4076,32 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) base_flags |= (TXD_FLAG_CPU_PRE_DMA | TXD_FLAG_CPU_POST_DMA); - skb->nh.iph->check = 0; - skb->nh.iph->tot_len = htons(mss + hdr_len); + iph = ip_hdr(skb); + iph->check = 0; + iph->tot_len = htons(mss + hdr_len); if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { skb->h.th->check = 0; base_flags &= ~TXD_FLAG_TCPUDP_CSUM; } else { - skb->h.th->check = - ~csum_tcpudp_magic(skb->nh.iph->saddr, - skb->nh.iph->daddr, - 0, IPPROTO_TCP, 0); + skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, 0); } if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { - if (tcp_opt_len || skb->nh.iph->ihl > 5) { + if (tcp_opt_len || iph->ihl > 5) { int tsflags; - tsflags = ((skb->nh.iph->ihl - 5) + - (tcp_opt_len >> 2)); + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); mss |= (tsflags << 11); } } else { - if (tcp_opt_len || skb->nh.iph->ihl > 5) { + if (tcp_opt_len || iph->ihl > 5) { int tsflags; - tsflags = ((skb->nh.iph->ihl - 5) + - (tcp_opt_len >> 2)); + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); base_flags |= tsflags << 12; } } diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 9f6cc1569b3e..422eaf8ea12d 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -2006,7 +2006,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) */ if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) && (skb->ip_summed == CHECKSUM_PARTIAL)) { - struct iphdr *ip = skb->nh.iph; + const struct iphdr *ip = ip_hdr(skb); if (ip->protocol == IPPROTO_TCP) td_ptr->tdesc1.TCR |= TCR0_TCPCK; else if (ip->protocol == IPPROTO_UDP) diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 0ff29e0628b5..8a07d548a05a 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -3820,18 +3820,20 @@ qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, return card->info.is_multicast_different & (card->qdio.no_out_queues - 1); if (card->qdio.do_prio_queueing && (ipv == 4)) { + const u8 tos = ip_hdr(skb)->tos; + if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){ - if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT) + if (tos & IP_TOS_NOTIMPORTANT) return 3; - if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY) + if (tos & IP_TOS_HIGHRELIABILITY) return 2; - if (skb->nh.iph->tos & 
IP_TOS_HIGHTHROUGHPUT) + if (tos & IP_TOS_HIGHTHROUGHPUT) return 1; - if (skb->nh.iph->tos & IP_TOS_LOWDELAY) + if (tos & IP_TOS_LOWDELAY) return 0; } if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC) - return 3 - (skb->nh.iph->tos >> 6); + return 3 - (tos >> 6); } else if (card->qdio.do_prio_queueing && (ipv == 6)) { /* TODO: IPv6!!! */ } @@ -4041,7 +4043,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, *((u32 *) skb->dst->neighbour->primary_key); } else { /* fill in destination address used in ip header */ - *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr; + *((u32 *)(&hdr->hdr.l3.dest_addr[12])) = + ip_hdr(skb)->daddr; } } else if (ipv == 6) { /* IPv6 or passthru */ hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type); diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h index 14504afb044e..255cb2e9c796 100644 --- a/drivers/s390/net/qeth_tso.h +++ b/drivers/s390/net/qeth_tso.h @@ -40,7 +40,7 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb) QETH_DBF_TEXT(trace, 5, "tsofhdr"); hdr = (struct qeth_hdr_tso *) skb->data; - iph = skb->nh.iph; + iph = ip_hdr(skb); tcph = skb->h.th; /*fix header to TSO values ...*/ hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; @@ -63,13 +63,9 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb) static inline void qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb) { - struct iphdr *iph; - struct ipv6hdr *ip6h; - struct tcphdr *tcph; - - iph = skb->nh.iph; - ip6h = skb->nh.ipv6h; - tcph = skb->h.th; + struct iphdr *iph = ip_hdr(skb); + struct ipv6hdr *ip6h = skb->nh.ipv6h; + struct tcphdr *tcph = skb->h.th; tcph->check = 0; if (skb->protocol == ETH_P_IPV6) { diff --git a/include/linux/ip.h b/include/linux/ip.h index 1d36b971a8b5..f2f26db16f57 100644 --- a/include/linux/ip.h +++ b/include/linux/ip.h @@ -104,6 +104,15 @@ struct iphdr { /*The options start here. */ }; +#ifdef __KERNEL__ +#include + +static inline struct iphdr *ip_hdr(const struct sk_buff *skb) +{ + return (struct iphdr *)skb_network_header(skb); +} +#endif + struct ip_auth_hdr { __u8 nexthdr; __u8 hdrlen; /* This one is measured in 32 bit units! 
*/ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 870438fba93f..62f841b5b700 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -247,7 +247,6 @@ struct sk_buff { } h; union { - struct iphdr *iph; struct ipv6hdr *ipv6h; struct arphdr *arph; unsigned char *raw; diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index b9ed3898e368..6fd4452c15d9 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -116,7 +116,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) case __constant_htons(ETH_P_IP): if (skb_network_header(skb) + sizeof(struct iphdr) <= skb->tail) - return IP_ECN_set_ce(skb->nh.iph); + return IP_ECN_set_ce(ip_hdr(skb)); break; case __constant_htons(ETH_P_IPV6): diff --git a/include/net/ip.h b/include/net/ip.h index 6f7ba32b199d..75f226d26e0d 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -46,7 +46,7 @@ struct inet_skb_parm static inline unsigned int ip_hdrlen(const struct sk_buff *skb) { - return skb->nh.iph->ihl * 4; + return ip_hdr(skb)->ihl * 4; } struct ipcm_cookie diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 8a6b0e7bded5..880eb7b54164 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -334,8 +334,8 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer) return NULL; } -static inline int tcf_valid_offset(struct sk_buff *skb, unsigned char *ptr, - int len) +static inline int tcf_valid_offset(const struct sk_buff *skb, + const unsigned char *ptr, const int len) { return unlikely((ptr + len) < skb->tail && ptr > skb->head); } diff --git a/net/atm/mpc.c b/net/atm/mpc.c index bc15728fd847..4d2592c14090 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -715,7 +715,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) new_skb->protocol = eth_type_trans(new_skb, dev); skb_reset_network_header(new_skb); - eg->latest_ip_addr = new_skb->nh.iph->saddr; + eg->latest_ip_addr = ip_hdr(new_skb)->saddr; eg->packets_rcvd++; mpc->eg_ops->put(eg); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 8a56d8963025..ebe740f6b902 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -48,8 +48,8 @@ #define skb_origaddr(skb) (((struct bridge_skb_cb *) \ (skb->nf_bridge->data))->daddr.ipv4) -#define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr) -#define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr) +#define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr) +#define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr) #ifdef CONFIG_SYSCTL static struct ctl_table_header *brnf_sysctl_header; @@ -265,7 +265,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) static int br_nf_pre_routing_finish(struct sk_buff *skb) { struct net_device *dev = skb->dev; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); struct nf_bridge_info *nf_bridge = skb->nf_bridge; int err; @@ -520,14 +520,14 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto inhdr_error; - iph = skb->nh.iph; + iph = ip_hdr(skb); if (iph->ihl < 5 || iph->version != 4) goto inhdr_error; if (!pskb_may_pull(skb, 4 * iph->ihl)) goto inhdr_error; - iph = skb->nh.iph; + iph = ip_hdr(skb); if (ip_fast_csum((__u8 *) iph, iph->ihl) != 0) goto inhdr_error; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 44e030eb6e75..c4cec17be334 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c 
@@ -310,7 +310,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) skb_push(skb, sizeof(*iph)); skb_reset_network_header(skb); - iph = skb->nh.iph; + iph = ip_hdr(skb); /* iph->version = 4; iph->ihl = 5; */ put_unaligned(0x45, (unsigned char *)iph); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 10d33fc233b3..e0faff8eb652 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2391,7 +2391,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev); skb->dev = odev; skb->pkt_type = PACKET_HOST; - skb->nh.iph = iph; + skb->nh.raw = (unsigned char *)iph; skb->h.uh = udph; if (pkt_dev->nfrags <= 0) diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 4a83978aa660..b85437dae0e7 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -363,8 +363,8 @@ EXPORT_SYMBOL_GPL(dccp_v4_send_check); static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb) { - return secure_dccp_sequence_number(skb->nh.iph->daddr, - skb->nh.iph->saddr, + return secure_dccp_sequence_number(ip_hdr(skb)->daddr, + ip_hdr(skb)->saddr, dccp_hdr(skb)->dccph_dport, dccp_hdr(skb)->dccph_sport); } @@ -405,7 +405,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, newinet->opt = ireq->opt; ireq->opt = NULL; newinet->mc_index = inet_iif(skb); - newinet->mc_ttl = skb->nh.iph->ttl; + newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->id = jiffies; dccp_sync_mss(newsk, dst_mtu(dst)); @@ -428,7 +428,7 @@ EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock); static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) { const struct dccp_hdr *dh = dccp_hdr(skb); - const struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); struct sock *nsk; struct request_sock **prev; /* Find possible connection requests. 
*/ @@ -460,8 +460,8 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk, struct rtable *rt; struct flowi fl = { .oif = ((struct rtable *)skb->dst)->rt_iif, .nl_u = { .ip4_u = - { .daddr = skb->nh.iph->saddr, - .saddr = skb->nh.iph->daddr, + { .daddr = ip_hdr(skb)->saddr, + .saddr = ip_hdr(skb)->daddr, .tos = RT_CONN_FLAGS(sk) } }, .proto = sk->sk_protocol, .uli_u = { .ports = @@ -513,6 +513,7 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { int err; struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; + const struct iphdr *rxiph; const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext) + sizeof(struct dccp_hdr_reset); @@ -559,13 +560,13 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq); dccp_csum_outgoing(skb); - dh->dccph_checksum = dccp_v4_csum_finish(skb, rxskb->nh.iph->saddr, - rxskb->nh.iph->daddr); + rxiph = ip_hdr(rxskb); + dh->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr, + rxiph->daddr); bh_lock_sock(dccp_v4_ctl_socket->sk); err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, - rxskb->nh.iph->daddr, - rxskb->nh.iph->saddr, NULL); + rxiph->daddr, rxiph->saddr, NULL); bh_unlock_sock(dccp_v4_ctl_socket->sk); if (net_xmit_eval(err) == 0) { @@ -640,8 +641,8 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; ireq = inet_rsk(req); - ireq->loc_addr = skb->nh.iph->daddr; - ireq->rmt_addr = skb->nh.iph->saddr; + ireq->loc_addr = ip_hdr(skb)->daddr; + ireq->rmt_addr = ip_hdr(skb)->saddr; ireq->opt = NULL; /* @@ -809,6 +810,7 @@ EXPORT_SYMBOL_GPL(dccp_invalid_packet); static int dccp_v4_rcv(struct sk_buff *skb) { const struct dccp_hdr *dh; + const struct iphdr *iph; struct sock *sk; int min_cov; @@ -817,8 +819,9 @@ static int dccp_v4_rcv(struct sk_buff *skb) if (dccp_invalid_packet(skb)) goto discard_it; + iph = ip_hdr(skb); /* Step 1: If header checksum is incorrect, drop packet and return */ - if (dccp_v4_csum_finish(skb, skb->nh.iph->saddr, skb->nh.iph->daddr)) { + if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) { DCCP_WARN("dropped packet with invalid checksum\n"); goto discard_it; } @@ -832,8 +835,8 @@ static int dccp_v4_rcv(struct sk_buff *skb) "src=%u.%u.%u.%u@%-5d " "dst=%u.%u.%u.%u@%-5d seq=%llu", dccp_packet_name(dh->dccph_type), - NIPQUAD(skb->nh.iph->saddr), ntohs(dh->dccph_sport), - NIPQUAD(skb->nh.iph->daddr), ntohs(dh->dccph_dport), + NIPQUAD(iph->saddr), ntohs(dh->dccph_sport), + NIPQUAD(iph->daddr), ntohs(dh->dccph_dport), (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq); if (dccp_packet_without_ack(skb)) { @@ -848,10 +851,8 @@ static int dccp_v4_rcv(struct sk_buff *skb) /* Step 2: * Look up flow ID in table and get corresponding socket */ sk = __inet_lookup(&dccp_hashinfo, - skb->nh.iph->saddr, dh->dccph_sport, - skb->nh.iph->daddr, dh->dccph_dport, - inet_iif(skb)); - + iph->saddr, dh->dccph_sport, + iph->daddr, dh->dccph_dport, inet_iif(skb)); /* * Step 2: * If no socket ... 
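
The br_nf_pre_routing() hunk above, like the SIP helpers with their "We need to reload this" comments, also shows why the header pointer is re-derived rather than cached: pskb_may_pull(), skb_make_writable() and the NAT mangle helpers may reallocate or move the skb head, so a previously computed struct iphdr pointer can go stale while ip_hdr() stays correct. A minimal sketch of that reload pattern follows; the function and its name are illustrative and do not correspond to any single call site in the patches.

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/ip.h>

    /*
     * Illustration only: re-evaluate ip_hdr() after any call that may
     * reallocate the skb head, mirroring br_nf_pre_routing() above.
     */
    static int example_has_ip_options(struct sk_buff *skb)
    {
            const struct iphdr *iph;

            if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                    return -EINVAL;

            iph = ip_hdr(skb);
            if (!pskb_may_pull(skb, iph->ihl * 4))
                    return -EINVAL;

            iph = ip_hdr(skb);      /* head may have moved: re-derive */
            return iph->ihl > 5;
    }
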
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 099543f5401f..dcc2e4b6b2fe 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -848,7 +848,7 @@ static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb) static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len) { - struct iphdr *ip = skb->nh.iph; + struct iphdr *ip = ip_hdr(skb); unsigned char stn = ntohl(ip->saddr) & 0xff; struct sock *sk; struct sk_buff *newskb; @@ -946,7 +946,7 @@ static void aun_data_available(struct sock *sk, int slen) data = skb->h.raw + sizeof(struct udphdr); ah = (struct aunhdr *)data; len = skb->len - sizeof(struct udphdr); - ip = skb->nh.iph; + ip = ip_hdr(skb); switch (ah->code) { diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 3fca4345ebe5..62a8a2b76539 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -228,7 +228,7 @@ static int ieee80211_classify(struct sk_buff *skb) if (eth->h_proto != htons(ETH_P_IP)) return 0; - ip = skb->nh.iph; + ip = ip_hdr(skb); switch (ip->tos & 0xfc) { case 0x20: return 2; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ab552a6098f9..e7720c72a6e2 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1112,7 +1112,7 @@ static int inet_gso_send_check(struct sk_buff *skb) if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) goto out; - iph = skb->nh.iph; + iph = ip_hdr(skb); ihl = iph->ihl * 4; if (ihl < sizeof(*iph)) goto out; @@ -1121,7 +1121,7 @@ static int inet_gso_send_check(struct sk_buff *skb) goto out; skb->h.raw = __skb_pull(skb, ihl); - iph = skb->nh.iph; + iph = ip_hdr(skb); proto = iph->protocol & (MAX_INET_PROTOS - 1); err = -EPROTONOSUPPORT; @@ -1155,7 +1155,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) goto out; - iph = skb->nh.iph; + iph = ip_hdr(skb); ihl = iph->ihl * 4; if (ihl < sizeof(*iph)) goto out; @@ -1164,7 +1164,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) goto out; skb->h.raw = __skb_pull(skb, ihl); - iph = skb->nh.iph; + iph = ip_hdr(skb); id = ntohs(iph->id); proto = iph->protocol & (MAX_INET_PROTOS - 1); segs = ERR_PTR(-EPROTONOSUPPORT); @@ -1180,7 +1180,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) skb = segs; do { - iph = skb->nh.iph; + iph = ip_hdr(skb); iph->id = htons(id++); iph->tot_len = htons(skb->len - skb->mac_len); iph->check = 0; diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 95ddbbd1552a..00fd31da252e 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -65,7 +65,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) char buf[60]; } tmp_iph; - top_iph = skb->nh.iph; + top_iph = ip_hdr(skb); iph = &tmp_iph.iph; iph->tos = top_iph->tos; @@ -152,7 +152,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; ah = (struct ip_auth_hdr*)skb->data; - iph = skb->nh.iph; + iph = ip_hdr(skb); ihl = skb->data - skb_network_header(skb); memcpy(work_buf, iph, ihl); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index fd36eebbd90a..01d0e8dd17d8 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -342,13 +342,13 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) switch (IN_DEV_ARP_ANNOUNCE(in_dev)) { default: case 0: /* By default announce any local IP */ - if (skb && inet_addr_type(skb->nh.iph->saddr) == RTN_LOCAL) - saddr = skb->nh.iph->saddr; + if (skb && 
inet_addr_type(ip_hdr(skb)->saddr) == RTN_LOCAL) + saddr = ip_hdr(skb)->saddr; break; case 1: /* Restrict announcements of saddr in same subnet */ if (!skb) break; - saddr = skb->nh.iph->saddr; + saddr = ip_hdr(skb)->saddr; if (inet_addr_type(saddr) == RTN_LOCAL) { /* saddr should be known to target */ if (inet_addr_onlink(in_dev, target, saddr)) diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index b0182aa2c81a..11a3404d65af 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1676,7 +1676,7 @@ validate_return: */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { - if (skb->nh.iph->protocol == IPPROTO_ICMP || error != -EACCES) + if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 222d21e5bbeb..ed3deed66445 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -58,7 +58,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) pskb_put(skb, trailer, clen - skb->len); __skb_push(skb, skb->data - skb_network_header(skb)); - top_iph = skb->nh.iph; + top_iph = ip_hdr(skb); esph = (struct ip_esp_hdr *)(skb_network_header(skb) + top_iph->ihl * 4); top_iph->tot_len = htons(skb->len + alen); @@ -218,7 +218,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) /* ... check padding bits here. Silly. :-) */ - iph = skb->nh.iph; + iph = ip_hdr(skb); ihl = iph->ihl * 4; if (x->encap) { diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index ff124d40c585..4d70c21c50aa 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -392,7 +392,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) icmp_param->data.icmph.checksum = 0; icmp_out_count(icmp_param->data.icmph.type); - inet->tos = skb->nh.iph->tos; + inet->tos = ip_hdr(skb)->tos; daddr = ipc.addr = rt->rt_src; ipc.opt = NULL; if (icmp_param->replyopts.optlen) { @@ -404,7 +404,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr, .saddr = rt->rt_spec_dst, - .tos = RT_TOS(skb->nh.iph->tos) } }, + .tos = RT_TOS(ip_hdr(skb)->tos) } }, .proto = IPPROTO_ICMP }; security_skb_classify_flow(skb, &fl); if (ip_route_output_key(&rt, &fl)) @@ -448,7 +448,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) * Check this, icmp_send is called from the most obscure devices * sometimes. 
*/ - iph = skb_in->nh.iph; + iph = ip_hdr(skb_in); if ((u8 *)iph < skb_in->head || (u8 *)(iph + 1) > skb_in->tail) goto out; @@ -676,7 +676,7 @@ static void icmp_unreach(struct sk_buff *skb) printk(KERN_WARNING "%u.%u.%u.%u sent an invalid ICMP " "type %u, code %u " "error to a broadcast: %u.%u.%u.%u on %s\n", - NIPQUAD(skb->nh.iph->saddr), + NIPQUAD(ip_hdr(skb)->saddr), icmph->type, icmph->code, NIPQUAD(iph->daddr), skb->dev->name); @@ -751,7 +751,7 @@ static void icmp_redirect(struct sk_buff *skb) */ case ICMP_REDIR_HOST: case ICMP_REDIR_HOSTTOS: - ip_rt_redirect(skb->nh.iph->saddr, iph->daddr, + ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr, skb->h.icmph->un.gateway, iph->saddr, skb->dev); break; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 0687a7235a6c..f511d03e2439 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -315,7 +315,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); - pip = skb->nh.iph; + pip = ip_hdr(skb); skb_put(skb, sizeof(struct iphdr) + 4); pip->version = 4; @@ -345,16 +345,14 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) static int igmpv3_sendpack(struct sk_buff *skb) { - struct iphdr *pip = skb->nh.iph; + struct iphdr *pip = ip_hdr(skb); struct igmphdr *pig = skb->h.igmph; - int iplen, igmplen; + const int iplen = skb->tail - skb->nh.raw; + const int igmplen = skb->tail - skb->h.raw; - iplen = skb->tail - (unsigned char *)skb->nh.iph; pip->tot_len = htons(iplen); ip_send_check(pip); - - igmplen = skb->tail - (unsigned char *)skb->h.igmph; - pig->csum = ip_compute_csum((void *)skb->h.igmph, igmplen); + pig->csum = ip_compute_csum(skb->h.igmph, igmplen); return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, skb->dev, dst_output); @@ -667,7 +665,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); - iph = skb->nh.iph; + iph = ip_hdr(skb); skb_put(skb, sizeof(struct iphdr) + 4); iph->version = 4; diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 369e721c4bab..467ebedb99ba 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -74,7 +74,7 @@ int ip_forward(struct sk_buff *skb) * that reaches zero, we must reply an ICMP control message telling * that the packet's lifetime expired. */ - if (skb->nh.iph->ttl <= 1) + if (ip_hdr(skb)->ttl <= 1) goto too_many_hops; if (!xfrm4_route_forward(skb)) @@ -88,7 +88,7 @@ int ip_forward(struct sk_buff *skb) /* We are about to mangle packet. Copy it! 
*/ if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len)) goto drop; - iph = skb->nh.iph; + iph = ip_hdr(skb); /* Decrease ttl after skb cow done */ ip_decrease_ttl(iph); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index af120b2d5331..0231bdcb2ab7 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -479,7 +479,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) goto err; } - offset = ntohs(skb->nh.iph->frag_off); + offset = ntohs(ip_hdr(skb)->frag_off); flags = offset & ~IP_OFFSET; offset &= IP_OFFSET; offset <<= 3; /* offset is in 8-byte chunks */ @@ -676,7 +676,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev) head->dev = dev; head->tstamp = qp->stamp; - iph = head->nh.iph; + iph = ip_hdr(head); iph->frag_off = 0; iph->tot_len = htons(len); IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS); @@ -700,7 +700,6 @@ out_fail: /* Process an incoming IP datagram fragment. */ struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user) { - struct iphdr *iph = skb->nh.iph; struct ipq *qp; struct net_device *dev; @@ -713,7 +712,7 @@ struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user) dev = skb->dev; /* Lookup (or create) queue header */ - if ((qp = ip_find(iph, user)) != NULL) { + if ((qp = ip_find(ip_hdr(skb), user)) != NULL) { struct sk_buff *ret = NULL; spin_lock(&qp->lock); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 7c6fda6fe846..851f46b910f2 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -533,7 +533,7 @@ static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) { if (INET_ECN_is_ce(iph->tos)) { if (skb->protocol == htons(ETH_P_IP)) { - IP_ECN_set_ce(skb->nh.iph); + IP_ECN_set_ce(ip_hdr(skb)); } else if (skb->protocol == htons(ETH_P_IPV6)) { IP6_ECN_set_ce(skb->nh.ipv6h); } @@ -565,7 +565,7 @@ static int ipgre_rcv(struct sk_buff *skb) if (!pskb_may_pull(skb, 16)) goto drop_nolock; - iph = skb->nh.iph; + iph = ip_hdr(skb); h = skb->data; flags = *(__be16*)h; @@ -670,7 +670,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct net_device_stats *stats = &tunnel->stat; - struct iphdr *old_iph = skb->nh.iph; + struct iphdr *old_iph = ip_hdr(skb); struct iphdr *tiph; u8 tos; __be16 df; @@ -825,7 +825,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb(skb); skb = new_skb; - old_iph = skb->nh.iph; + old_iph = ip_hdr(skb); } skb->h.raw = skb->nh.raw; @@ -841,7 +841,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) * Push down and install the IPIP header. 
*/ - iph = skb->nh.iph; + iph = ip_hdr(skb); iph->version = 4; iph->ihl = sizeof(struct iphdr) >> 2; iph->frag_off = df; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 2ee132b330fd..237880a80432 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -158,7 +158,7 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly; int ip_call_ra_chain(struct sk_buff *skb) { struct ip_ra_chain *ra; - u8 protocol = skb->nh.iph->protocol; + u8 protocol = ip_hdr(skb)->protocol; struct sock *last = NULL; read_lock(&ip_ra_lock); @@ -171,7 +171,7 @@ int ip_call_ra_chain(struct sk_buff *skb) if (sk && inet_sk(sk)->num == protocol && (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == skb->dev->ifindex)) { - if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { skb = ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN); if (skb == NULL) { read_unlock(&ip_ra_lock); @@ -206,7 +206,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb) rcu_read_lock(); { /* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */ - int protocol = skb->nh.iph->protocol; + int protocol = ip_hdr(skb)->protocol; int hash; struct sock *raw_sk; struct net_protocol *ipprot; @@ -218,7 +218,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb) /* If there maybe a raw socket we must check - if not we * don't care less */ - if (raw_sk && !raw_v4_input(skb, skb->nh.iph, hash)) + if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash)) raw_sk = NULL; if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) { @@ -264,7 +264,7 @@ int ip_local_deliver(struct sk_buff *skb) * Reassemble IP fragments. */ - if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER); if (!skb) return 0; @@ -292,7 +292,7 @@ static inline int ip_rcv_options(struct sk_buff *skb) goto drop; } - iph = skb->nh.iph; + iph = ip_hdr(skb); if (ip_options_compile(NULL, skb)) { IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); @@ -328,7 +328,7 @@ drop: static inline int ip_rcv_finish(struct sk_buff *skb) { - struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); /* * Initialise the virtual path cache for the packet. It describes @@ -389,7 +389,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto inhdr_error; - iph = skb->nh.iph; + iph = ip_hdr(skb); /* * RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum. @@ -408,7 +408,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, if (!pskb_may_pull(skb, iph->ihl*4)) goto inhdr_error; - iph = skb->nh.iph; + iph = ip_hdr(skb); if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) goto inhdr_error; diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index f7e9db612565..251346828cb4 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -110,7 +110,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) if (skb->dst) daddr = ((struct rtable*)skb->dst)->rt_spec_dst; else - daddr = skb->nh.iph->daddr; + daddr = ip_hdr(skb)->daddr; if (sopt->rr) { optlen = sptr[sopt->rr+1]; @@ -180,7 +180,8 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) /* * RFC1812 requires to fix illegal source routes. 
*/ - if (memcmp(&skb->nh.iph->saddr, &start[soffset+3], 4) == 0) + if (memcmp(&ip_hdr(skb)->saddr, + &start[soffset + 3], 4) == 0) doffset -= 4; } if (doffset > 3) { @@ -269,7 +270,8 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb) optptr = iph + sizeof(struct iphdr); opt->is_data = 0; } else { - optptr = opt->is_data ? opt->__data : (unsigned char*)&(skb->nh.iph[1]); + optptr = opt->is_data ? opt->__data : + (unsigned char *)&(ip_hdr(skb)[1]); iph = optptr - sizeof(struct iphdr); } @@ -587,7 +589,7 @@ void ip_forward_options(struct sk_buff *skb) if (srrptr + 3 <= srrspace) { opt->is_changed = 1; ip_rt_get_source(&optptr[srrptr-1], rt); - skb->nh.iph->daddr = rt->rt_dst; + ip_hdr(skb)->daddr = rt->rt_dst; optptr[2] = srrptr+4; } else if (net_ratelimit()) printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); @@ -599,7 +601,7 @@ void ip_forward_options(struct sk_buff *skb) } if (opt->is_changed) { opt->is_changed = 0; - ip_send_check(skb->nh.iph); + ip_send_check(ip_hdr(skb)); } } @@ -608,7 +610,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) struct ip_options *opt = &(IPCB(skb)->opt); int srrspace, srrptr; __be32 nexthop; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); unsigned char *optptr = skb_network_header(skb) + opt->srr; struct rtable *rt = (struct rtable*)skb->dst; struct rtable *rt2; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 15de9d43950e..1abc48899f2d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -127,7 +127,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, /* Build the IP header. */ skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); skb_reset_network_header(skb); - iph = skb->nh.iph; + iph = ip_hdr(skb); iph->version = 4; iph->ihl = 5; iph->tos = inet->tos; @@ -245,7 +245,7 @@ int ip_mc_output(struct sk_buff *skb) /* Multicasts with ttl 0 must not go beyond the host */ - if (skb->nh.iph->ttl == 0) { + if (ip_hdr(skb)->ttl == 0) { kfree_skb(skb); return 0; } @@ -332,7 +332,7 @@ packet_routed: /* OK, we know where to send it, allocate and build IP header. */ skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); skb_reset_network_header(skb); - iph = skb->nh.iph; + iph = ip_hdr(skb); *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); iph->tot_len = htons(skb->len); if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok) @@ -428,7 +428,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) * Point into the IP datagram header. */ - iph = skb->nh.iph; + iph = ip_hdr(skb); if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) { IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); @@ -504,7 +504,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) __skb_push(frag, hlen); skb_reset_network_header(frag); memcpy(skb_network_header(frag), iph, hlen); - iph = frag->nh.iph; + iph = ip_hdr(frag); iph->tot_len = htons(frag->len); ip_copy_metadata(frag, skb); if (offset == 0) @@ -619,7 +619,7 @@ slow_path: /* * Fill in the new header fields. */ - iph = skb2->nh.iph; + iph = ip_hdr(skb2); iph->frag_off = htons((offset >> 3)); /* ANK: dirty, but effective trick. 
Upgrade options only if @@ -1125,7 +1125,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, */ data = skb_put(skb, fragheaderlen + fraggap); skb_reset_network_header(skb); - iph = skb->nh.iph; + iph = ip_hdr(skb); data += fragheaderlen; skb->h.raw = data; @@ -1352,7 +1352,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr, .saddr = rt->rt_spec_dst, - .tos = RT_TOS(skb->nh.iph->tos) } }, + .tos = RT_TOS(ip_hdr(skb)->tos) } }, /* Not quite clean, but right. */ .uli_u = { .ports = { .sport = skb->h.th->dest, @@ -1370,9 +1370,9 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar with locally disabled BH and that sk cannot be already spinlocked. */ bh_lock_sock(sk); - inet->tos = skb->nh.iph->tos; + inet->tos = ip_hdr(skb)->tos; sk->sk_priority = skb->priority; - sk->sk_protocol = skb->nh.iph->protocol; + sk->sk_protocol = ip_hdr(skb)->protocol; ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, rt, MSG_DONTWAIT); if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 70888e1ef6b7..fabc250e16dd 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -59,7 +59,7 @@ static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) struct in_pktinfo info; struct rtable *rt = (struct rtable *)skb->dst; - info.ipi_addr.s_addr = skb->nh.iph->daddr; + info.ipi_addr.s_addr = ip_hdr(skb)->daddr; if (rt) { info.ipi_ifindex = rt->rt_iif; info.ipi_spec_dst.s_addr = rt->rt_spec_dst; @@ -73,13 +73,13 @@ static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb) { - int ttl = skb->nh.iph->ttl; + int ttl = ip_hdr(skb)->ttl; put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl); } static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb) { - put_cmsg(msg, SOL_IP, IP_TOS, 1, &skb->nh.iph->tos); + put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos); } static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb) @@ -87,7 +87,8 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb) if (IPCB(skb)->opt.optlen == 0) return; - put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen, skb->nh.iph+1); + put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen, + ip_hdr(skb) + 1); } @@ -299,7 +300,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf skb_put(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); - iph = skb->nh.iph; + iph = ip_hdr(skb); iph->daddr = daddr; serr = SKB_EXT_ERR(skb); @@ -369,7 +370,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) struct inet_sock *inet = inet_sk(sk); sin->sin_family = AF_INET; - sin->sin_addr.s_addr = skb->nh.iph->saddr; + sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); if (inet->cmsg_flags) diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index aa704b88f014..8eb46064c525 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -43,21 +43,15 @@ static LIST_HEAD(ipcomp_tfms_list); static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) { - int err, plen, dlen; struct ipcomp_data *ipcd = x->data; - u8 *start, *scratch; - struct crypto_comp *tfm; - int cpu; - - plen = skb->len; - dlen = IPCOMP_SCRATCH_SIZE; - start = skb->data; + const int plen = skb->len; + int dlen = 
IPCOMP_SCRATCH_SIZE; + const u8 *start = skb->data; + const int cpu = get_cpu(); + u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); + struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); + int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); - cpu = get_cpu(); - scratch = *per_cpu_ptr(ipcomp_scratches, cpu); - tfm = *per_cpu_ptr(ipcd->tfms, cpu); - - err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); if (err) goto out; @@ -90,7 +84,7 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; /* Remove ipcomp header and decompress original payload */ - iph = skb->nh.iph; + iph = ip_hdr(skb); ipch = (void *)skb->data; iph->protocol = ipch->nexthdr; skb->h.raw = skb->nh.raw + sizeof(*ipch); @@ -103,23 +97,16 @@ out: static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) { - int err, plen, dlen, ihlen; - struct iphdr *iph = skb->nh.iph; struct ipcomp_data *ipcd = x->data; - u8 *start, *scratch; - struct crypto_comp *tfm; - int cpu; + const int ihlen = ip_hdrlen(skb); + const int plen = skb->len - ihlen; + int dlen = IPCOMP_SCRATCH_SIZE; + u8 *start = skb->data + ihlen; + const int cpu = get_cpu(); + u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); + struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); + int err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); - ihlen = iph->ihl * 4; - plen = skb->len - ihlen; - dlen = IPCOMP_SCRATCH_SIZE; - start = skb->data + ihlen; - - cpu = get_cpu(); - scratch = *per_cpu_ptr(ipcomp_scratches, cpu); - tfm = *per_cpu_ptr(ipcd->tfms, cpu); - - err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); if (err) goto out; @@ -142,12 +129,11 @@ out: static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) { int err; - struct iphdr *iph; struct ip_comp_hdr *ipch; struct ipcomp_data *ipcd = x->data; int hdr_len = 0; + struct iphdr *iph = ip_hdr(skb); - iph = skb->nh.iph; iph->tot_len = htons(skb->len); hdr_len = iph->ihl * 4; if ((skb->len - hdr_len) < ipcd->threshold) { @@ -159,7 +145,7 @@ static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) goto out_ok; err = ipcomp_compress(x, skb); - iph = skb->nh.iph; + iph = ip_hdr(skb); if (err) { goto out_ok; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 6e8998409cbe..6b91c9f5d57a 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -703,7 +703,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d /* Construct IP header */ skb_reset_network_header(skb); - h = skb->nh.iph; + h = ip_hdr(skb); h->version = 4; h->ihl = 5; h->tot_len = htons(sizeof(struct bootp_pkt)); @@ -846,7 +846,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str sizeof(struct udphdr))) goto drop; - b = (struct bootp_pkt *) skb->nh.iph; + b = (struct bootp_pkt *)skb_network_header(skb); h = &b->iph; if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) @@ -884,7 +884,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str if (!pskb_may_pull(skb, skb->len)) goto drop; - b = (struct bootp_pkt *) skb->nh.iph; + b = (struct bootp_pkt *)skb_network_header(skb); h = &b->iph; /* One reply at a time, please. 
*/ diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 843cc09f961f..b7f6ff4705b0 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -461,9 +461,10 @@ out: #endif } -static inline void ipip_ecn_decapsulate(struct iphdr *outer_iph, struct sk_buff *skb) +static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph, + struct sk_buff *skb) { - struct iphdr *inner_iph = skb->nh.iph; + struct iphdr *inner_iph = ip_hdr(skb); if (INET_ECN_is_ce(outer_iph->tos)) IP_ECN_set_ce(inner_iph); @@ -471,10 +472,8 @@ static inline void ipip_ecn_decapsulate(struct iphdr *outer_iph, struct sk_buff static int ipip_rcv(struct sk_buff *skb) { - struct iphdr *iph; struct ip_tunnel *tunnel; - - iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); read_lock(&ipip_lock); if ((tunnel = ipip_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) { @@ -521,7 +520,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) __be16 df = tiph->frag_off; struct rtable *rt; /* Route to the other host */ struct net_device *tdev; /* Device to other host */ - struct iphdr *old_iph = skb->nh.iph; + struct iphdr *old_iph = ip_hdr(skb); struct iphdr *iph; /* Our new IP header */ int max_headroom; /* The extra header space needed */ __be32 dst = tiph->daddr; @@ -615,7 +614,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb(skb); skb = new_skb; - old_iph = skb->nh.iph; + old_iph = ip_hdr(skb); } skb->h.raw = skb->nh.raw; @@ -631,7 +630,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) * Push down and install the IPIP header. */ - iph = skb->nh.iph; + iph = ip_hdr(skb); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = df; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b24dffe3bd46..e0021499093f 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -303,7 +303,7 @@ static void ipmr_destroy_unres(struct mfc_cache *c) atomic_dec(&cache_resolve_queue_len); while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) { - if (skb->nh.iph->version == 0) { + if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); nlh->nlmsg_type = NLMSG_ERROR; nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); @@ -509,7 +509,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) */ while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) { - if (skb->nh.iph->version == 0) { + if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { @@ -569,8 +569,9 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) msg->im_msgtype = IGMPMSG_WHOLEPKT; msg->im_mbz = 0; msg->im_vif = reg_vif_num; - skb->nh.iph->ihl = sizeof(struct iphdr) >> 2; - skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr)); + ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; + ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + + sizeof(struct iphdr)); } else #endif { @@ -579,10 +580,10 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) * Copy the IP header */ - skb->nh.iph = (struct iphdr *)skb_put(skb, ihl); + skb->nh.raw = skb_put(skb, ihl); memcpy(skb->data,pkt->data,ihl); - skb->nh.iph->protocol = 0; /* Flag to the kernel this is a route add */ - msg = (struct igmpmsg*)skb->nh.iph; + ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ + msg = (struct 
igmpmsg *)skb_network_header(skb); msg->im_vif = vifi; skb->dst = dst_clone(pkt->dst); @@ -594,7 +595,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) igmp->type = msg->im_msgtype = assert; igmp->code = 0; - skb->nh.iph->tot_len=htons(skb->len); /* Fix the length */ + ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ skb->h.raw = skb->nh.raw; } @@ -624,11 +625,12 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) { int err; struct mfc_cache *c; + const struct iphdr *iph = ip_hdr(skb); spin_lock_bh(&mfc_unres_lock); for (c=mfc_unres_queue; c; c=c->next) { - if (c->mfc_mcastgrp == skb->nh.iph->daddr && - c->mfc_origin == skb->nh.iph->saddr) + if (c->mfc_mcastgrp == iph->daddr && + c->mfc_origin == iph->saddr) break; } @@ -648,9 +650,9 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) /* * Fill in the new cache entry */ - c->mfc_parent=-1; - c->mfc_origin=skb->nh.iph->saddr; - c->mfc_mcastgrp=skb->nh.iph->daddr; + c->mfc_parent = -1; + c->mfc_origin = iph->saddr; + c->mfc_mcastgrp = iph->daddr; /* * Reflect first query at mrouted. @@ -1096,12 +1098,12 @@ static struct notifier_block ip_mr_notifier={ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) { struct iphdr *iph; - struct iphdr *old_iph = skb->nh.iph; + struct iphdr *old_iph = ip_hdr(skb); skb_push(skb, sizeof(struct iphdr)); - skb->h.ipiph = skb->nh.iph; + skb->h.raw = skb->nh.raw; skb_reset_network_header(skb); - iph = skb->nh.iph; + iph = ip_hdr(skb); iph->version = 4; iph->tos = old_iph->tos; @@ -1137,7 +1139,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) { - struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); struct vif_device *vif = &vif_table[vifi]; struct net_device *dev; struct rtable *rt; @@ -1203,8 +1205,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) dst_release(skb->dst); skb->dst = &rt->u.dst; - iph = skb->nh.iph; - ip_decrease_ttl(iph); + ip_decrease_ttl(ip_hdr(skb)); /* FIXME: forward and output firewalls used to be called here. * What do we do with netfilter? 
-- RR */ @@ -1304,7 +1305,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local * Forward the frame */ for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) { - if (skb->nh.iph->ttl > cache->mfc_un.res.ttls[ct]) { + if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) { if (psend != -1) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) @@ -1350,7 +1351,7 @@ int ip_mr_input(struct sk_buff *skb) if (IPCB(skb)->opt.router_alert) { if (ip_call_ra_chain(skb)) return 0; - } else if (skb->nh.iph->protocol == IPPROTO_IGMP){ + } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){ /* IGMPv1 (and broken IGMPv2 implementations sort of Cisco IOS <= 11.2(8)) do not put router alert option to IGMP packets destined to routable @@ -1369,7 +1370,7 @@ int ip_mr_input(struct sk_buff *skb) } read_lock(&mrt_lock); - cache = ipmr_cache_find(skb->nh.iph->saddr, skb->nh.iph->daddr); + cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); /* * No usable cache entry @@ -1580,6 +1581,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) if (cache==NULL) { struct sk_buff *skb2; + struct iphdr *iph; struct net_device *dev; int vif; @@ -1601,10 +1603,11 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) skb_push(skb2, sizeof(struct iphdr)); skb_reset_network_header(skb2); - skb2->nh.iph->ihl = sizeof(struct iphdr)>>2; - skb2->nh.iph->saddr = rt->rt_src; - skb2->nh.iph->daddr = rt->rt_dst; - skb2->nh.iph->version = 0; + iph = ip_hdr(skb2); + iph->ihl = sizeof(struct iphdr) >> 2; + iph->saddr = rt->rt_src; + iph->daddr = rt->rt_dst; + iph->version = 0; err = ipmr_cache_unresolved(vif, skb2); read_unlock(&mrt_lock); return err; diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index e5beab28cd0f..c8a822c0aa75 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c @@ -577,7 +577,6 @@ static const struct file_operations ip_vs_app_fops = { int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, char *o_buf, int o_len, char *n_buf, int n_len) { - struct iphdr *iph; int diff; int o_offset; int o_left; @@ -607,8 +606,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, } /* must update the iph total length here */ - iph = skb->nh.iph; - iph->tot_len = htons(skb->len); + ip_hdr(skb)->tot_len = htons(skb->len); LeaveFunction(9); return 0; diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index 7893c00a91fe..62cfbed317bf 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c @@ -212,7 +212,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, __be16 ports[2]) { struct ip_vs_conn *cp = NULL; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); struct ip_vs_dest *dest; struct ip_vs_conn *ct; __be16 dport; /* destination port to forward */ @@ -381,7 +381,7 @@ struct ip_vs_conn * ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct ip_vs_conn *cp = NULL; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); struct ip_vs_dest *dest; __be16 _ports[2], *pptr; @@ -447,7 +447,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, struct ip_vs_protocol *pp) { __be16 _ports[2], *pptr; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); pptr = skb_header_pointer(skb, iph->ihl*4, sizeof(_ports), _ports); @@ -546,7 +546,7 @@ ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) { skb = ip_defrag(skb, user); if (skb) - ip_send_check(skb->nh.iph); + 
ip_send_check(ip_hdr(skb)); return skb; } @@ -557,7 +557,7 @@ ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int inout) { - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); unsigned int icmp_offset = iph->ihl*4; struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) + icmp_offset); @@ -618,14 +618,14 @@ static int ip_vs_out_icmp(struct sk_buff **pskb, int *related) *related = 1; /* reassemble IP fragments */ - if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT); if (!skb) return NF_STOLEN; *pskb = skb; } - iph = skb->nh.iph; + iph = ip_hdr(skb); offset = ihl = iph->ihl * 4; ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); if (ic == NULL) @@ -740,14 +740,14 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, if (skb->ipvs_property) return NF_ACCEPT; - iph = skb->nh.iph; + iph = ip_hdr(skb); if (unlikely(iph->protocol == IPPROTO_ICMP)) { int related, verdict = ip_vs_out_icmp(pskb, &related); if (related) return verdict; skb = *pskb; - iph = skb->nh.iph; + iph = ip_hdr(skb); } pp = ip_vs_proto_get(iph->protocol); @@ -760,7 +760,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, skb = ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT); if (!skb) return NF_STOLEN; - iph = skb->nh.iph; + iph = ip_hdr(skb); *pskb = skb; } @@ -810,8 +810,8 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, if (pp->snat_handler && !pp->snat_handler(pskb, pp, cp)) goto drop; skb = *pskb; - skb->nh.iph->saddr = cp->vaddr; - ip_send_check(skb->nh.iph); + ip_hdr(skb)->saddr = cp->vaddr; + ip_send_check(ip_hdr(skb)); /* For policy routing, packets originating from this * machine itself may be routed differently to packets @@ -861,7 +861,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) *related = 1; /* reassemble IP fragments */ - if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { + if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { skb = ip_vs_gather_frags(skb, hooknum == NF_IP_LOCAL_IN ? IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD); @@ -870,7 +870,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) *pskb = skb; } - iph = skb->nh.iph; + iph = ip_hdr(skb); offset = ihl = iph->ihl * 4; ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); if (ic == NULL) @@ -966,19 +966,19 @@ ip_vs_in(unsigned int hooknum, struct sk_buff **pskb, || skb->dev == &loopback_dev || skb->sk)) { IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n", skb->pkt_type, - skb->nh.iph->protocol, - NIPQUAD(skb->nh.iph->daddr)); + ip_hdr(skb)->protocol, + NIPQUAD(ip_hdr(skb)->daddr)); return NF_ACCEPT; } - iph = skb->nh.iph; + iph = ip_hdr(skb); if (unlikely(iph->protocol == IPPROTO_ICMP)) { int related, verdict = ip_vs_in_icmp(pskb, &related, hooknum); if (related) return verdict; skb = *pskb; - iph = skb->nh.iph; + iph = ip_hdr(skb); } /* Protocol supported? 
*/ @@ -1064,7 +1064,7 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff **pskb, { int r; - if ((*pskb)->nh.iph->protocol != IPPROTO_ICMP) + if (ip_hdr(*pskb)->protocol != IPPROTO_ICMP) return NF_ACCEPT; return ip_vs_in_icmp(pskb, &r, hooknum); diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c index 502111fba872..dcf5d46aaa5e 100644 --- a/net/ipv4/ipvs/ip_vs_dh.c +++ b/net/ipv4/ipvs/ip_vs_dh.c @@ -204,7 +204,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct ip_vs_dest *dest; struct ip_vs_dh_bucket *tbl; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); IP_VS_DBG(6, "ip_vs_dh_schedule(): Scheduling...\n"); diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c index 847c47af040c..25bd68967305 100644 --- a/net/ipv4/ipvs/ip_vs_ftp.c +++ b/net/ipv4/ipvs/ip_vs_ftp.c @@ -159,7 +159,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, return 0; if (cp->app_data == &ip_vs_ftp_pasv) { - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); data = (char *)th + (th->doff << 2); data_limit = (*pskb)->tail; @@ -262,7 +262,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, /* * Detecting whether it is passive */ - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); /* Since there may be OPTIONS in the TCP packet and the HLEN is diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c index c801273cb881..052f4ed59174 100644 --- a/net/ipv4/ipvs/ip_vs_lblc.c +++ b/net/ipv4/ipvs/ip_vs_lblc.c @@ -521,7 +521,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) struct ip_vs_dest *dest; struct ip_vs_lblc_table *tbl; struct ip_vs_lblc_entry *en; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c index 23f9b9e73c85..6225acac7a3b 100644 --- a/net/ipv4/ipvs/ip_vs_lblcr.c +++ b/net/ipv4/ipvs/ip_vs_lblcr.c @@ -775,7 +775,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) struct ip_vs_dest *dest; struct ip_vs_lblcr_table *tbl; struct ip_vs_lblcr_entry *en; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c index e65382da713e..e65577a77006 100644 --- a/net/ipv4/ipvs/ip_vs_proto_tcp.c +++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c @@ -83,8 +83,8 @@ tcp_conn_schedule(struct sk_buff *skb, } if (th->syn && - (svc = ip_vs_service_get(skb->mark, skb->nh.iph->protocol, - skb->nh.iph->daddr, th->dest))) { + (svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, + ip_hdr(skb)->daddr, th->dest))) { if (ip_vs_todrop()) { /* * It seems that we are very loaded. 
@@ -142,7 +142,7 @@ tcp_snat_handler(struct sk_buff **pskb, return 0; } - tcph = (void *)(*pskb)->nh.iph + tcphoff; + tcph = (void *)ip_hdr(*pskb) + tcphoff; tcph->source = cp->vport; /* Adjust TCP checksums */ @@ -193,7 +193,7 @@ tcp_dnat_handler(struct sk_buff **pskb, return 0; } - tcph = (void *)(*pskb)->nh.iph + tcphoff; + tcph = (void *)ip_hdr(*pskb) + tcphoff; tcph->dest = cp->dport; /* @@ -229,9 +229,9 @@ tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) case CHECKSUM_NONE: skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); case CHECKSUM_COMPLETE: - if (csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr, + if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->len - tcphoff, - skb->nh.iph->protocol, skb->csum)) { + ip_hdr(skb)->protocol, skb->csum)) { IP_VS_DBG_RL_PKT(0, pp, skb, 0, "Failed checksum for"); return 0; diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c index 2cd950638923..8ee5fe6a101d 100644 --- a/net/ipv4/ipvs/ip_vs_proto_udp.c +++ b/net/ipv4/ipvs/ip_vs_proto_udp.c @@ -89,8 +89,8 @@ udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, return 0; } - if ((svc = ip_vs_service_get(skb->mark, skb->nh.iph->protocol, - skb->nh.iph->daddr, uh->dest))) { + if ((svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, + ip_hdr(skb)->daddr, uh->dest))) { if (ip_vs_todrop()) { /* * It seems that we are very loaded. @@ -151,7 +151,7 @@ udp_snat_handler(struct sk_buff **pskb, return 0; } - udph = (void *)(*pskb)->nh.iph + udphoff; + udph = (void *)ip_hdr(*pskb) + udphoff; udph->source = cp->vport; /* @@ -206,7 +206,7 @@ udp_dnat_handler(struct sk_buff **pskb, return 0; } - udph = (void *)(*pskb)->nh.iph + udphoff; + udph = (void *)ip_hdr(*pskb) + udphoff; udph->dest = cp->dport; /* @@ -251,10 +251,10 @@ udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); case CHECKSUM_COMPLETE: - if (csum_tcpudp_magic(skb->nh.iph->saddr, - skb->nh.iph->daddr, + if (csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, skb->len - udphoff, - skb->nh.iph->protocol, + ip_hdr(skb)->protocol, skb->csum)) { IP_VS_DBG_RL_PKT(0, pp, skb, 0, "Failed checksum for"); diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c index 338668f88fe2..1b25b00ef1e1 100644 --- a/net/ipv4/ipvs/ip_vs_sh.c +++ b/net/ipv4/ipvs/ip_vs_sh.c @@ -201,7 +201,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct ip_vs_dest *dest; struct ip_vs_sh_bucket *tbl; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n"); diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index d1403d0855ed..a7aee6822033 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c @@ -156,7 +156,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp) { struct rtable *rt; /* Route to the other host */ - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); u8 tos = iph->tos; int mtu; struct flowi fl = { @@ -193,7 +193,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_rt_put(rt); return NF_STOLEN; } - ip_send_check(skb->nh.iph); + ip_send_check(ip_hdr(skb)); /* drop old route */ dst_release(skb->dst); @@ -226,7 +226,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, { struct rtable *rt; /* Route to the other host */ int mtu; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph 
= ip_hdr(skb); EnterFunction(10); @@ -266,8 +266,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* mangle the packet */ if (pp->dnat_handler && !pp->dnat_handler(&skb, pp, cp)) goto tx_error; - skb->nh.iph->daddr = cp->daddr; - ip_send_check(skb->nh.iph); + ip_hdr(skb)->daddr = cp->daddr; + ip_send_check(ip_hdr(skb)); IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); @@ -320,7 +320,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, { struct rtable *rt; /* Route to the other host */ struct net_device *tdev; /* Device to other host */ - struct iphdr *old_iph = skb->nh.iph; + struct iphdr *old_iph = ip_hdr(skb); u8 tos = old_iph->tos; __be16 df = old_iph->frag_off; struct iphdr *iph; /* Our new IP header */ @@ -377,7 +377,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, } kfree_skb(skb); skb = new_skb; - old_iph = skb->nh.iph; + old_iph = ip_hdr(skb); } skb->h.raw = (void *) old_iph; @@ -396,7 +396,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* * Push down and install the IPIP header. */ - iph = skb->nh.iph; + iph = ip_hdr(skb); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = df; @@ -436,7 +436,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp) { struct rtable *rt; /* Route to the other host */ - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); int mtu; EnterFunction(10); @@ -461,7 +461,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_rt_put(rt); return NF_STOLEN; } - ip_send_check(skb->nh.iph); + ip_send_check(ip_hdr(skb)); /* drop old route */ dst_release(skb->dst); @@ -515,12 +515,12 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, * mangle and send the packet here (only for VS/NAT) */ - if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(skb->nh.iph->tos)))) + if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(ip_hdr(skb)->tos)))) goto tx_error_icmp; /* MTU checking */ mtu = dst_mtu(&rt->u.dst); - if ((skb->len > mtu) && (skb->nh.iph->frag_off & htons(IP_DF))) { + if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) { ip_rt_put(rt); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n"); diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 6069a11514f6..b44192924f95 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -10,7 +10,7 @@ /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type) { - struct iphdr *iph = (*pskb)->nh.iph; + const struct iphdr *iph = ip_hdr(*pskb); struct rtable *rt; struct flowi fl = {}; struct dst_entry *odst; @@ -142,7 +142,7 @@ static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_info *info) struct ip_rt_info *rt_info = nf_info_reroute(info); if (info->hook == NF_IP_LOCAL_OUT) { - const struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); rt_info->tos = iph->tos; rt_info->daddr = iph->daddr; @@ -155,7 +155,7 @@ static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info) const struct ip_rt_info *rt_info = nf_info_reroute(info); if (info->hook == NF_IP_LOCAL_OUT) { - struct iphdr *iph = (*pskb)->nh.iph; + const struct iphdr *iph = ip_hdr(*pskb); if (!(iph->tos == rt_info->tos && iph->daddr == rt_info->daddr @@ -168,7 +168,7 @@ static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info) __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, 
u_int8_t protocol) { - struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); __sum16 csum = 0; switch (skb->ip_summed) { diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index 8c013d9f6907..986c0c81294f 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c @@ -748,9 +748,9 @@ resolve_normal_ct(struct sk_buff *skb, struct ip_conntrack_tuple_hash *h; struct ip_conntrack *ct; - IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0); + IP_NF_ASSERT((ip_hdr(skb)->frag_off & htons(IP_OFFSET)) == 0); - if (!ip_ct_get_tuple(skb->nh.iph, skb, ip_hdrlen(skb), &tuple,proto)) + if (!ip_ct_get_tuple(ip_hdr(skb), skb, ip_hdrlen(skb), &tuple,proto)) return NULL; /* look for tuple match */ @@ -811,10 +811,10 @@ unsigned int ip_conntrack_in(unsigned int hooknum, } /* Never happen */ - if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) { + if (ip_hdr(*pskb)->frag_off & htons(IP_OFFSET)) { if (net_ratelimit()) { printk(KERN_ERR "ip_conntrack_in: Frag of proto %u (hook=%u)\n", - (*pskb)->nh.iph->protocol, hooknum); + ip_hdr(*pskb)->protocol, hooknum); } return NF_DROP; } @@ -825,17 +825,17 @@ unsigned int ip_conntrack_in(unsigned int hooknum, if ((*pskb)->pkt_type == PACKET_BROADCAST) { printk("Broadcast packet!\n"); return NF_ACCEPT; - } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF)) + } else if ((ip_hdr(*pskb)->daddr & htonl(0x000000FF)) == htonl(0x000000FF)) { printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n", - NIPQUAD((*pskb)->nh.iph->saddr), - NIPQUAD((*pskb)->nh.iph->daddr), + NIPQUAD(ip_hdr(*pskb)->saddr), + NIPQUAD(ip_hdr(*pskb)->daddr), (*pskb)->sk, (*pskb)->pkt_type); } #endif /* rcu_read_lock()ed by nf_hook_slow */ - proto = __ip_conntrack_proto_find((*pskb)->nh.iph->protocol); + proto = __ip_conntrack_proto_find(ip_hdr(*pskb)->protocol); /* It may be an special packet, error, unclean... 
* inverse of the return code tells to the netfilter @@ -1152,7 +1152,7 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct, if (do_acct) { ct->counters[CTINFO2DIR(ctinfo)].packets++; ct->counters[CTINFO2DIR(ctinfo)].bytes += - ntohs(skb->nh.iph->tot_len); + ntohs(ip_hdr(skb)->tot_len); if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) event |= IPCT_COUNTER_FILLING; @@ -1210,7 +1210,7 @@ ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user) local_bh_enable(); if (skb) - ip_send_check(skb->nh.iph); + ip_send_check(ip_hdr(skb)); return skb; } diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c index 5d638149b0e0..cecb6e0c8ed0 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c @@ -576,8 +576,8 @@ static int h245_help(struct sk_buff **pskb, struct ip_conntrack *ct, /* Process each TPKT */ while (get_tpkt_data(pskb, ct, ctinfo, &data, &datalen, &dataoff)) { DEBUGP("ip_ct_h245: TPKT %u.%u.%u.%u->%u.%u.%u.%u, len=%d\n", - NIPQUAD((*pskb)->nh.iph->saddr), - NIPQUAD((*pskb)->nh.iph->daddr), datalen); + NIPQUAD(ip_hdr(*pskb)->saddr), + NIPQUAD(ip_hdr(*pskb)->daddr), datalen); /* Decode H.245 signal */ ret = DecodeMultimediaSystemControlMessage(data, datalen, @@ -1128,8 +1128,8 @@ static int q931_help(struct sk_buff **pskb, struct ip_conntrack *ct, /* Process each TPKT */ while (get_tpkt_data(pskb, ct, ctinfo, &data, &datalen, &dataoff)) { DEBUGP("ip_ct_q931: TPKT %u.%u.%u.%u->%u.%u.%u.%u, len=%d\n", - NIPQUAD((*pskb)->nh.iph->saddr), - NIPQUAD((*pskb)->nh.iph->daddr), datalen); + NIPQUAD(ip_hdr(*pskb)->saddr), + NIPQUAD(ip_hdr(*pskb)->daddr), datalen); /* Decode Q.931 signal */ ret = DecodeQ931(data, datalen, &q931); @@ -1741,8 +1741,8 @@ static int ras_help(struct sk_buff **pskb, struct ip_conntrack *ct, if (data == NULL) goto accept; DEBUGP("ip_ct_ras: RAS message %u.%u.%u.%u->%u.%u.%u.%u, len=%d\n", - NIPQUAD((*pskb)->nh.iph->saddr), - NIPQUAD((*pskb)->nh.iph->daddr), datalen); + NIPQUAD(ip_hdr(*pskb)->saddr), + NIPQUAD(ip_hdr(*pskb)->daddr), datalen); /* Decode RAS message */ ret = DecodeRasMessage(data, datalen, &ras); diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c index cc6dd49c9da0..df07c5f1d874 100644 --- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c +++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c @@ -45,7 +45,7 @@ static int help(struct sk_buff **pskb, struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) { struct ip_conntrack_expect *exp; - struct iphdr *iph = (*pskb)->nh.iph; + struct iphdr *iph = ip_hdr(*pskb); struct rtable *rt = (struct rtable *)(*pskb)->dst; struct in_device *in_dev; __be32 mask = 0; diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c index e29c436144b3..91d0c05c8e86 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c @@ -316,7 +316,7 @@ static int sctp_packet(struct ip_conntrack *conntrack, enum ip_conntrack_info ctinfo) { enum sctp_conntrack newconntrack, oldsctpstate; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); sctp_sctphdr_t _sctph, *sh; sctp_chunkhdr_t _sch, *sch; u_int32_t offset, count; @@ -430,7 +430,7 @@ static int sctp_new(struct ip_conntrack *conntrack, const struct sk_buff *skb) { enum sctp_conntrack newconntrack; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = 
ip_hdr(skb); sctp_sctphdr_t _sctph, *sh; sctp_chunkhdr_t _sch, *sch; u_int32_t offset, count; diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c index fce3a3c69815..d03436edfd93 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c @@ -770,8 +770,8 @@ void ip_conntrack_tcp_update(struct sk_buff *skb, struct ip_conntrack *conntrack, enum ip_conntrack_dir dir) { - struct iphdr *iph = skb->nh.iph; - struct tcphdr *tcph = (void *)skb->nh.iph + ip_hdrlen(skb); + struct iphdr *iph = ip_hdr(skb); + struct tcphdr *tcph = (void *)iph + ip_hdrlen(skb); __u32 end; #ifdef DEBUGP_VARS struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir]; @@ -834,13 +834,13 @@ static int tcp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, unsigned int hooknum) { - struct iphdr *iph = skb->nh.iph; + const unsigned int hdrlen = ip_hdrlen(skb); struct tcphdr _tcph, *th; - unsigned int tcplen = skb->len - iph->ihl * 4; + unsigned int tcplen = skb->len - hdrlen; u_int8_t tcpflags; /* Smaller that minimal TCP header? */ - th = skb_header_pointer(skb, iph->ihl * 4, + th = skb_header_pointer(skb, hdrlen, sizeof(_tcph), &_tcph); if (th == NULL) { if (LOG_INVALID(IPPROTO_TCP)) @@ -863,7 +863,7 @@ static int tcp_error(struct sk_buff *skb, */ /* FIXME: Source route IP option packets --RR */ if (ip_conntrack_checksum && hooknum == NF_IP_PRE_ROUTING && - nf_ip_checksum(skb, hooknum, iph->ihl * 4, IPPROTO_TCP)) { + nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_TCP)) { if (LOG_INVALID(IPPROTO_TCP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, "ip_ct_tcp: bad TCP checksum "); @@ -889,7 +889,7 @@ static int tcp_packet(struct ip_conntrack *conntrack, { enum tcp_conntrack new_state, old_state; enum ip_conntrack_dir dir; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); struct tcphdr *th, _tcph; unsigned long timeout; unsigned int index; @@ -1062,7 +1062,7 @@ static int tcp_new(struct ip_conntrack *conntrack, const struct sk_buff *skb) { enum tcp_conntrack new_state; - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); struct tcphdr *th, _tcph; #ifdef DEBUGP_VARS struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[0]; diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c index 14c30c646c7f..3b47987bf1bb 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_udp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_udp.c @@ -89,12 +89,12 @@ static int udp_new(struct ip_conntrack *conntrack, const struct sk_buff *skb) static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, unsigned int hooknum) { - struct iphdr *iph = skb->nh.iph; - unsigned int udplen = skb->len - iph->ihl * 4; + const unsigned int hdrlen = ip_hdrlen(skb); + unsigned int udplen = skb->len - hdrlen; struct udphdr _hdr, *hdr; /* Header is too small? */ - hdr = skb_header_pointer(skb, iph->ihl*4, sizeof(_hdr), &_hdr); + hdr = skb_header_pointer(skb, hdrlen, sizeof(_hdr), &_hdr); if (hdr == NULL) { if (LOG_INVALID(IPPROTO_UDP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, @@ -119,7 +119,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, * because the checksum is assumed to be correct. 
* FIXME: Source route IP option packets --RR */ if (ip_conntrack_checksum && hooknum == NF_IP_PRE_ROUTING && - nf_ip_checksum(skb, hooknum, iph->ihl * 4, IPPROTO_UDP)) { + nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_UDP)) { if (LOG_INVALID(IPPROTO_UDP)) nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, "ip_ct_udp: bad UDP checksum "); diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index 92609a4dcd74..c32200153d62 100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c @@ -439,7 +439,7 @@ static unsigned int ip_conntrack_defrag(unsigned int hooknum, #endif /* Gather fragments. */ - if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { + if (ip_hdr(*pskb)->frag_off & htons(IP_MF | IP_OFFSET)) { *pskb = ip_ct_gather_frags(*pskb, hooknum == NF_IP_PRE_ROUTING ? IP_DEFRAG_CONNTRACK_IN : diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c index 25624e558562..4cddc2951744 100644 --- a/net/ipv4/netfilter/ip_nat_helper.c +++ b/net/ipv4/netfilter/ip_nat_helper.c @@ -94,7 +94,7 @@ static void mangle_contents(struct sk_buff *skb, unsigned char *data; BUG_ON(skb_is_nonlinear(skb)); - data = (unsigned char *)skb->nh.iph + dataoff; + data = skb_network_header(skb) + dataoff; /* move post-replacement */ memmove(data + match_offset + rep_len, @@ -118,8 +118,8 @@ static void mangle_contents(struct sk_buff *skb, } /* fix IP hdr checksum information */ - skb->nh.iph->tot_len = htons(skb->len); - ip_send_check(skb->nh.iph); + ip_hdr(skb)->tot_len = htons(skb->len); + ip_send_check(ip_hdr(skb)); } /* Unusual, but possible case. */ @@ -173,7 +173,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb, SKB_LINEAR_ASSERT(*pskb); - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); tcph = (void *)iph + iph->ihl*4; oldlen = (*pskb)->len - iph->ihl*4; @@ -227,7 +227,7 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb, int datalen, oldlen; /* UDP helpers might accidentally mangle the wrong packet */ - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + match_offset + match_len) return 0; @@ -240,7 +240,7 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb, && !enlarge_skb(pskb, rep_len - match_len)) return 0; - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); udph = (void *)iph + iph->ihl*4; oldlen = (*pskb)->len - iph->ihl*4; diff --git a/net/ipv4/netfilter/ip_nat_helper_h323.c b/net/ipv4/netfilter/ip_nat_helper_h323.c index 8b1e3388bd08..0d9444f9236b 100644 --- a/net/ipv4/netfilter/ip_nat_helper_h323.c +++ b/net/ipv4/netfilter/ip_nat_helper_h323.c @@ -46,7 +46,7 @@ static int set_addr(struct sk_buff **pskb, buf.port = htons(port); addroff += dataoff; - if ((*pskb)->nh.iph->protocol == IPPROTO_TCP) { + if (ip_hdr(*pskb)->protocol == IPPROTO_TCP) { if (!ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, addroff, sizeof(buf), (char *) &buf, sizeof(buf))) { diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c index 080eb1d92200..25415a91e023 100644 --- a/net/ipv4/netfilter/ip_nat_rule.c +++ b/net/ipv4/netfilter/ip_nat_rule.c @@ -158,7 +158,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb, if (hooknum == NF_IP_LOCAL_OUT && mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) - warn_if_extra_mangle((*pskb)->nh.iph->daddr, + warn_if_extra_mangle(ip_hdr(*pskb)->daddr, mr->range[0].min_ip); return ip_nat_setup_info(ct, &mr->range[0], hooknum); diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c 
b/net/ipv4/netfilter/ip_nat_snmp_basic.c index e41d0efae515..025e04587789 100644 --- a/net/ipv4/netfilter/ip_nat_snmp_basic.c +++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c @@ -1193,7 +1193,7 @@ static int snmp_translate(struct ip_conntrack *ct, enum ip_conntrack_info ctinfo, struct sk_buff **pskb) { - struct iphdr *iph = (*pskb)->nh.iph; + struct iphdr *iph = ip_hdr(*pskb); struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); u_int16_t udplen = ntohs(udph->len); u_int16_t paylen = udplen - sizeof(struct udphdr); @@ -1234,7 +1234,7 @@ static int help(struct sk_buff **pskb, { int dir = CTINFO2DIR(ctinfo); unsigned int ret; - struct iphdr *iph = (*pskb)->nh.iph; + struct iphdr *iph = ip_hdr(*pskb); struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); /* SNMP replies and originating SNMP traps get mangled */ diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index dbaaf78ff9a3..32f7bf661fc8 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c @@ -97,7 +97,7 @@ ip_nat_fn(unsigned int hooknum, /* We never see fragments: conntrack defrags on pre-routing and local-out, and ip_nat_out protects post-routing. */ - IP_NF_ASSERT(!((*pskb)->nh.iph->frag_off + IP_NF_ASSERT(!(ip_hdr(*pskb)->frag_off & htons(IP_MF|IP_OFFSET))); ct = ip_conntrack_get(*pskb, &ctinfo); @@ -109,7 +109,7 @@ ip_nat_fn(unsigned int hooknum, /* Exception: ICMP redirect to new connection (not in hash table yet). We must not let this through, in case we're doing NAT to the same network. */ - if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { + if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) { struct icmphdr _hdr, *hp; hp = skb_header_pointer(*pskb, ip_hdrlen(*pskb), @@ -128,7 +128,7 @@ ip_nat_fn(unsigned int hooknum, switch (ctinfo) { case IP_CT_RELATED: case IP_CT_RELATED+IP_CT_IS_REPLY: - if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { + if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) { if (!ip_nat_icmp_reply_translation(ct, ctinfo, hooknum, pskb)) return NF_DROP; @@ -184,11 +184,11 @@ ip_nat_in(unsigned int hooknum, int (*okfn)(struct sk_buff *)) { unsigned int ret; - __be32 daddr = (*pskb)->nh.iph->daddr; + __be32 daddr = ip_hdr(*pskb)->daddr; ret = ip_nat_fn(hooknum, pskb, in, out, okfn); if (ret != NF_DROP && ret != NF_STOLEN - && daddr != (*pskb)->nh.iph->daddr) { + && daddr != ip_hdr(*pskb)->daddr) { dst_release((*pskb)->dst); (*pskb)->dst = NULL; } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index f66966650212..39ab8ae282e2 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -231,7 +231,7 @@ ipt_do_table(struct sk_buff **pskb, struct xt_table_info *private; /* Initialization */ - ip = (*pskb)->nh.iph; + ip = ip_hdr(*pskb); datalen = (*pskb)->len - ip->ihl * 4; indev = in ? in->name : nulldevname; outdev = out ? out->name : nulldevname; @@ -320,7 +320,7 @@ ipt_do_table(struct sk_buff **pskb, = 0x57acc001; #endif /* Target might have changed stuff. 
*/ - ip = (*pskb)->nh.iph; + ip = ip_hdr(*pskb); datalen = (*pskb)->len - ip->ihl * 4; if (verdict == IPT_CONTINUE) diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 42b08029e867..af5b82b8ceb7 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -240,7 +240,7 @@ clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum) static inline u_int32_t clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config) { - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); unsigned long hashval; u_int16_t sport, dport; u_int16_t *ports; @@ -328,7 +328,7 @@ target(struct sk_buff **pskb, /* special case: ICMP error handling. conntrack distinguishes between * error messages (RELATED) and information requests (see below) */ - if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP + if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP && (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY)) return XT_CONTINUE; diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index 44daf9e1da35..97c0e53c8b22 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c @@ -30,13 +30,13 @@ MODULE_DESCRIPTION("iptables ECN modification module"); static inline int set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) { - struct iphdr *iph = (*pskb)->nh.iph; + struct iphdr *iph = ip_hdr(*pskb); if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) { __u8 oldtos; if (!skb_make_writable(pskb, sizeof(struct iphdr))) return 0; - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); oldtos = iph->tos; iph->tos &= ~IPT_ECN_IP_MASK; iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); @@ -66,7 +66,7 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) return 0; - tcph = (void *)(*pskb)->nh.iph + ip_hdrlen(*pskb); + tcph = (void *)ip_hdr(*pskb) + ip_hdrlen(*pskb); oldval = ((__be16 *)tcph)[6]; if (einfo->operation & IPT_ECN_OP_SET_ECE) @@ -94,7 +94,7 @@ target(struct sk_buff **pskb, return NF_DROP; if (einfo->operation & (IPT_ECN_OP_SET_ECE | IPT_ECN_OP_SET_CWR) - && (*pskb)->nh.iph->protocol == IPPROTO_TCP) + && ip_hdr(*pskb)->protocol == IPPROTO_TCP) if (!set_ect_tcp(pskb, einfo)) return NF_DROP; diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c index fd7aaa347cd8..d03f165722da 100644 --- a/net/ipv4/netfilter/ipt_NETMAP.c +++ b/net/ipv4/netfilter/ipt_NETMAP.c @@ -75,9 +75,9 @@ target(struct sk_buff **pskb, netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); if (hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT) - new_ip = (*pskb)->nh.iph->daddr & ~netmask; + new_ip = ip_hdr(*pskb)->daddr & ~netmask; else - new_ip = (*pskb)->nh.iph->saddr & ~netmask; + new_ip = ip_hdr(*pskb)->saddr & ~netmask; new_ip |= mr->range[0].min_ip & netmask; newrange = ((struct ip_nat_range) diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 01c04f0e5c91..1399e7c183ba 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c @@ -43,6 +43,7 @@ MODULE_DESCRIPTION("iptables REJECT target module"); static void send_reset(struct sk_buff *oldskb, int hook) { struct sk_buff *nskb; + struct iphdr *niph; struct tcphdr _otcph, *oth, *tcph; __be16 tmp_port; __be32 tmp_addr; @@ -50,7 +51,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) unsigned int addr_type; /* IP header checks: fragment. 
*/ - if (oldskb->nh.iph->frag_off & htons(IP_OFFSET)) + if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) return; oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), @@ -86,9 +87,10 @@ static void send_reset(struct sk_buff *oldskb, int hook) tcph = (struct tcphdr *)(skb_network_header(nskb) + ip_hdrlen(nskb)); /* Swap source and dest */ - tmp_addr = nskb->nh.iph->saddr; - nskb->nh.iph->saddr = nskb->nh.iph->daddr; - nskb->nh.iph->daddr = tmp_addr; + niph = ip_hdr(nskb); + tmp_addr = niph->saddr; + niph->saddr = niph->daddr; + niph->daddr = tmp_addr; tmp_port = tcph->source; tcph->source = tcph->dest; tcph->dest = tmp_port; @@ -96,7 +98,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) /* Truncate to length (no data) */ tcph->doff = sizeof(struct tcphdr)/4; skb_trim(nskb, ip_hdrlen(nskb) + sizeof(struct tcphdr)); - nskb->nh.iph->tot_len = htons(nskb->len); + niph->tot_len = htons(nskb->len); if (tcph->ack) { needs_ack = 0; @@ -121,14 +123,13 @@ static void send_reset(struct sk_buff *oldskb, int hook) /* Adjust TCP checksum */ tcph->check = 0; tcph->check = tcp_v4_check(sizeof(struct tcphdr), - nskb->nh.iph->saddr, - nskb->nh.iph->daddr, + niph->saddr, niph->daddr, csum_partial((char *)tcph, sizeof(struct tcphdr), 0)); /* Set DF, id = 0 */ - nskb->nh.iph->frag_off = htons(IP_DF); - nskb->nh.iph->id = 0; + niph->frag_off = htons(IP_DF); + niph->id = 0; addr_type = RTN_UNSPEC; if (hook != NF_IP_FORWARD @@ -144,12 +145,11 @@ static void send_reset(struct sk_buff *oldskb, int hook) nskb->ip_summed = CHECKSUM_NONE; /* Adjust IP TTL */ - nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT); + niph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT); /* Adjust IP checksum */ - nskb->nh.iph->check = 0; - nskb->nh.iph->check = ip_fast_csum(skb_network_header(nskb), - nskb->nh.iph->ihl); + niph->check = 0; + niph->check = ip_fast_csum(skb_network_header(nskb), niph->ihl); /* "Never happens" */ if (nskb->len > dst_mtu(nskb->dst)) diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c index cedf9f7d9d6e..0ad02f249837 100644 --- a/net/ipv4/netfilter/ipt_TOS.c +++ b/net/ipv4/netfilter/ipt_TOS.c @@ -29,13 +29,13 @@ target(struct sk_buff **pskb, const void *targinfo) { const struct ipt_tos_target_info *tosinfo = targinfo; - struct iphdr *iph = (*pskb)->nh.iph; + struct iphdr *iph = ip_hdr(*pskb); if ((iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) { __u8 oldtos; if (!skb_make_writable(pskb, sizeof(struct iphdr))) return NF_DROP; - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); oldtos = iph->tos; iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos; nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c index 64be31c22ba9..a991ec7bd4e7 100644 --- a/net/ipv4/netfilter/ipt_TTL.c +++ b/net/ipv4/netfilter/ipt_TTL.c @@ -32,7 +32,7 @@ ipt_ttl_target(struct sk_buff **pskb, if (!skb_make_writable(pskb, (*pskb)->len)) return NF_DROP; - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); switch (info->mode) { case IPT_TTL_SET: diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c index cfa0472617f6..a652a1451552 100644 --- a/net/ipv4/netfilter/ipt_addrtype.c +++ b/net/ipv4/netfilter/ipt_addrtype.c @@ -33,7 +33,7 @@ static int match(const struct sk_buff *skb, int offset, unsigned int protoff, int *hotdrop) { const struct ipt_addrtype_info *info = matchinfo; - const struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); int ret = 1; if (info->source) diff --git 
a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c index b8ade3cc7757..3b4ca0c5c121 100644 --- a/net/ipv4/netfilter/ipt_ecn.c +++ b/net/ipv4/netfilter/ipt_ecn.c @@ -27,7 +27,7 @@ MODULE_LICENSE("GPL"); static inline int match_ip(const struct sk_buff *skb, const struct ipt_ecn_info *einfo) { - return ((skb->nh.iph->tos&IPT_ECN_IP_MASK) == einfo->ip_ect); + return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect; } static inline int match_tcp(const struct sk_buff *skb, @@ -80,7 +80,7 @@ static int match(const struct sk_buff *skb, return 0; if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) { - if (skb->nh.iph->protocol != IPPROTO_TCP) + if (ip_hdr(skb)->protocol != IPPROTO_TCP) return 0; if (!match_tcp(skb, info, hotdrop)) return 0; diff --git a/net/ipv4/netfilter/ipt_iprange.c b/net/ipv4/netfilter/ipt_iprange.c index bc5d5e6091e4..33af9e940887 100644 --- a/net/ipv4/netfilter/ipt_iprange.c +++ b/net/ipv4/netfilter/ipt_iprange.c @@ -32,7 +32,7 @@ match(const struct sk_buff *skb, int offset, unsigned int protoff, int *hotdrop) { const struct ipt_iprange_info *info = matchinfo; - const struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); if (info->flags & IPRANGE_SRC) { if (((ntohl(iph->saddr) < ntohl(info->src.min_ip)) diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c index aecb9c48e152..15a9e8bbb7cc 100644 --- a/net/ipv4/netfilter/ipt_recent.c +++ b/net/ipv4/netfilter/ipt_recent.c @@ -183,11 +183,11 @@ ipt_recent_match(const struct sk_buff *skb, int ret = info->invert; if (info->side == IPT_RECENT_DEST) - addr = skb->nh.iph->daddr; + addr = ip_hdr(skb)->daddr; else - addr = skb->nh.iph->saddr; + addr = ip_hdr(skb)->saddr; - ttl = skb->nh.iph->ttl; + ttl = ip_hdr(skb)->ttl; /* use TTL as seen before forwarding */ if (out && !skb->sk) ttl++; diff --git a/net/ipv4/netfilter/ipt_tos.c b/net/ipv4/netfilter/ipt_tos.c index 5d33b51d49d8..d314844af12b 100644 --- a/net/ipv4/netfilter/ipt_tos.c +++ b/net/ipv4/netfilter/ipt_tos.c @@ -30,7 +30,7 @@ match(const struct sk_buff *skb, { const struct ipt_tos_info *info = matchinfo; - return (skb->nh.iph->tos == info->tos) ^ info->invert; + return (ip_hdr(skb)->tos == info->tos) ^ info->invert; } static struct xt_match tos_match = { diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c index 1eca9f400374..9615c04a2fc6 100644 --- a/net/ipv4/netfilter/ipt_ttl.c +++ b/net/ipv4/netfilter/ipt_ttl.c @@ -26,19 +26,20 @@ static int match(const struct sk_buff *skb, int offset, unsigned int protoff, int *hotdrop) { const struct ipt_ttl_info *info = matchinfo; + const u8 ttl = ip_hdr(skb)->ttl; switch (info->mode) { case IPT_TTL_EQ: - return (skb->nh.iph->ttl == info->ttl); + return (ttl == info->ttl); break; case IPT_TTL_NE: - return (!(skb->nh.iph->ttl == info->ttl)); + return (!(ttl == info->ttl)); break; case IPT_TTL_LT: - return (skb->nh.iph->ttl < info->ttl); + return (ttl < info->ttl); break; case IPT_TTL_GT: - return (skb->nh.iph->ttl > info->ttl); + return (ttl > info->ttl); break; default: printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index 6cc3245f676a..26e60fbe7ee0 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c @@ -131,6 +131,7 @@ ipt_local_hook(unsigned int hook, int (*okfn)(struct sk_buff *)) { unsigned int ret; + const struct iphdr *iph; u_int8_t tos; __be32 saddr, daddr; u_int32_t mark; @@ -145,19 +146,23 @@ 
ipt_local_hook(unsigned int hook, /* Save things which could affect route */ mark = (*pskb)->mark; - saddr = (*pskb)->nh.iph->saddr; - daddr = (*pskb)->nh.iph->daddr; - tos = (*pskb)->nh.iph->tos; + iph = ip_hdr(*pskb); + saddr = iph->saddr; + daddr = iph->daddr; + tos = iph->tos; ret = ipt_do_table(pskb, hook, in, out, &packet_mangler); /* Reroute for ANY change. */ - if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE - && ((*pskb)->nh.iph->saddr != saddr - || (*pskb)->nh.iph->daddr != daddr - || (*pskb)->mark != mark - || (*pskb)->nh.iph->tos != tos)) - if (ip_route_me_harder(pskb, RTN_UNSPEC)) - ret = NF_DROP; + if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { + iph = ip_hdr(*pskb); + + if (iph->saddr != saddr || + iph->daddr != daddr || + (*pskb)->mark != mark || + iph->tos != tos) + if (ip_route_me_harder(pskb, RTN_UNSPEC)) + ret = NF_DROP; + } return ret; } diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index fa14eb77f9b6..d52ca0c1ce8d 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -87,7 +87,7 @@ nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) local_bh_enable(); if (skb) - ip_send_check(skb->nh.iph); + ip_send_check(ip_hdr(skb)); return skb; } @@ -97,16 +97,16 @@ ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff, u_int8_t *protonum) { /* Never happen */ - if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) { + if (ip_hdr(*pskb)->frag_off & htons(IP_OFFSET)) { if (net_ratelimit()) { printk(KERN_ERR "ipv4_prepare: Frag of proto %u (hook=%u)\n", - (*pskb)->nh.iph->protocol, hooknum); + ip_hdr(*pskb)->protocol, hooknum); } return -NF_DROP; } *dataoff = skb_network_offset(*pskb) + ip_hdrlen(*pskb); - *protonum = (*pskb)->nh.iph->protocol; + *protonum = ip_hdr(*pskb)->protocol; return NF_ACCEPT; } @@ -170,7 +170,7 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, #endif /* Gather fragments. */ - if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { + if (ip_hdr(*pskb)->frag_off & htons(IP_MF | IP_OFFSET)) { *pskb = nf_ct_ipv4_gather_frags(*pskb, hooknum == NF_IP_PRE_ROUTING ? IP_DEFRAG_CONNTRACK_IN : diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index 2eb3832db3a4..3c58fea0d391 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -44,7 +44,7 @@ static int set_addr(struct sk_buff **pskb, buf.port = port; addroff += dataoff; - if ((*pskb)->nh.iph->protocol == IPPROTO_TCP) { + if (ip_hdr(*pskb)->protocol == IPPROTO_TCP) { if (!nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, addroff, sizeof(buf), (char *) &buf, sizeof(buf))) { diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index 723302afd840..c2c92ff12781 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c @@ -87,7 +87,7 @@ static void mangle_contents(struct sk_buff *skb, unsigned char *data; BUG_ON(skb_is_nonlinear(skb)); - data = (unsigned char *)skb->nh.iph + dataoff; + data = skb_network_header(skb) + dataoff; /* move post-replacement */ memmove(data + match_offset + rep_len, @@ -111,8 +111,8 @@ static void mangle_contents(struct sk_buff *skb, } /* fix IP hdr checksum information */ - skb->nh.iph->tot_len = htons(skb->len); - ip_send_check(skb->nh.iph); + ip_hdr(skb)->tot_len = htons(skb->len); + ip_send_check(ip_hdr(skb)); } /* Unusual, but possible case. 
*/ @@ -166,7 +166,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb, SKB_LINEAR_ASSERT(*pskb); - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); tcph = (void *)iph + iph->ihl*4; oldlen = (*pskb)->len - iph->ihl*4; @@ -221,7 +221,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb, int datalen, oldlen; /* UDP helpers might accidentally mangle the wrong packet */ - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + match_offset + match_len) return 0; @@ -234,7 +234,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb, !enlarge_skb(pskb, rep_len - match_len)) return 0; - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); udph = (void *)iph + iph->ihl*4; oldlen = (*pskb)->len - iph->ihl*4; diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 147a4370cf03..2a283397a8b6 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c @@ -191,7 +191,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb, if (hooknum == NF_IP_LOCAL_OUT && mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) - warn_if_extra_mangle((*pskb)->nh.iph->daddr, + warn_if_extra_mangle(ip_hdr(*pskb)->daddr, mr->range[0].min_ip); return nf_nat_setup_info(ct, &mr->range[0], hooknum); diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index ce5c4939a6ee..0cc0d97585df 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -1194,7 +1194,7 @@ static int snmp_translate(struct nf_conn *ct, enum ip_conntrack_info ctinfo, struct sk_buff **pskb) { - struct iphdr *iph = (*pskb)->nh.iph; + struct iphdr *iph = ip_hdr(*pskb); struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); u_int16_t udplen = ntohs(udph->len); u_int16_t paylen = udplen - sizeof(struct udphdr); @@ -1235,7 +1235,7 @@ static int help(struct sk_buff **pskb, unsigned int protoff, { int dir = CTINFO2DIR(ctinfo); unsigned int ret; - struct iphdr *iph = (*pskb)->nh.iph; + struct iphdr *iph = ip_hdr(*pskb); struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); /* SNMP replies and originating SNMP traps get mangled */ diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index 61ca272165a1..64bbed2ba780 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c @@ -86,8 +86,7 @@ nf_nat_fn(unsigned int hooknum, /* We never see fragments: conntrack defrags on pre-routing and local-out, and nf_nat_out protects post-routing. */ - NF_CT_ASSERT(!((*pskb)->nh.iph->frag_off - & htons(IP_MF|IP_OFFSET))); + NF_CT_ASSERT(!(ip_hdr(*pskb)->frag_off & htons(IP_MF | IP_OFFSET))); ct = nf_ct_get(*pskb, &ctinfo); /* Can't track? It's not due to stress, or conntrack would @@ -98,7 +97,7 @@ nf_nat_fn(unsigned int hooknum, /* Exception: ICMP redirect to new connection (not in hash table yet). We must not let this through, in case we're doing NAT to the same network. 
*/ - if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { + if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) { struct icmphdr _hdr, *hp; hp = skb_header_pointer(*pskb, ip_hdrlen(*pskb), @@ -121,7 +120,7 @@ nf_nat_fn(unsigned int hooknum, switch (ctinfo) { case IP_CT_RELATED: case IP_CT_RELATED+IP_CT_IS_REPLY: - if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { + if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) { if (!nf_nat_icmp_reply_translation(ct, ctinfo, hooknum, pskb)) return NF_DROP; @@ -176,11 +175,11 @@ nf_nat_in(unsigned int hooknum, int (*okfn)(struct sk_buff *)) { unsigned int ret; - __be32 daddr = (*pskb)->nh.iph->daddr; + __be32 daddr = ip_hdr(*pskb)->daddr; ret = nf_nat_fn(hooknum, pskb, in, out, okfn); if (ret != NF_DROP && ret != NF_STOLEN && - daddr != (*pskb)->nh.iph->daddr) { + daddr != ip_hdr(*pskb)->daddr) { dst_release((*pskb)->dst); (*pskb)->dst = NULL; } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index c3757bb270ca..ac57afa7c316 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -292,7 +292,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, skb->dst = dst_clone(&rt->u.dst); skb_reset_network_header(skb); - iph = skb->nh.iph; + iph = ip_hdr(skb); skb_put(skb, length); skb->ip_summed = CHECKSUM_NONE; @@ -615,7 +615,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; - sin->sin_addr.s_addr = skb->nh.iph->saddr; + sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e50ad7dbbde8..58417393dec1 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1519,7 +1519,7 @@ static void ipv4_link_failure(struct sk_buff *skb) static int ip_rt_bug(struct sk_buff *skb) { printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n", - NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr), + NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr), skb->dev ? skb->dev->name : "?"); kfree_skb(skb); return 0; @@ -2134,7 +2134,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, rcu_read_lock(); if ((in_dev = __in_dev_get_rcu(dev)) != NULL) { int our = ip_check_mc(in_dev, daddr, saddr, - skb->nh.iph->protocol); + ip_hdr(skb)->protocol); if (our #ifdef CONFIG_IP_MROUTE || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev)) @@ -2751,7 +2751,7 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) skb_reset_network_header(skb); /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ - skb->nh.iph->protocol = IPPROTO_ICMP; + ip_hdr(skb)->protocol = IPPROTO_ICMP; skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); src = tb[RTA_SRC] ? 
nla_get_be32(tb[RTA_SRC]) : 0; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 33016cc90f0b..261607178491 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -138,7 +138,7 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT); - return secure_tcp_syn_cookie(skb->nh.iph->saddr, skb->nh.iph->daddr, + return secure_tcp_syn_cookie(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->h.th->source, skb->h.th->dest, ntohl(skb->h.th->seq), jiffies / (HZ * 60), mssind); @@ -162,7 +162,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) seq = ntohl(skb->h.th->seq)-1; mssind = check_tcp_syn_cookie(cookie, - skb->nh.iph->saddr, skb->nh.iph->daddr, + ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->h.th->source, skb->h.th->dest, seq, jiffies / (HZ * 60), COUNTER_TRIES); @@ -224,8 +224,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, treq->snt_isn = cookie; req->mss = mss; ireq->rmt_port = skb->h.th->source; - ireq->loc_addr = skb->nh.iph->daddr; - ireq->rmt_addr = skb->nh.iph->saddr; + ireq->loc_addr = ip_hdr(skb)->daddr; + ireq->rmt_addr = ip_hdr(skb)->saddr; ireq->opt = NULL; /* We throwed the options of the initial SYN away, so we hope diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3326681b8429..3a86d6b887ac 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -125,8 +125,8 @@ void tcp_unhash(struct sock *sk) static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) { - return secure_tcp_sequence_number(skb->nh.iph->daddr, - skb->nh.iph->saddr, + return secure_tcp_sequence_number(ip_hdr(skb)->daddr, + ip_hdr(skb)->saddr, skb->h.th->dest, skb->h.th->source); } @@ -515,13 +515,13 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) int tcp_v4_gso_send_check(struct sk_buff *skb) { - struct iphdr *iph; + const struct iphdr *iph; struct tcphdr *th; if (!pskb_may_pull(skb, sizeof(*th))) return -EINVAL; - iph = skb->nh.iph; + iph = ip_hdr(skb); th = skb->h.th; th->check = 0; @@ -585,7 +585,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.iov[0].iov_len = sizeof(rep.th); #ifdef CONFIG_TCP_MD5SIG - key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL; + key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL; if (key) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | @@ -597,14 +597,14 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1], key, - skb->nh.iph->daddr, - skb->nh.iph->saddr, + ip_hdr(skb)->daddr, + ip_hdr(skb)->saddr, &rep.th, IPPROTO_TCP, arg.iov[0].iov_len); } #endif - arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, - skb->nh.iph->saddr, /* XXX */ + arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, + ip_hdr(skb)->saddr, /* XXX */ sizeof(struct tcphdr), IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; @@ -670,7 +670,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, * skb->sk) holds true, but we program defensively. 
*/ if (!twsk && skb->sk) { - key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr); + key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr); } else if (twsk && twsk->tw_md5_keylen) { tw_key.key = twsk->tw_md5_key; tw_key.keylen = twsk->tw_md5_keylen; @@ -690,14 +690,14 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset], key, - skb->nh.iph->daddr, - skb->nh.iph->saddr, + ip_hdr(skb)->daddr, + ip_hdr(skb)->saddr, &rep.th, IPPROTO_TCP, arg.iov[0].iov_len); } #endif - arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, - skb->nh.iph->saddr, /* XXX */ + arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, + ip_hdr(skb)->saddr, /* XXX */ arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; @@ -1133,7 +1133,7 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) */ __u8 *hash_location = NULL; struct tcp_md5sig_key *hash_expected; - struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); struct tcphdr *th = skb->h.th; int length = (th->doff << 2) - sizeof(struct tcphdr); int genhash; @@ -1251,8 +1251,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) struct inet_request_sock *ireq; struct tcp_options_received tmp_opt; struct request_sock *req; - __be32 saddr = skb->nh.iph->saddr; - __be32 daddr = skb->nh.iph->daddr; + __be32 saddr = ip_hdr(skb)->saddr; + __be32 daddr = ip_hdr(skb)->daddr; __u32 isn = TCP_SKB_CB(skb)->when; struct dst_entry *dst = NULL; #ifdef CONFIG_SYN_COOKIES @@ -1439,7 +1439,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newinet->opt = ireq->opt; ireq->opt = NULL; newinet->mc_index = inet_iif(skb); - newinet->mc_ttl = skb->nh.iph->ttl; + newinet->mc_ttl = ip_hdr(skb)->ttl; inet_csk(newsk)->icsk_ext_hdr_len = 0; if (newinet->opt) inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen; @@ -1482,7 +1482,7 @@ exit: static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) { struct tcphdr *th = skb->h.th; - struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); struct sock *nsk; struct request_sock **prev; /* Find possible connection requests. 
*/ @@ -1491,9 +1491,8 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) if (req) return tcp_check_req(sk, skb, req, prev); - nsk = inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr, - th->source, skb->nh.iph->daddr, - th->dest, inet_iif(skb)); + nsk = inet_lookup_established(&tcp_hashinfo, iph->saddr, th->source, + iph->daddr, th->dest, inet_iif(skb)); if (nsk) { if (nsk->sk_state != TCP_TIME_WAIT) { @@ -1513,15 +1512,17 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) static __sum16 tcp_v4_checksum_init(struct sk_buff *skb) { + const struct iphdr *iph = ip_hdr(skb); + if (skb->ip_summed == CHECKSUM_COMPLETE) { - if (!tcp_v4_check(skb->len, skb->nh.iph->saddr, - skb->nh.iph->daddr, skb->csum)) { + if (!tcp_v4_check(skb->len, iph->saddr, + iph->daddr, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; return 0; } } - skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr, + skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, IPPROTO_TCP, 0); if (skb->len <= 76) { @@ -1610,6 +1611,7 @@ csum_err: int tcp_v4_rcv(struct sk_buff *skb) { + const struct iphdr *iph; struct tcphdr *th; struct sock *sk; int ret; @@ -1639,18 +1641,17 @@ int tcp_v4_rcv(struct sk_buff *skb) goto bad_packet; th = skb->h.th; + iph = ip_hdr(skb); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->when = 0; - TCP_SKB_CB(skb)->flags = skb->nh.iph->tos; + TCP_SKB_CB(skb)->flags = iph->tos; TCP_SKB_CB(skb)->sacked = 0; - sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source, - skb->nh.iph->daddr, th->dest, - inet_iif(skb)); - + sk = __inet_lookup(&tcp_hashinfo, iph->saddr, th->source, + iph->daddr, th->dest, inet_iif(skb)); if (!sk) goto no_tcp_socket; @@ -1724,8 +1725,7 @@ do_time_wait: switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { case TCP_TW_SYN: { struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, - skb->nh.iph->daddr, - th->dest, + iph->daddr, th->dest, inet_iif(skb)); if (sk2) { inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1bbf5510cf3a..b4cad50c18e9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -867,7 +867,7 @@ try_again: { sin->sin_family = AF_INET; sin->sin_port = skb->h.uh->source; - sin->sin_addr.s_addr = skb->nh.iph->saddr; + sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) @@ -990,7 +990,7 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) return 0; /* Now we can update and verify the packet length... 
*/ - iph = skb->nh.iph; + iph = ip_hdr(skb); iphlen = iph->ihl << 2; iph->tot_len = htons(ntohs(iph->tot_len) - len); if (skb->len < iphlen + len) { @@ -1168,6 +1168,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb, static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) { + const struct iphdr *iph; int err; UDP_SKB_CB(skb)->partial_cov = 0; @@ -1179,16 +1180,16 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, return err; } + iph = ip_hdr(skb); if (uh->check == 0) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else if (skb->ip_summed == CHECKSUM_COMPLETE) { - if (!csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr, - skb->len, proto, skb->csum)) + if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, + proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, - skb->nh.iph->daddr, + skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, proto, 0); /* Probably, we should checksum udp header (it should be in cache * in any case) and data in tiny packets (< rx copybreak). @@ -1208,8 +1209,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], struct udphdr *uh = skb->h.uh; unsigned short ulen; struct rtable *rt = (struct rtable*)skb->dst; - __be32 saddr = skb->nh.iph->saddr; - __be32 daddr = skb->nh.iph->daddr; + __be32 saddr = ip_hdr(skb)->saddr; + __be32 daddr = ip_hdr(skb)->daddr; /* * Validate the packet. diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index d89969c502dd..5ceca951d73f 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -28,7 +28,7 @@ static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 switch (nexthdr) { case IPPROTO_IPIP: case IPPROTO_IPV6: - *spi = skb->nh.iph->saddr; + *spi = ip_hdr(skb)->saddr; *seq = 0; return 0; } @@ -39,9 +39,9 @@ static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 #ifdef CONFIG_NETFILTER static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) { - struct iphdr *iph = skb->nh.iph; - if (skb->dst == NULL) { + const struct iphdr *iph = ip_hdr(skb); + if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, skb->dev)) goto drop; @@ -55,18 +55,18 @@ drop: int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) { - int err; __be32 spi, seq; struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH]; struct xfrm_state *x; int xfrm_nr = 0; int decaps = 0; + int err = xfrm4_parse_spi(skb, ip_hdr(skb)->protocol, &spi, &seq); - if ((err = xfrm4_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) != 0) + if (err != 0) goto drop; do { - struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); if (xfrm_nr == XFRM_MAX_DEPTH) goto drop; @@ -113,7 +113,8 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) break; } - if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) < 0) + err = xfrm_parse_spi(skb, ip_hdr(skb)->protocol, &spi, &seq); + if (err < 0) goto drop; } while (!err); @@ -147,14 +148,14 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) } else { #ifdef CONFIG_NETFILTER __skb_push(skb, skb->data - skb_network_header(skb)); - skb->nh.iph->tot_len = htons(skb->len); - ip_send_check(skb->nh.iph); + ip_hdr(skb)->tot_len = htons(skb->len); + ip_send_check(ip_hdr(skb)); NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL, xfrm4_rcv_encap_finish); return 0; #else - return -skb->nh.iph->protocol; + return -ip_hdr(skb)->protocol; #endif 
} diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c index 505fca034a1f..9e5ba12c6c75 100644 --- a/net/ipv4/xfrm4_mode_beet.c +++ b/net/ipv4/xfrm4_mode_beet.c @@ -32,8 +32,8 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) struct iphdr *iph, *top_iph = NULL; int hdrlen, optlen; - iph = skb->nh.iph; - skb->h.ipiph = iph; + iph = ip_hdr(skb); + skb->h.raw = skb->nh.raw; hdrlen = 0; optlen = iph->ihl * 4 - sizeof(*iph); @@ -42,7 +42,7 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) skb_push(skb, x->props.header_len + hdrlen); skb_reset_network_header(skb); - top_iph = skb->nh.iph; + top_iph = ip_hdr(skb); skb->h.raw += sizeof(*iph) - hdrlen; memmove(top_iph, iph, sizeof(*iph)); @@ -70,7 +70,7 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb) { - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); int phlen = 0; int optlen = 0; __u8 ph_nexthdr = 0, protocol = 0; @@ -102,7 +102,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb) skb->h.raw = skb->data + (phlen + optlen); skb->data = skb->h.raw; - iph = skb->nh.iph; + iph = ip_hdr(skb); iph->ihl = (sizeof(*iph) + optlen) / 4; iph->tot_len = htons(skb->len + iph->ihl * 4); iph->daddr = x->sel.daddr.a4; diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index b198087c073e..124f24bc4dbc 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -23,13 +23,10 @@ */ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) { - struct iphdr *iph; - int ihl; + struct iphdr *iph = ip_hdr(skb); + int ihl = iph->ihl * 4; - iph = skb->nh.iph; - skb->h.ipiph = iph; - - ihl = iph->ihl * 4; + skb->h.raw = skb->nh.raw; skb->h.raw += ihl; skb_push(skb, x->props.header_len); @@ -54,7 +51,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) memmove(skb->h.raw, skb_network_header(skb), ihl); skb->nh.raw = skb->h.raw; } - skb->nh.iph->tot_len = htons(skb->len + ihl); + ip_hdr(skb)->tot_len = htons(skb->len + ihl); skb->h.raw = skb->data; return 0; } diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index bec851f278e5..faa1b9a76e76 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -16,7 +16,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb) { - struct iphdr *outer_iph = skb->nh.iph; + struct iphdr *outer_iph = ip_hdr(skb); struct iphdr *inner_iph = skb->h.ipiph; if (INET_ECN_is_ce(outer_iph->tos)) @@ -46,12 +46,12 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) struct iphdr *iph, *top_iph; int flags; - iph = skb->nh.iph; + iph = ip_hdr(skb); skb->h.ipiph = iph; skb_push(skb, x->props.header_len); skb_reset_network_header(skb); - top_iph = skb->nh.iph; + top_iph = ip_hdr(skb); top_iph->ihl = 5; top_iph->version = 4; @@ -91,7 +91,7 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) { - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); const unsigned char *old_mac; int err = -EINVAL; @@ -113,7 +113,7 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) goto out; - iph = skb->nh.iph; + iph = ip_hdr(skb); if (iph->protocol == IPPROTO_IPIP) { if (x->props.flags & XFRM_STATE_DECAP_DSCP) 
ipv4_copy_dscp(iph, skb->h.ipiph); diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 038ca160fe2c..44ef208a75cb 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -22,14 +22,13 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) { int mtu, ret = 0; struct dst_entry *dst; - struct iphdr *iph = skb->nh.iph; if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) goto out; IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; - if (!(iph->frag_off & htons(IP_DF)) || skb->local_df) + if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) goto out; dst = skb->dst; diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index fbb1d3decf02..f1c32ff59d16 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -209,7 +209,7 @@ error: static void _decode_session4(struct sk_buff *skb, struct flowi *fl) { - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); u8 *xprth = skb_network_header(skb) + iph->ihl * 4; memset(fl, 0, sizeof(struct flowi)); diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c index 3eef06454da9..568510304553 100644 --- a/net/ipv4/xfrm4_tunnel.c +++ b/net/ipv4/xfrm4_tunnel.c @@ -12,9 +12,8 @@ static int ipip_output(struct xfrm_state *x, struct sk_buff *skb) { - struct iphdr *iph; + struct iphdr *iph = ip_hdr(skb); - iph = skb->nh.iph; iph->tot_len = htons(skb->len); ip_send_check(iph); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 7a86db6163ee..ac95d3bfdfb7 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -349,8 +349,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) struct inet_sock *inet = inet_sk(sk); ipv6_addr_set(&sin->sin6_addr, 0, 0, - htonl(0xffff), - skb->nh.iph->saddr); + htonl(0xffff), ip_hdr(skb)->saddr); if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index aafbdfa8d785..bb65779be7a6 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -526,7 +526,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, skb2->dst = NULL; skb_pull(skb2, offset); skb_reset_network_header(skb2); - eiph = skb2->nh.iph; + eiph = ip_hdr(skb2); /* Try to guess incoming interface */ memset(&fl, 0, sizeof(fl)); @@ -625,10 +625,10 @@ static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) - ipv4_change_dsfield(skb->nh.iph, INET_ECN_MASK, dsfield); + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield); if (INET_ECN_is_ce(dsfield)) - IP_ECN_set_ce(skb->nh.iph); + IP_ECN_set_ce(ip_hdr(skb)); } static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, @@ -944,7 +944,7 @@ static inline int ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); - struct iphdr *iph = skb->nh.iph; + struct iphdr *iph = ip_hdr(skb); int encap_limit = -1; struct flowi fl; __u8 dsfield; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 62883d41b6c3..e33ac3c3a9ca 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -377,7 +377,7 @@ static int ipip6_rcv(struct sk_buff *skb) if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto out; - iph = skb->nh.iph; + iph = ip_hdr(skb); read_lock(&ipip6_lock); if ((tunnel = ipip6_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) { @@ -565,7 +565,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) * Push down and install the IPIP header. 
*/ - iph = skb->nh.iph; + iph = ip_hdr(skb); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; if (mtu > IPV6_MIN_MTU) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 87b06a80102b..e991e606ab1f 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -178,7 +178,7 @@ try_again: if (skb->protocol == htons(ETH_P_IP)) ipv6_addr_set(&sin6->sin6_addr, 0, 0, - htonl(0xffff), skb->nh.iph->saddr); + htonl(0xffff), ip_hdr(skb)->saddr); else { ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index bb26a658cc1c..1093478cc007 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c @@ -46,7 +46,7 @@ static int help(struct sk_buff **pskb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { struct nf_conntrack_expect *exp; - struct iphdr *iph = (*pskb)->nh.iph; + struct iphdr *iph = ip_hdr(*pskb); struct rtable *rt = (struct rtable *)(*pskb)->dst; struct in_device *in_dev; __be32 mask = 0; diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c index a7cc75aeb38d..de647bd54893 100644 --- a/net/netfilter/xt_DSCP.c +++ b/net/netfilter/xt_DSCP.c @@ -35,13 +35,13 @@ static unsigned int target(struct sk_buff **pskb, const void *targinfo) { const struct xt_DSCP_info *dinfo = targinfo; - u_int8_t dscp = ipv4_get_dsfield((*pskb)->nh.iph) >> XT_DSCP_SHIFT; + u_int8_t dscp = ipv4_get_dsfield(ip_hdr(*pskb)) >> XT_DSCP_SHIFT; if (dscp != dinfo->dscp) { if (!skb_make_writable(pskb, sizeof(struct iphdr))) return NF_DROP; - ipv4_change_dsfield((*pskb)->nh.iph, (__u8)(~XT_DSCP_MASK), + ipv4_change_dsfield(ip_hdr(*pskb), (__u8)(~XT_DSCP_MASK), dinfo->dscp << XT_DSCP_SHIFT); } diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index afc0c60e19d5..9e948ce27600 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -145,7 +145,7 @@ xt_tcpmss_target4(struct sk_buff **pskb, const struct xt_target *target, const void *targinfo) { - struct iphdr *iph = (*pskb)->nh.iph; + struct iphdr *iph = ip_hdr(*pskb); __be16 newlen; int ret; @@ -154,7 +154,7 @@ xt_tcpmss_target4(struct sk_buff **pskb, if (ret < 0) return NF_DROP; if (ret > 0) { - iph = (*pskb)->nh.iph; + iph = ip_hdr(*pskb); newlen = htons(ntohs(iph->tot_len) + ret); nf_csum_replace2(&iph->check, iph->tot_len, newlen); iph->tot_len = newlen; diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c index 26c7f4ad102a..9ec294cd2436 100644 --- a/net/netfilter/xt_dscp.c +++ b/net/netfilter/xt_dscp.c @@ -34,7 +34,7 @@ static int match(const struct sk_buff *skb, int *hotdrop) { const struct xt_dscp_info *info = matchinfo; - u_int8_t dscp = ipv4_get_dsfield(skb->nh.iph) >> XT_DSCP_SHIFT; + u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; return (dscp == info->dscp) ^ !!info->invert; } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9f37d593ca38..47af19ab03cf 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -380,14 +380,14 @@ hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst, switch (hinfo->family) { case AF_INET: if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) - dst->addr.ip.dst = skb->nh.iph->daddr; + dst->addr.ip.dst = ip_hdr(skb)->daddr; if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) - dst->addr.ip.src = skb->nh.iph->saddr; + dst->addr.ip.src = ip_hdr(skb)->saddr; if (!(hinfo->cfg.mode & 
(XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) return 0; - nexthdr = skb->nh.iph->protocol; + nexthdr = ip_hdr(skb)->protocol; break; #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) case AF_INET6: diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c index 32fb998d9bac..65fdb2166996 100644 --- a/net/netfilter/xt_length.c +++ b/net/netfilter/xt_length.c @@ -31,7 +31,7 @@ match(const struct sk_buff *skb, int *hotdrop) { const struct xt_length_info *info = matchinfo; - u_int16_t pktlen = ntohs(skb->nh.iph->tot_len); + u_int16_t pktlen = ntohs(ip_hdr(skb)->tot_len); return (pktlen >= info->min && pktlen <= info->max) ^ info->invert; } diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c index 16e7b0804287..e1409fc5c288 100644 --- a/net/netfilter/xt_pkttype.c +++ b/net/netfilter/xt_pkttype.c @@ -34,7 +34,7 @@ static int match(const struct sk_buff *skb, const struct xt_pkttype_info *info = matchinfo; if (skb->pkt_type == PACKET_LOOPBACK) - type = (MULTICAST(skb->nh.iph->daddr) + type = (MULTICAST(ip_hdr(skb)->daddr) ? PACKET_MULTICAST : PACKET_BROADCAST); else diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c index a7c929a9fdca..e601fa87bb77 100644 --- a/net/rxrpc/connection.c +++ b/net/rxrpc/connection.c @@ -267,7 +267,7 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer, /* fill in the specifics */ candidate->addr.sin_family = AF_INET; candidate->addr.sin_port = x_port; - candidate->addr.sin_addr.s_addr = pkt->nh.iph->saddr; + candidate->addr.sin_addr.s_addr = ip_hdr(pkt)->saddr; candidate->in_epoch = x_epoch; candidate->out_epoch = x_epoch; candidate->in_clientflag = RXRPC_CLIENT_INITIATED; diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c index 8e57be2df936..cac078b74068 100644 --- a/net/rxrpc/transport.c +++ b/net/rxrpc/transport.c @@ -478,7 +478,7 @@ void rxrpc_trans_receive_packet(struct rxrpc_transport *trans) return; } - addr = pkt->nh.iph->saddr; + addr = ip_hdr(pkt)->saddr; port = pkt->h.uh->source; _net("Rx Received UDP packet from %08x:%04hu", @@ -626,7 +626,7 @@ int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans, memset(&sin,0,sizeof(sin)); sin.sin_family = AF_INET; sin.sin_port = msg->pkt->h.uh->source; - sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr; + sin.sin_addr.s_addr = ip_hdr(msg->pkt)->saddr; msghdr.msg_name = &sin; msghdr.msg_namelen = sizeof(sin); diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index b6ac0e287872..cb8cf5bfa053 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -145,7 +145,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, #if RSVP_DST_LEN == 4 struct ipv6hdr *nhptr = skb->nh.ipv6h; #else - struct iphdr *nhptr = skb->nh.iph; + struct iphdr *nhptr = ip_hdr(skb); #endif restart: diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index afb3bbd571f2..baca8743c12b 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -503,7 +503,7 @@ static void sch_atm_dequeue(unsigned long data) } D2PRINTK("atm_tc_dequeue: sending on class %p\n",flow); /* remove any LL header somebody else has attached */ - skb_pull(skb,(char *) skb->nh.iph-(char *) skb->data); + skb_pull(skb, skb_network_offset(skb)); if (skb_headroom(skb) < flow->hdr_len) { struct sk_buff *new; @@ -513,7 +513,7 @@ static void sch_atm_dequeue(unsigned long data) skb = new; } D2PRINTK("sch_atm_dequeue: ip %p, data %p\n", - skb->nh.iph,skb->data); + skb_network_header(skb), skb->data); ATM_SKB(skb)->vcc = flow->vcc; 
memcpy(skb_push(skb,flow->hdr_len),flow->hdr, flow->hdr_len); diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 96324cf4e6a9..45b5734dd72a 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -216,7 +216,7 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch) /* FIXME: Safe with non-linear skbs? --RR */ switch (skb->protocol) { case __constant_htons(ETH_P_IP): - skb->tc_index = ipv4_get_dsfield(skb->nh.iph) + skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) & ~INET_ECN_MASK; break; case __constant_htons(ETH_P_IPV6): @@ -292,7 +292,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) switch (skb->protocol) { case __constant_htons(ETH_P_IP): - ipv4_change_dsfield(skb->nh.iph, p->mask[index], + ipv4_change_dsfield(ip_hdr(skb), p->mask[index], p->value[index]); break; case __constant_htons(ETH_P_IPV6): diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 66f32051a99b..02081bc9e0d1 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -137,7 +137,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) switch (skb->protocol) { case __constant_htons(ETH_P_IP): { - struct iphdr *iph = skb->nh.iph; + const struct iphdr *iph = ip_hdr(skb); h = iph->daddr; h2 = iph->saddr^iph->protocol; if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && diff --git a/net/sctp/input.c b/net/sctp/input.c index 2b0863aba3fb..595fe32b3d41 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -154,7 +154,7 @@ int sctp_rcv(struct sk_buff *skb) if (skb->len < sizeof(struct sctp_chunkhdr)) goto discard_it; - family = ipver2af(skb->nh.iph->version); + family = ipver2af(ip_hdr(skb)->version); af = sctp_get_af_specific(family); if (unlikely(!af)) goto discard_it; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 5f9b145b0b90..742f9ff42fbf 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -770,9 +770,9 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, /* Map ipv4 address into v4-mapped-on-v6 address. */ if (sctp_sk(skb->sk)->v4mapped && - skb->nh.iph->version == 4) { + ip_hdr(skb)->version == 4) { sctp_v4_map_v6((union sctp_addr *)sin6); - sin6->sin6_addr.s6_addr32[3] = skb->nh.iph->saddr; + sin6->sin6_addr.s6_addr32[3] = ip_hdr(skb)->saddr; return; } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index e17a823ca90f..08f92ba4ebd7 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -238,10 +238,10 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, sh = (struct sctphdr *) skb->h.raw; if (is_saddr) { *port = sh->source; - from = &skb->nh.iph->saddr; + from = &ip_hdr(skb)->saddr; } else { *port = sh->dest; - from = &skb->nh.iph->daddr; + from = &ip_hdr(skb)->daddr; } memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr)); } @@ -530,7 +530,7 @@ static int sctp_v4_skb_iif(const struct sk_buff *skb) /* Was this packet marked by Explicit Congestion Notification? */ static int sctp_v4_is_ce(const struct sk_buff *skb) { - return INET_ECN_is_ce(skb->nh.iph->tos); + return INET_ECN_is_ce(ip_hdr(skb)->tos); } /* Create and initialize a new sk for the socket returned by accept(). 
*/ @@ -739,7 +739,7 @@ static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len) sin = (struct sockaddr_in *)msgname; sh = (struct sctphdr *)skb->h.raw; sin->sin_port = sh->source; - sin->sin_addr.s_addr = skb->nh.iph->saddr; + sin->sin_addr.s_addr = ip_hdr(skb)->saddr; } } diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index f7fb29d5a0c7..60c5b59d4c65 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -86,7 +86,7 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk) struct sctp_af *af; int iif = 0; - af = sctp_get_af_specific(ipver2af(chunk->skb->nh.iph->version)); + af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version)); if (af) iif = af->skb_iif(chunk->skb); @@ -1233,7 +1233,7 @@ struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, asoc->temp = 1; skb = chunk->skb; /* Create an entry for the source address of the packet. */ - af = sctp_get_af_specific(ipver2af(skb->nh.iph->version)); + af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version)); if (unlikely(!af)) goto fail; af->from_skb(&asoc->c.peer_addr, skb, 1); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index e9097cf614ba..bf502c499c81 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -5286,7 +5286,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, chunk->ecn_ce_done = 1; af = sctp_get_af_specific( - ipver2af(chunk->skb->nh.iph->version)); + ipver2af(ip_hdr(chunk->skb)->version)); if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) { /* Do real work as sideffect. */ -- cgit v1.2.3-59-g8ed1b From d0a92be05ed4aea7d35c2b257e3f9173565fe4eb Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 12 Mar 2007 20:56:31 -0300 Subject: [SK_BUFF]: Introduce arp_hdr(), remove skb->nh.arph Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 2 +- drivers/net/chelsio/sge.c | 2 +- include/linux/if_arp.h | 9 +++++++++ include/linux/skbuff.h | 1 - net/bridge/br_netfilter.c | 2 +- net/core/netpoll.c | 2 +- net/ipv4/arp.c | 4 ++-- net/ipv4/netfilter/arp_tables.c | 4 ++-- net/ipv4/netfilter/arpt_mangle.c | 2 +- net/ipv4/netfilter/ipt_CLUSTERIP.c | 2 +- 10 files changed, 19 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7f11388893fc..76d3504505bd 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2524,7 +2524,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack (2 * sizeof(u32))))) goto out_unlock; - arp = skb->nh.arph; + arp = arp_hdr(skb); if (arp->ar_hln != dev->addr_len || skb->pkt_type == PACKET_OTHERHOST || skb->pkt_type == PACKET_LOOPBACK || diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index c357f45a16c3..a4204dff3636 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1925,7 +1925,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) */ if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { if (skb->protocol == htons(ETH_P_ARP) && - skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) { + arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) { adapter->sge->espibug_skb[dev->if_port] = skb; /* We want to re-use this skb later. 
We * simply bump the reference count and it diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 7f5714214ee3..ed7b93c3083a 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -148,4 +148,13 @@ struct arphdr }; +#ifdef __KERNEL__ +#include + +static inline struct arphdr *arp_hdr(const struct sk_buff *skb) +{ + return (struct arphdr *)skb_network_header(skb); +} +#endif + #endif /* _LINUX_IF_ARP_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 62f841b5b700..9cb674b12b29 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -248,7 +248,6 @@ struct sk_buff { union { struct ipv6hdr *ipv6h; - struct arphdr *arph; unsigned char *raw; } nh; diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index ebe740f6b902..0ee74b1e4770 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -670,7 +670,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb, (*pskb)->nh.raw += VLAN_HLEN; } - if (skb->nh.arph->ar_pln != 4) { + if (arp_hdr(skb)->ar_pln != 4) { if (IS_VLAN_ARP(skb)) { skb_push(*pskb, VLAN_HLEN); (*pskb)->nh.raw -= VLAN_HLEN; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index c4cec17be334..496b06244a8e 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -363,7 +363,7 @@ static void arp_reply(struct sk_buff *skb) skb_reset_network_header(skb); skb->h.raw = skb->data; - arp = skb->nh.arph; + arp = arp_hdr(skb); if ((arp->ar_hrd != htons(ARPHRD_ETHER) && arp->ar_hrd != htons(ARPHRD_IEEE802)) || diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 01d0e8dd17d8..7110779a0244 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -721,7 +721,7 @@ static int arp_process(struct sk_buff *skb) if (in_dev == NULL) goto out; - arp = skb->nh.arph; + arp = arp_hdr(skb); switch (dev_type) { default: @@ -937,7 +937,7 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev, (2 * sizeof(u32))))) goto freeskb; - arp = skb->nh.arph; + arp = arp_hdr(skb); if (arp->ar_hln != dev->addr_len || dev->flags & IFF_NOARP || skb->pkt_type == PACKET_OTHERHOST || diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 57b0221f9e24..cae41215e3c7 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -245,7 +245,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb, e = get_entry(table_base, private->hook_entry[hook]); back = get_entry(table_base, private->underflow[hook]); - arp = (*pskb)->nh.arph; + arp = arp_hdr(*pskb); do { if (arp_packet_match(arp, (*pskb)->dev, indev, outdev, &e->arp)) { struct arpt_entry_target *t; @@ -297,7 +297,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb, t->data); /* Target might have changed stuff. 
*/ - arp = (*pskb)->nh.arph; + arp = arp_hdr(*pskb); if (verdict == ARPT_CONTINUE) e = (void *)e + e->next_offset; diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index af1c8593eb19..b4450f1ccc1b 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c @@ -30,7 +30,7 @@ target(struct sk_buff **pskb, *pskb = nskb; } - arp = (*pskb)->nh.arph; + arp = arp_hdr(*pskb); arpptr = skb_network_header(*pskb) + sizeof(*arp); pln = arp->ar_pln; hln = arp->ar_hln; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index af5b82b8ceb7..d3b16817a991 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -521,7 +521,7 @@ arp_mangle(unsigned int hook, const struct net_device *out, int (*okfn)(struct sk_buff *)) { - struct arphdr *arp = (*pskb)->nh.arph; + struct arphdr *arp = arp_hdr(*pskb); struct arp_payload *payload; struct clusterip_config *c; -- cgit v1.2.3-59-g8ed1b From 0660e03f6b18f19b6bbafe7583265a51b90daf36 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 25 Apr 2007 17:54:47 -0700 Subject: [SK_BUFF]: Introduce ipv6_hdr(), remove skb->nh.ipv6h Now the skb->nh union has just one member, .raw, i.e. it is just like the skb->mac union, strange, no? I'm just leaving it like that till the transport layer is done with, when we'll rename skb->mac.raw to skb->mac_header (or ->mac_header_offset?), ditto for ->{h,nh}. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- drivers/net/bonding/bond_alb.c | 4 +- drivers/net/e1000/e1000_main.c | 10 ++-- drivers/s390/net/qeth_eddp.c | 8 +-- drivers/s390/net/qeth_main.c | 3 +- drivers/s390/net/qeth_tso.h | 2 +- include/linux/ipv6.h | 5 ++ include/linux/skbuff.h | 1 - include/net/inet_ecn.h | 2 +- net/bridge/br_netfilter.c | 6 +-- net/core/pktgen.c | 2 +- net/dccp/ipv6.c | 32 +++++------ net/ipv4/ip_gre.c | 4 +- net/ipv4/xfrm4_mode_tunnel.c | 2 +- net/ipv6/ah6.c | 14 ++--- net/ipv6/datagram.c | 16 +++--- net/ipv6/esp6.c | 2 +- net/ipv6/exthdrs.c | 20 +++---- net/ipv6/icmp.c | 20 +++---- net/ipv6/ip6_input.c | 8 +-- net/ipv6/ip6_output.c | 37 +++++++------ net/ipv6/ip6_tunnel.c | 16 +++--- net/ipv6/ipcomp6.c | 2 +- net/ipv6/ipv6_sockglue.c | 6 +-- net/ipv6/mcast.c | 15 +++--- net/ipv6/mip6.c | 20 +++---- net/ipv6/ndisc.c | 50 ++++++++--------- net/ipv6/netfilter.c | 8 +-- net/ipv6/netfilter/ip6_tables.c | 8 +-- net/ipv6/netfilter/ip6t_HL.c | 2 +- net/ipv6/netfilter/ip6t_LOG.c | 2 +- net/ipv6/netfilter/ip6t_REJECT.c | 8 +-- net/ipv6/netfilter/ip6t_eui64.c | 4 +- net/ipv6/netfilter/ip6t_hl.c | 2 +- net/ipv6/netfilter/ip6t_ipv6header.c | 2 +- net/ipv6/netfilter/ip6table_mangle.c | 14 ++--- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 19 +++---- net/ipv6/netfilter/nf_conntrack_reasm.c | 16 +++--- net/ipv6/raw.c | 17 +++--- net/ipv6/reassembly.c | 12 ++--- net/ipv6/route.c | 4 +- net/ipv6/sit.c | 8 +-- net/ipv6/tcp_ipv6.c | 75 +++++++++++++------------- net/ipv6/udp.c | 17 +++--- net/ipv6/xfrm6_input.c | 4 +- net/ipv6/xfrm6_mode_beet.c | 6 +-- net/ipv6/xfrm6_mode_ro.c | 2 +- net/ipv6/xfrm6_mode_transport.c | 4 +- net/ipv6/xfrm6_mode_tunnel.c | 10 ++-- net/ipv6/xfrm6_policy.c | 2 +- net/ipv6/xfrm6_tunnel.c | 2 +- net/netfilter/xt_DSCP.c | 4 +- net/netfilter/xt_TCPMSS.c | 4 +- net/netfilter/xt_dscp.c | 2 +- net/netfilter/xt_hashlimit.c | 4 +- net/netfilter/xt_length.c | 3 +- net/sched/cls_rsvp.h | 2 +- net/sched/sch_dsmark.c | 4 +- net/sched/sch_sfq.c | 2 +- net/sctp/ipv6.c | 8 +-- 59 files changed, 
296 insertions(+), 292 deletions(-) (limited to 'include') diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 8555afa574a4..b8cf777542fa 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1304,8 +1304,8 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) break; } - hash_start = (char*)&(skb->nh.ipv6h->daddr); - hash_size = sizeof(skb->nh.ipv6h->daddr); + hash_start = (char *)&(ipv6_hdr(skb)->daddr); + hash_size = sizeof(ipv6_hdr(skb)->daddr); break; case ETH_P_IPX: if (ipx_hdr(skb)->ipx_checksum != diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index c324866c9789..a3d9986b4170 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2899,13 +2899,11 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, cmd_length = E1000_TXD_CMD_IP; ipcse = skb->h.raw - skb->data - 1; } else if (skb->protocol == htons(ETH_P_IPV6)) { - skb->nh.ipv6h->payload_len = 0; + ipv6_hdr(skb)->payload_len = 0; skb->h.th->check = - ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, - &skb->nh.ipv6h->daddr, - 0, - IPPROTO_TCP, - 0); + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); ipcse = 0; } ipcss = skb_network_offset(skb); diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 1574247abaa1..90da58b4e532 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -479,9 +479,11 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, skb->h.raw, skb->h.th->doff * 4); else - eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h, - sizeof(struct ipv6hdr), - (u8 *)skb->h.th, skb->h.th->doff*4); + eddp = qeth_eddp_create_eddp_data(qhdr, + skb_network_header(skb), + sizeof(struct ipv6hdr), + skb->h.raw, + skb->h.th->doff * 4); if (eddp == NULL) { QETH_DBF_TEXT(trace, 2, "eddpfcnm"); diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 8a07d548a05a..df7f279ec408 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -4053,7 +4053,8 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, skb->dst->neighbour->primary_key, 16); } else { /* fill in destination address used in ip header */ - memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16); + memcpy(hdr->hdr.l3.dest_addr, + &ipv6_hdr(skb)->daddr, 16); } } else { /* passthrough */ if((skb->dev->type == ARPHRD_IEEE802_TR) && diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h index 255cb2e9c796..4040bdd8c327 100644 --- a/drivers/s390/net/qeth_tso.h +++ b/drivers/s390/net/qeth_tso.h @@ -64,7 +64,7 @@ static inline void qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); - struct ipv6hdr *ip6h = skb->nh.ipv6h; + struct ipv6hdr *ip6h = ipv6_hdr(skb); struct tcphdr *tcph = skb->h.th; tcph->check = 0; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index e046b22a2222..ec79c59b2077 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -223,6 +223,11 @@ enum { #include /* struct ipv6_mc_socklist */ #include +static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb) +{ + return (struct ipv6hdr *)skb_network_header(skb); +} + /* This structure contains results of exthdrs parsing as offsets from skb->nh. 
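
The include/linux/ipv6.h hunk above defines the new accessor; the remaining hunks in this commit are the mechanical call-site conversion away from skb->nh.ipv6h. As a minimal sketch of that pattern (assuming a kernel of this era; the function name example_hoplimit_ok is illustrative only and not part of the series), a reader of IPv6 header fields changes like this:

	#include <linux/skbuff.h>
	#include <linux/ipv6.h>

	/* Hypothetical caller, for illustration only. */
	static int example_hoplimit_ok(const struct sk_buff *skb)
	{
		/* Before this commit: struct ipv6hdr *hdr = skb->nh.ipv6h; */
		const struct ipv6hdr *hdr = ipv6_hdr(skb);

		/* ipv6_hdr() resolves to the skb's network header. */
		return hdr->version == 6 && hdr->hop_limit > 1;
	}

The arp_hdr() helper added by the previous commit follows the same cast-of-skb_network_header() pattern, so its call sites convert the same way.
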
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9cb674b12b29..31806a7ce40e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -247,7 +247,6 @@ struct sk_buff { } h; union { - struct ipv6hdr *ipv6h; unsigned char *raw; } nh; diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 6fd4452c15d9..06a2c69a89e5 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -122,7 +122,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) case __constant_htons(ETH_P_IPV6): if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= skb->tail) - return IP6_ECN_set_ce(skb->nh.ipv6h); + return IP6_ECN_set_ce(ipv6_hdr(skb)); break; } diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 0ee74b1e4770..f2796c97b4a2 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -372,7 +372,7 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb) /* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */ static int check_hbh_len(struct sk_buff *skb) { - unsigned char *raw = (u8 *) (skb->nh.ipv6h + 1); + unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1); u32 pkt_len; const unsigned char *nh = skb_network_header(skb); int off = raw - nh; @@ -400,7 +400,7 @@ static int check_hbh_len(struct sk_buff *skb) goto bad; pkt_len = ntohl(*(__be32 *) (nh + off + 2)); if (pkt_len <= IPV6_MAXPLEN || - skb->nh.ipv6h->payload_len) + ipv6_hdr(skb)->payload_len) goto bad; if (pkt_len > skb->len - sizeof(struct ipv6hdr)) goto bad; @@ -441,7 +441,7 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook, if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto inhdr_error; - hdr = skb->nh.ipv6h; + hdr = ipv6_hdr(skb); if (hdr->version != 6) goto inhdr_error; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index e0faff8eb652..ee82364c8f31 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2736,7 +2736,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, skb->protocol = protocol; skb->dev = odev; skb->pkt_type = PACKET_HOST; - skb->nh.ipv6h = iph; + skb->nh.raw = (unsigned char *)iph; skb->h.uh = udph; if (pkt_dev->nfrags <= 0) diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 627d0c3c51cf..64eac2515aa2 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -84,8 +84,8 @@ static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb) { - return secure_dccpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, - skb->nh.ipv6h->saddr.s6_addr32, + return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, + ipv6_hdr(skb)->saddr.s6_addr32, dccp_hdr(skb)->dccph_dport, dccp_hdr(skb)->dccph_sport ); @@ -313,6 +313,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req) static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; + struct ipv6hdr *rxip6h; const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext) + sizeof(struct dccp_hdr_reset); @@ -352,12 +353,13 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq); dccp_csum_outgoing(skb); - dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr, - &rxskb->nh.ipv6h->daddr); + rxip6h = ipv6_hdr(rxskb); + dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr, + &rxip6h->daddr); memset(&fl, 0, sizeof(fl)); - ipv6_addr_copy(&fl.fl6_dst, 
&rxskb->nh.ipv6h->saddr); - ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr); + ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr); + ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr); fl.proto = IPPROTO_DCCP; fl.oif = inet6_iif(rxskb); @@ -390,7 +392,7 @@ static struct request_sock_ops dccp6_request_sock_ops = { static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) { const struct dccp_hdr *dh = dccp_hdr(skb); - const struct ipv6hdr *iph = skb->nh.ipv6h; + const struct ipv6hdr *iph = ipv6_hdr(skb); struct sock *nsk; struct request_sock **prev; /* Find possible connection requests. */ @@ -460,8 +462,8 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; ireq6 = inet6_rsk(req); - ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); - ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); + ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); + ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); ireq6->pktopts = NULL; if (ipv6_opt_accepted(sk, skb) || @@ -546,7 +548,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); - newnp->mcast_hops = skb->nh.ipv6h->hop_limit; + newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count @@ -653,7 +655,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, } newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); - newnp->mcast_hops = skb->nh.ipv6h->hop_limit; + newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * Clone native IPv6 options from listening socket (if any) @@ -826,8 +828,8 @@ static int dccp_v6_rcv(struct sk_buff **pskb) goto discard_it; /* Step 1: If header checksum is incorrect, drop packet and return. 
*/ - if (dccp_v6_csum_finish(skb, &skb->nh.ipv6h->saddr, - &skb->nh.ipv6h->daddr)) { + if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr)) { DCCP_WARN("dropped packet with invalid checksum\n"); goto discard_it; } @@ -844,9 +846,9 @@ static int dccp_v6_rcv(struct sk_buff **pskb) /* Step 2: * Look up flow ID in table and get corresponding socket */ - sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr, + sk = __inet6_lookup(&dccp_hashinfo, &ipv6_hdr(skb)->saddr, dh->dccph_sport, - &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport), + &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport), inet6_iif(skb)); /* * Step 2: diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 851f46b910f2..969fe31723a7 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -535,7 +535,7 @@ static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) if (skb->protocol == htons(ETH_P_IP)) { IP_ECN_set_ce(ip_hdr(skb)); } else if (skb->protocol == htons(ETH_P_IPV6)) { - IP6_ECN_set_ce(skb->nh.ipv6h); + IP6_ECN_set_ce(ipv6_hdr(skb)); } } } @@ -721,7 +721,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) addr_type = ipv6_addr_type(addr6); if (addr_type == IPV6_ADDR_ANY) { - addr6 = &skb->nh.ipv6h->daddr; + addr6 = &ipv6_hdr(skb)->daddr; addr_type = ipv6_addr_type(addr6); } diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index faa1b9a76e76..edba75610a46 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -26,7 +26,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb) static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) { if (INET_ECN_is_ce(iph->tos)) - IP6_ECN_set_ce(skb->nh.ipv6h); + IP6_ECN_set_ce(ipv6_hdr(skb)); } /* Add encapsulation header. 
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 1c914386982f..b682d2368c2a 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -325,6 +325,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) */ struct ipv6_auth_hdr *ah; + struct ipv6hdr *ip6h; struct ah_data *ahp; unsigned char *tmp_hdr = NULL; u16 hdr_len; @@ -357,13 +358,14 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC); if (!tmp_hdr) goto out; - if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len, XFRM_POLICY_IN)) + ip6h = ipv6_hdr(skb); + if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) goto free_out; - skb->nh.ipv6h->priority = 0; - skb->nh.ipv6h->flow_lbl[0] = 0; - skb->nh.ipv6h->flow_lbl[1] = 0; - skb->nh.ipv6h->flow_lbl[2] = 0; - skb->nh.ipv6h->hop_limit = 0; + ip6h->priority = 0; + ip6h->flow_lbl[0] = 0; + ip6h->flow_lbl[1] = 0; + ip6h->flow_lbl[2] = 0; + ip6h->hop_limit = 0; { u8 auth_data[MAX_AH_AUTH_LEN]; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index ac95d3bfdfb7..f429290c2c37 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -254,7 +254,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info) skb_put(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); - iph = skb->nh.ipv6h; + iph = ipv6_hdr(skb); ipv6_addr_copy(&iph->daddr, &fl->fl6_dst); serr = SKB_EXT_ERR(skb); @@ -340,7 +340,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) sin->sin6_flowinfo = 0; sin->sin6_scope_id = 0; if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { - ipv6_addr_copy(&sin->sin6_addr, &skb->nh.ipv6h->saddr); + ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr); if (np->rxopt.all) datagram_recv_ctl(sk, msg, skb); if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) @@ -391,17 +391,17 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) struct in6_pktinfo src_info; src_info.ipi6_ifindex = opt->iif; - ipv6_addr_copy(&src_info.ipi6_addr, &skb->nh.ipv6h->daddr); + ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr); put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxhlim) { - int hlim = skb->nh.ipv6h->hop_limit; + int hlim = ipv6_hdr(skb)->hop_limit; put_cmsg(msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim); } if (np->rxopt.bits.rxtclass) { - int tclass = (ntohl(*(__be32 *)skb->nh.ipv6h) >> 20) & 0xff; + int tclass = (ntohl(*(__be32 *)ipv6_hdr(skb)) >> 20) & 0xff; put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass); } @@ -428,7 +428,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) * IPV6_RECVDSTOPTS is more generic. 
--yoshfuji */ unsigned int off = sizeof(struct ipv6hdr); - u8 nexthdr = skb->nh.ipv6h->nexthdr; + u8 nexthdr = ipv6_hdr(skb)->nexthdr; while (off <= opt->lastopt) { unsigned len; @@ -466,11 +466,11 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) struct in6_pktinfo src_info; src_info.ipi6_ifindex = opt->iif; - ipv6_addr_copy(&src_info.ipi6_addr, &skb->nh.ipv6h->daddr); + ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr); put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxohlim) { - int hlim = skb->nh.ipv6h->hop_limit; + int hlim = ipv6_hdr(skb)->hop_limit; put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim); } if (np->rxopt.bits.ohopopts && opt->hop) { diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 6e6b57ac8013..7aff380e74ef 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -191,7 +191,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; esph = (struct ipv6_esp_hdr*)skb->data; - iph = skb->nh.ipv6h; + iph = ipv6_hdr(skb); /* Get ivec. This can be wrong, check against another impls. */ if (esp->conf.ivlen) diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 9ebf120ba6d3..dab069b0b3f6 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -125,7 +125,7 @@ static int ip6_tlvopt_unknown(struct sk_buff **skbp, int optoff) /* Actually, it is redundant check. icmp_send will recheck in any case. */ - if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) + if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) break; case 2: /* send ICMP PARM PROB regardless and drop packet */ icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); @@ -202,7 +202,7 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff) struct sk_buff *skb = *skbp; struct ipv6_destopt_hao *hao; struct inet6_skb_parm *opt = IP6CB(skb); - struct ipv6hdr *ipv6h = skb->nh.ipv6h; + struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct in6_addr tmp_addr; int ret; @@ -248,7 +248,7 @@ static int ipv6_dest_hao(struct sk_buff **skbp, int optoff) *skbp = skb = skb2; hao = (struct ipv6_destopt_hao *)(skb_network_header(skb2) + optoff); - ipv6h = skb2->nh.ipv6h; + ipv6h = ipv6_hdr(skb2); } if (skb->ip_summed == CHECKSUM_COMPLETE) @@ -414,7 +414,7 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp) return -1; } - if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) || + if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || skb->pkt_type != PACKET_HOST) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS); @@ -522,7 +522,7 @@ looped_back: #ifdef CONFIG_IPV6_MIP6 case IPV6_SRCRT_TYPE_2: if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, - (xfrm_address_t *)&skb->nh.ipv6h->saddr, + (xfrm_address_t *)&ipv6_hdr(skb)->saddr, IPPROTO_ROUTING) < 0) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS); @@ -549,8 +549,8 @@ looped_back: } ipv6_addr_copy(&daddr, addr); - ipv6_addr_copy(addr, &skb->nh.ipv6h->daddr); - ipv6_addr_copy(&skb->nh.ipv6h->daddr, &daddr); + ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr); + ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr); dst_release(xchg(&skb->dst, NULL)); ip6_route_input(skb); @@ -561,7 +561,7 @@ looped_back: } if (skb->dst->dev->flags&IFF_LOOPBACK) { - if (skb->nh.ipv6h->hop_limit <= 1) { + if (ipv6_hdr(skb)->hop_limit <= 1) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, @@ -569,7 +569,7 @@ looped_back: kfree_skb(skb); return -1; } - 
skb->nh.ipv6h->hop_limit--; + ipv6_hdr(skb)->hop_limit--; goto looped_back; } @@ -698,7 +698,7 @@ static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff) icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); return 0; } - if (skb->nh.ipv6h->payload_len) { + if (ipv6_hdr(skb)->payload_len) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); return 0; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index e5293b34229f..3a01effda695 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -129,9 +129,9 @@ void icmpv6_param_prob(struct sk_buff *skb, int code, int pos) static int is_ineligible(struct sk_buff *skb) { - int ptr = (u8*)(skb->nh.ipv6h+1) - skb->data; + int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; int len = skb->len - ptr; - __u8 nexthdr = skb->nh.ipv6h->nexthdr; + __u8 nexthdr = ipv6_hdr(skb)->nexthdr; if (len < 0) return 1; @@ -275,7 +275,7 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st #ifdef CONFIG_IPV6_MIP6 static void mip6_addr_swap(struct sk_buff *skb) { - struct ipv6hdr *iph = skb->nh.ipv6h; + struct ipv6hdr *iph = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6_destopt_hao *hao; struct in6_addr tmp; @@ -303,7 +303,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, struct net_device *dev) { struct inet6_dev *idev = NULL; - struct ipv6hdr *hdr = skb->nh.ipv6h; + struct ipv6hdr *hdr = ipv6_hdr(skb); struct sock *sk; struct ipv6_pinfo *np; struct in6_addr *saddr = NULL; @@ -485,7 +485,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) int hlimit; int tclass; - saddr = &skb->nh.ipv6h->daddr; + saddr = &ipv6_hdr(skb)->daddr; if (!ipv6_unicast_destination(skb)) saddr = NULL; @@ -495,7 +495,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) memset(&fl, 0, sizeof(fl)); fl.proto = IPPROTO_ICMPV6; - ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr); + ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); if (saddr) ipv6_addr_copy(&fl.fl6_src, saddr); fl.oif = skb->dev->ifindex; @@ -583,8 +583,8 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info) if (!pskb_may_pull(skb, inner_offset+8)) return; - saddr = &skb->nh.ipv6h->saddr; - daddr = &skb->nh.ipv6h->daddr; + saddr = &ipv6_hdr(skb)->saddr; + daddr = &ipv6_hdr(skb)->daddr; /* BUGGG_FUTURE: we should try to parse exthdrs in this packet. Without this we will not able f.e. to make source routed @@ -628,8 +628,8 @@ static int icmpv6_rcv(struct sk_buff **pskb) ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS); - saddr = &skb->nh.ipv6h->saddr; - daddr = &skb->nh.ipv6h->daddr; + saddr = &ipv6_hdr(skb)->saddr; + daddr = &ipv6_hdr(skb)->daddr; /* Perform checksum. 
*/ switch (skb->ip_summed) { diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index aecc74da0721..9c3c787a21c1 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -96,7 +96,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) goto err; - hdr = skb->nh.ipv6h; + hdr = ipv6_hdr(skb); if (hdr->version != 6) goto err; @@ -116,7 +116,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); goto drop; } - hdr = skb->nh.ipv6h; + hdr = ipv6_hdr(skb); } if (hdr->nexthdr == NEXTHDR_HOP) { @@ -183,7 +183,7 @@ resubmit: skb_postpull_rcsum(skb, skb_network_header(skb), skb->h.raw - skb->nh.raw); - hdr = skb->nh.ipv6h; + hdr = ipv6_hdr(skb); if (ipv6_addr_is_multicast(&hdr->daddr) && !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, &hdr->saddr) && @@ -234,7 +234,7 @@ int ip6_mc_input(struct sk_buff *skb) IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS); - hdr = skb->nh.ipv6h; + hdr = ipv6_hdr(skb); deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) || ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index bd25825c0ccd..1900c6226866 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -107,13 +107,13 @@ static int ip6_output2(struct sk_buff *skb) skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; - if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) { + if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL; struct inet6_dev *idev = ip6_dst_idev(skb->dst); if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && - ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr, - &skb->nh.ipv6h->saddr)) { + ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, + &ipv6_hdr(skb)->saddr)) { struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); /* Do not check for IFF_ALLMULTI; multicast routing @@ -124,7 +124,7 @@ static int ip6_output2(struct sk_buff *skb) newskb->dev, ip6_dev_loopback_xmit); - if (skb->nh.ipv6h->hop_limit == 0) { + if (ipv6_hdr(skb)->hop_limit == 0) { IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; @@ -193,7 +193,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); - hdr = skb->nh.ipv6h; + hdr = ipv6_hdr(skb); /* * Fill in the IPv6 header @@ -263,8 +263,8 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, totlen = len + sizeof(struct ipv6hdr); - hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr)); - skb->nh.ipv6h = hdr; + skb->nh.raw = skb_put(skb, sizeof(struct ipv6hdr)); + hdr = ipv6_hdr(skb); *(__be32*)hdr = htonl(0x60000000); @@ -309,7 +309,7 @@ static int ip6_call_ra_chain(struct sk_buff *skb, int sel) static int ip6_forward_proxy_check(struct sk_buff *skb) { - struct ipv6hdr *hdr = skb->nh.ipv6h; + struct ipv6hdr *hdr = ipv6_hdr(skb); u8 nexthdr = hdr->nexthdr; int offset; @@ -366,7 +366,7 @@ static inline int ip6_forward_finish(struct sk_buff *skb) int ip6_forward(struct sk_buff *skb) { struct dst_entry *dst = skb->dst; - struct ipv6hdr *hdr = skb->nh.ipv6h; + struct ipv6hdr *hdr = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); if (ipv6_devconf.forwarding == 0) @@ -475,7 +475,7 @@ int ip6_forward(struct sk_buff *skb) goto drop; } - hdr = skb->nh.ipv6h; + hdr = ipv6_hdr(skb); /* Mangling hops number delayed to 
point after skb COW */ @@ -527,10 +527,11 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); - struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1); + struct ipv6_opt_hdr *exthdr = + (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); unsigned int packet_len = skb->tail - skb_network_header(skb); int found_rhdr = 0; - *nexthdr = &skb->nh.ipv6h->nexthdr; + *nexthdr = &ipv6_hdr(skb)->nexthdr; while (offset + 1 <= packet_len) { @@ -643,7 +644,8 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) first_len = skb_pagelen(skb); skb->data_len = first_len - skb_headlen(skb); skb->len = first_len; - skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr)); + ipv6_hdr(skb)->payload_len = htons(first_len - + sizeof(struct ipv6hdr)); dst_hold(&rt->u.dst); @@ -665,7 +667,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) if (frag->next != NULL) fh->frag_off |= htons(IP6_MF); fh->identification = frag_id; - frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); + ipv6_hdr(frag)->payload_len = + htons(frag->len - + sizeof(struct ipv6hdr)); ip6_copy_metadata(frag, skb); } @@ -779,7 +783,8 @@ slow_path: fh->frag_off = htons(offset); if (left > 0) fh->frag_off |= htons(IP6_MF); - frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); + ipv6_hdr(frag)->payload_len = htons(frag->len - + sizeof(struct ipv6hdr)); ptr += len; offset += len; @@ -1355,7 +1360,7 @@ int ip6_push_pending_frames(struct sock *sk) skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); - hdr = skb->nh.ipv6h; + hdr = ipv6_hdr(skb); *(__be32*)hdr = fl->fl6_flowlabel | htonl(0x60000000 | ((int)np->cork.tclass << 20)); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index bb65779be7a6..05b59a77bc69 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -602,7 +602,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, skb_reset_network_header(skb2); /* Try to guess incoming interface */ - rt = rt6_lookup(&skb2->nh.ipv6h->saddr, NULL, 0, 0); + rt = rt6_lookup(&ipv6_hdr(skb2)->saddr, NULL, 0, 0); if (rt && rt->rt6i_dev) skb2->dev = rt->rt6i_dev; @@ -636,10 +636,10 @@ static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, struct sk_buff *skb) { if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) - ipv6_copy_dscp(ipv6h, skb->nh.ipv6h); + ipv6_copy_dscp(ipv6h, ipv6_hdr(skb)); if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h))) - IP6_ECN_set_ce(skb->nh.ipv6h); + IP6_ECN_set_ce(ipv6_hdr(skb)); } static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) @@ -679,10 +679,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, struct ipv6hdr *ipv6h, struct sk_buff *skb)) { - struct ipv6hdr *ipv6h; struct ip6_tnl *t; - - ipv6h = skb->nh.ipv6h; + struct ipv6hdr *ipv6h = ipv6_hdr(skb); read_lock(&ip6_tnl_lock); @@ -836,7 +834,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, { struct ip6_tnl *t = netdev_priv(dev); struct net_device_stats *stats = &t->stat; - struct ipv6hdr *ipv6h = skb->nh.ipv6h; + struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct ipv6_tel_txoption opt; struct dst_entry *dst; struct net_device *tdev; @@ -909,7 +907,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, } skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); - ipv6h = skb->nh.ipv6h; + ipv6h = ipv6_hdr(skb); *(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000); dsfield = 
INET_ECN_encapsulate(0, dsfield); ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield); @@ -983,7 +981,7 @@ static inline int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); - struct ipv6hdr *ipv6h = skb->nh.ipv6h; + struct ipv6hdr *ipv6h = ipv6_hdr(skb); int encap_limit = -1; __u16 offset; struct flowi fl; diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 3e71d1691b7d..e2404a629680 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -79,7 +79,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; /* Remove ipcomp header and decompress original payload */ - iph = skb->nh.ipv6h; + iph = ipv6_hdr(skb); ipch = (void *)skb->data; skb->h.raw = skb->nh.raw + sizeof(*ipch); __skb_pull(skb, sizeof(*ipch)); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 1d56b465bddb..d16e0fd2cd89 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -101,7 +101,7 @@ static int ipv6_gso_send_check(struct sk_buff *skb) if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; - ipv6h = skb->nh.ipv6h; + ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); err = -EPROTONOSUPPORT; @@ -137,7 +137,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; - ipv6h = skb->nh.ipv6h; + ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); segs = ERR_PTR(-EPROTONOSUPPORT); @@ -153,7 +153,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) goto out; for (skb = segs; skb; skb = skb->next) { - ipv6h = skb->nh.ipv6h; + ipv6h = ipv6_hdr(skb); ipv6h->payload_len = htons(skb->len - skb->mac_len - sizeof(*ipv6h)); } diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 924e24907c3e..b2b37ba48b9c 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1167,11 +1167,11 @@ int igmp6_event_query(struct sk_buff *skb) return -EINVAL; /* compute payload length excluding extension headers */ - len = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr); - len -= (char *)skb->h.raw - (char *)skb->nh.ipv6h; + len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr); + len -= skb->h.raw - skb->nh.raw; /* Drop queries with not link local source */ - if (!(ipv6_addr_type(&skb->nh.ipv6h->saddr)&IPV6_ADDR_LINKLOCAL)) + if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) return -EINVAL; idev = in6_dev_get(skb->dev); @@ -1303,7 +1303,7 @@ int igmp6_event_report(struct sk_buff *skb) hdr = (struct icmp6hdr*) skb->h.raw; /* Drop reports with not link local source */ - addr_type = ipv6_addr_type(&skb->nh.ipv6h->saddr); + addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); if (addr_type != IPV6_ADDR_ANY && !(addr_type&IPV6_ADDR_LINKLOCAL)) return -EINVAL; @@ -1441,7 +1441,7 @@ static inline int mld_dev_queue_xmit2(struct sk_buff *skb) unsigned char ha[MAX_ADDR_LEN]; int err; - ndisc_mc_map(&skb->nh.ipv6h->daddr, ha, dev, 1); + ndisc_mc_map(&ipv6_hdr(skb)->daddr, ha, dev, 1); err = dev->hard_header(skb, dev, ETH_P_IPV6, ha, NULL, skb->len); if (err < 0) { kfree_skb(skb); @@ -1459,15 +1459,14 @@ static inline int mld_dev_queue_xmit(struct sk_buff *skb) static void mld_sendpack(struct sk_buff *skb) { - struct ipv6hdr *pip6 = skb->nh.ipv6h; + struct ipv6hdr *pip6 = ipv6_hdr(skb); struct mld2_report *pmr = (struct mld2_report *)skb->h.raw; int payload_len, mldlen; struct inet6_dev *idev = in6_dev_get(skb->dev); int err; IP6_INC_STATS(idev, 
IPSTATS_MIB_OUTREQUESTS); - payload_len = skb->tail - (unsigned char *)skb->nh.ipv6h - - sizeof(struct ipv6hdr); + payload_len = skb->tail - skb_network_header(skb) - sizeof(*pip6); mldlen = skb->tail - skb->h.raw; pip6->payload_len = htons(payload_len); diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index bb4033553f3b..7b5f9d82e801 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -129,7 +129,7 @@ static struct mip6_report_rate_limiter mip6_report_rl = { static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb) { - struct ipv6hdr *iph = skb->nh.ipv6h; + struct ipv6hdr *iph = ipv6_hdr(skb); struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data; if (!ipv6_addr_equal(&iph->saddr, (struct in6_addr *)x->coaddr) && @@ -223,16 +223,16 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, struct skb_get_timestamp(skb, &stamp); - if (!mip6_report_rl_allow(&stamp, &skb->nh.ipv6h->daddr, - hao ? &hao->addr : &skb->nh.ipv6h->saddr, + if (!mip6_report_rl_allow(&stamp, &ipv6_hdr(skb)->daddr, + hao ? &hao->addr : &ipv6_hdr(skb)->saddr, opt->iif)) goto out; memset(&sel, 0, sizeof(sel)); - memcpy(&sel.daddr, (xfrm_address_t *)&skb->nh.ipv6h->daddr, + memcpy(&sel.daddr, (xfrm_address_t *)&ipv6_hdr(skb)->daddr, sizeof(sel.daddr)); sel.prefixlen_d = 128; - memcpy(&sel.saddr, (xfrm_address_t *)&skb->nh.ipv6h->saddr, + memcpy(&sel.saddr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr, sizeof(sel.saddr)); sel.prefixlen_s = 128; sel.family = AF_INET6; @@ -256,12 +256,13 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); - struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1); + struct ipv6_opt_hdr *exthdr = + (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); const unsigned char *nh = skb_network_header(skb); unsigned int packet_len = skb->tail - nh; int found_rhdr = 0; - *nexthdr = &skb->nh.ipv6h->nexthdr; + *nexthdr = &ipv6_hdr(skb)->nexthdr; while (offset + 1 <= packet_len) { @@ -387,12 +388,13 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); - struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1); + struct ipv6_opt_hdr *exthdr = + (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); const unsigned char *nh = skb_network_header(skb); unsigned int packet_len = skb->tail - nh; int found_rhdr = 0; - *nexthdr = &skb->nh.ipv6h->nexthdr; + *nexthdr = &ipv6_hdr(skb)->nexthdr; while (offset + 1 <= packet_len) { diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index a3e3d9e2f44b..00feb4c4d98b 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -598,7 +598,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, dev->addr_len, dev->type); /* checksum */ - msg->icmph.icmp6_cksum = csum_ipv6_magic(&skb->nh.ipv6h->saddr, + msg->icmph.icmp6_cksum = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, daddr, len, IPPROTO_ICMPV6, csum_partial((__u8 *) msg, @@ -697,7 +697,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, dev->addr_len, dev->type); /* checksum */ - hdr->icmp6_cksum = csum_ipv6_magic(&skb->nh.ipv6h->saddr, daddr, len, + hdr->icmp6_cksum = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, daddr, len, IPPROTO_ICMPV6, csum_partial((__u8 *) hdr, len, 0)); @@ -736,8 +736,8 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) struct in6_addr *target = (struct in6_addr *)&neigh->primary_key; int probes = atomic_read(&neigh->probes); - if (skb && 
ipv6_chk_addr(&skb->nh.ipv6h->saddr, dev, 1)) - saddr = &skb->nh.ipv6h->saddr; + if (skb && ipv6_chk_addr(&ipv6_hdr(skb)->saddr, dev, 1)) + saddr = &ipv6_hdr(skb)->saddr; if ((probes -= neigh->parms->ucast_probes) < 0) { if (!(neigh->nud_state & NUD_VALID)) { @@ -761,8 +761,8 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) static void ndisc_recv_ns(struct sk_buff *skb) { struct nd_msg *msg = (struct nd_msg *)skb->h.raw; - struct in6_addr *saddr = &skb->nh.ipv6h->saddr; - struct in6_addr *daddr = &skb->nh.ipv6h->daddr; + struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; + struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; u32 ndoptlen = skb->tail - msg->opt; struct ndisc_options ndopts; @@ -939,8 +939,8 @@ out: static void ndisc_recv_na(struct sk_buff *skb) { struct nd_msg *msg = (struct nd_msg *)skb->h.raw; - struct in6_addr *saddr = &skb->nh.ipv6h->saddr; - struct in6_addr *daddr = &skb->nh.ipv6h->daddr; + struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; + struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; u32 ndoptlen = skb->tail - msg->opt; struct ndisc_options ndopts; @@ -1044,7 +1044,7 @@ static void ndisc_recv_rs(struct sk_buff *skb) unsigned long ndoptlen = skb->len - sizeof(*rs_msg); struct neighbour *neigh; struct inet6_dev *idev; - struct in6_addr *saddr = &skb->nh.ipv6h->saddr; + struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; struct ndisc_options ndopts; u8 *lladdr = NULL; @@ -1110,7 +1110,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) optlen = (skb->tail - skb->h.raw) - sizeof(struct ra_msg); - if (!(ipv6_addr_type(&skb->nh.ipv6h->saddr) & IPV6_ADDR_LINKLOCAL)) { + if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING "ICMPv6 RA: source address is not link-local.\n"); return; @@ -1176,7 +1176,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) pref = ICMPV6_ROUTER_PREF_MEDIUM; #endif - rt = rt6_get_dflt_router(&skb->nh.ipv6h->saddr, skb->dev); + rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev); if (rt) neigh = rt->rt6i_nexthop; @@ -1191,7 +1191,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) ND_PRINTK3(KERN_DEBUG "ICMPv6 RA: adding default router.\n"); - rt = rt6_add_dflt_router(&skb->nh.ipv6h->saddr, skb->dev, pref); + rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref); if (rt == NULL) { ND_PRINTK0(KERN_ERR "ICMPv6 RA: %s() failed to add default route.\n", @@ -1263,7 +1263,7 @@ skip_defrtr: */ if (!neigh) - neigh = __neigh_lookup(&nd_tbl, &skb->nh.ipv6h->saddr, + neigh = __neigh_lookup(&nd_tbl, &ipv6_hdr(skb)->saddr, skb->dev, 1); if (neigh) { u8 *lladdr = NULL; @@ -1292,7 +1292,7 @@ skip_defrtr: if (((struct route_info *)p)->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) continue; rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, - &skb->nh.ipv6h->saddr); + &ipv6_hdr(skb)->saddr); } } #endif @@ -1351,7 +1351,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) int optlen; u8 *lladdr = NULL; - if (!(ipv6_addr_type(&skb->nh.ipv6h->saddr) & IPV6_ADDR_LINKLOCAL)) { + if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: source address is not link-local.\n"); return; @@ -1416,8 +1416,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1); if (neigh) { - rt6_redirect(dest, &skb->nh.ipv6h->daddr, - &skb->nh.ipv6h->saddr, neigh, lladdr, + rt6_redirect(dest, &ipv6_hdr(skb)->daddr, + &ipv6_hdr(skb)->saddr, 
neigh, lladdr, on_link); neigh_release(neigh); } @@ -1453,14 +1453,14 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, return; } - if (!ipv6_addr_equal(&skb->nh.ipv6h->daddr, target) && + if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) && !(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING "ICMPv6 Redirect: target address is not link-local.\n"); return; } - ndisc_flow_init(&fl, NDISC_REDIRECT, &saddr_buf, &skb->nh.ipv6h->saddr, + ndisc_flow_init(&fl, NDISC_REDIRECT, &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex); dst = ip6_route_output(NULL, &fl); @@ -1515,7 +1515,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, hlen = 0; skb_reserve(buff, LL_RESERVED_SPACE(dev)); - ip6_nd_hdr(sk, buff, dev, &saddr_buf, &skb->nh.ipv6h->saddr, + ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr, IPPROTO_ICMPV6, len); icmph = (struct icmp6hdr *)skb_put(buff, len); @@ -1531,7 +1531,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, addrp = (struct in6_addr *)(icmph + 1); ipv6_addr_copy(addrp, target); addrp++; - ipv6_addr_copy(addrp, &skb->nh.ipv6h->daddr); + ipv6_addr_copy(addrp, &ipv6_hdr(skb)->daddr); opt = (u8*) (addrp + 1); @@ -1552,9 +1552,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, *(opt++) = (rd_len >> 3); opt += 6; - memcpy(opt, skb->nh.ipv6h, rd_len - 8); + memcpy(opt, ipv6_hdr(skb), rd_len - 8); - icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &skb->nh.ipv6h->saddr, + icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &ipv6_hdr(skb)->saddr, len, IPPROTO_ICMPV6, csum_partial((u8 *) icmph, len, 0)); @@ -1588,10 +1588,10 @@ int ndisc_rcv(struct sk_buff *skb) __skb_push(skb, skb->data-skb->h.raw); - if (skb->nh.ipv6h->hop_limit != 255) { + if (ipv6_hdr(skb)->hop_limit != 255) { ND_PRINTK2(KERN_WARNING "ICMPv6 NDISC: invalid hop-limit: %d\n", - skb->nh.ipv6h->hop_limit); + ipv6_hdr(skb)->hop_limit); return 0; } diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 1c405dd30c67..38b149613915 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -11,7 +11,7 @@ int ip6_route_me_harder(struct sk_buff *skb) { - struct ipv6hdr *iph = skb->nh.ipv6h; + struct ipv6hdr *iph = ipv6_hdr(skb); struct dst_entry *dst; struct flowi fl = { .oif = skb->sk ? 
skb->sk->sk_bound_dev_if : 0, @@ -61,7 +61,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb, struct nf_info *info) struct ip6_rt_info *rt_info = nf_info_reroute(info); if (info->hook == NF_IP6_LOCAL_OUT) { - struct ipv6hdr *iph = skb->nh.ipv6h; + struct ipv6hdr *iph = ipv6_hdr(skb); rt_info->daddr = iph->daddr; rt_info->saddr = iph->saddr; @@ -73,7 +73,7 @@ static int nf_ip6_reroute(struct sk_buff **pskb, const struct nf_info *info) struct ip6_rt_info *rt_info = nf_info_reroute(info); if (info->hook == NF_IP6_LOCAL_OUT) { - struct ipv6hdr *iph = (*pskb)->nh.ipv6h; + struct ipv6hdr *iph = ipv6_hdr(*pskb); if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || !ipv6_addr_equal(&iph->saddr, &rt_info->saddr)) return ip6_route_me_harder(*pskb); @@ -84,7 +84,7 @@ static int nf_ip6_reroute(struct sk_buff **pskb, const struct nf_info *info) __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) { - struct ipv6hdr *ip6h = skb->nh.ipv6h; + struct ipv6hdr *ip6h = ipv6_hdr(skb); __sum16 csum = 0; switch (skb->ip_summed) { diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 7c512e13f956..caf9e375a0f1 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -115,7 +115,7 @@ ip6_packet_match(const struct sk_buff *skb, { size_t i; unsigned long ret; - const struct ipv6hdr *ipv6 = skb->nh.ipv6h; + const struct ipv6hdr *ipv6 = ipv6_hdr(skb); #define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg)) @@ -301,7 +301,7 @@ ip6t_do_table(struct sk_buff **pskb, goto no_match; ADD_COUNTER(e->counters, - ntohs((*pskb)->nh.ipv6h->payload_len) + ntohs(ipv6_hdr(*pskb)->payload_len) + IPV6_HDR_LEN, 1); @@ -1448,8 +1448,8 @@ static void __exit ip6_tables_fini(void) int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, unsigned short *fragoff) { - unsigned int start = (u8*)(skb->nh.ipv6h + 1) - skb->data; - u8 nexthdr = skb->nh.ipv6h->nexthdr; + unsigned int start = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; + u8 nexthdr = ipv6_hdr(skb)->nexthdr; unsigned int len = skb->len - start; if (fragoff) diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c index ccbab66277e3..4115a576ba25 100644 --- a/net/ipv6/netfilter/ip6t_HL.c +++ b/net/ipv6/netfilter/ip6t_HL.c @@ -32,7 +32,7 @@ static unsigned int ip6t_hl_target(struct sk_buff **pskb, if (!skb_make_writable(pskb, (*pskb)->len)) return NF_DROP; - ip6h = (*pskb)->nh.ipv6h; + ip6h = ipv6_hdr(*pskb); switch (info->mode) { case IP6T_HL_SET: diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index fc9e51a77784..54d176187f3f 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c @@ -422,7 +422,7 @@ ip6t_log_packet(unsigned int pf, printk(" "); } - dump_packet(loginfo, skb, (u8*)skb->nh.ipv6h - skb->data, 1); + dump_packet(loginfo, skb, skb_network_offset(skb), 1); printk("\n"); spin_unlock_bh(&log_lock); } diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 4441bed430f8..cb3d2415a064 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -47,7 +47,7 @@ static void send_reset(struct sk_buff *oldskb) struct tcphdr otcph, *tcph; unsigned int otcplen, hh_len; int tcphoff, needs_ack; - struct ipv6hdr *oip6h = oldskb->nh.ipv6h, *ip6h; + struct ipv6hdr *oip6h = ipv6_hdr(oldskb), *ip6h; struct dst_entry *dst = NULL; u8 proto; struct flowi fl; @@ -122,7 +122,7 @@ static void send_reset(struct sk_buff 
*oldskb) skb_put(nskb, sizeof(struct ipv6hdr)); skb_reset_network_header(nskb); - ip6h = nskb->nh.ipv6h; + ip6h = ipv6_hdr(nskb); ip6h->version = 6; ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT); ip6h->nexthdr = IPPROTO_TCP; @@ -156,8 +156,8 @@ static void send_reset(struct sk_buff *oldskb) tcph->check = 0; /* Adjust TCP checksum */ - tcph->check = csum_ipv6_magic(&nskb->nh.ipv6h->saddr, - &nskb->nh.ipv6h->daddr, + tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, + &ipv6_hdr(nskb)->daddr, sizeof(struct tcphdr), IPPROTO_TCP, csum_partial((char *)tcph, sizeof(struct tcphdr), 0)); diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c index c2676066a80f..0f3dd932f0a6 100644 --- a/net/ipv6/netfilter/ip6t_eui64.c +++ b/net/ipv6/netfilter/ip6t_eui64.c @@ -42,7 +42,7 @@ match(const struct sk_buff *skb, memset(eui64, 0, sizeof(eui64)); if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) { - if (skb->nh.ipv6h->version == 0x6) { + if (ipv6_hdr(skb)->version == 0x6) { memcpy(eui64, eth_hdr(skb)->h_source, 3); memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); eui64[3] = 0xff; @@ -50,7 +50,7 @@ match(const struct sk_buff *skb, eui64[0] |= 0x02; i = 0; - while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == eui64[i]) + while ((ipv6_hdr(skb)->saddr.s6_addr[8 + i] == eui64[i]) && (i < 8)) i++; diff --git a/net/ipv6/netfilter/ip6t_hl.c b/net/ipv6/netfilter/ip6t_hl.c index 37c8a4d4ed78..d606c0e6d6fd 100644 --- a/net/ipv6/netfilter/ip6t_hl.c +++ b/net/ipv6/netfilter/ip6t_hl.c @@ -25,7 +25,7 @@ static int match(const struct sk_buff *skb, int offset, unsigned int protoff, int *hotdrop) { const struct ip6t_hl_info *info = matchinfo; - const struct ipv6hdr *ip6h = skb->nh.ipv6h; + const struct ipv6hdr *ip6h = ipv6_hdr(skb); switch (info->mode) { case IP6T_HL_EQ: diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c index 700a11d25deb..fd6a0869099b 100644 --- a/net/ipv6/netfilter/ip6t_ipv6header.c +++ b/net/ipv6/netfilter/ip6t_ipv6header.c @@ -45,7 +45,7 @@ ipv6header_match(const struct sk_buff *skb, /* Make sure this isn't an evil packet */ /* type of the 1st exthdr */ - nexthdr = skb->nh.ipv6h->nexthdr; + nexthdr = ipv6_hdr(skb)->nexthdr; /* pointer to the 1st exthdr */ ptr = sizeof(struct ipv6hdr); /* available length */ diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index da2c1994539b..85b1c272946c 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -146,21 +146,21 @@ ip6t_local_hook(unsigned int hook, #endif /* save source/dest address, mark, hoplimit, flowlabel, priority, */ - memcpy(&saddr, &(*pskb)->nh.ipv6h->saddr, sizeof(saddr)); - memcpy(&daddr, &(*pskb)->nh.ipv6h->daddr, sizeof(daddr)); + memcpy(&saddr, &ipv6_hdr(*pskb)->saddr, sizeof(saddr)); + memcpy(&daddr, &ipv6_hdr(*pskb)->daddr, sizeof(daddr)); mark = (*pskb)->mark; - hop_limit = (*pskb)->nh.ipv6h->hop_limit; + hop_limit = ipv6_hdr(*pskb)->hop_limit; /* flowlabel and prio (includes version, which shouldn't change either */ - flowlabel = *((u_int32_t *) (*pskb)->nh.ipv6h); + flowlabel = *((u_int32_t *)ipv6_hdr(*pskb)); ret = ip6t_do_table(pskb, hook, in, out, &packet_mangler); if (ret != NF_DROP && ret != NF_STOLEN - && (memcmp(&(*pskb)->nh.ipv6h->saddr, &saddr, sizeof(saddr)) - || memcmp(&(*pskb)->nh.ipv6h->daddr, &daddr, sizeof(daddr)) + && (memcmp(&ipv6_hdr(*pskb)->saddr, &saddr, sizeof(saddr)) + || memcmp(&ipv6_hdr(*pskb)->daddr, &daddr, sizeof(daddr)) || (*pskb)->mark != mark - || 
(*pskb)->nh.ipv6h->hop_limit != hop_limit)) + || ipv6_hdr(*pskb)->hop_limit != hop_limit)) return ip6_route_me_harder(*pskb) == 0 ? ret : NF_DROP; return ret; diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index d1102455668d..fe7f46c4dbff 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -138,16 +138,10 @@ static int ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff, u_int8_t *protonum) { - unsigned int extoff; - unsigned char pnum; - int protoff; - - extoff = (u8*)((*pskb)->nh.ipv6h + 1) - (*pskb)->data; - pnum = (*pskb)->nh.ipv6h->nexthdr; - - protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum, - (*pskb)->len - extoff); - + unsigned int extoff = (u8 *)(ipv6_hdr(*pskb) + 1) - (*pskb)->data; + unsigned char pnum = ipv6_hdr(*pskb)->nexthdr; + int protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum, + (*pskb)->len - extoff); /* * (protoff == (*pskb)->len) mean that the packet doesn't have no data * except of IPv6 & ext headers. but it's tracked anyway. - YK @@ -179,9 +173,8 @@ static unsigned int ipv6_confirm(unsigned int hooknum, struct nf_conn_help *help; enum ip_conntrack_info ctinfo; unsigned int ret, protoff; - unsigned int extoff = (u8*)((*pskb)->nh.ipv6h + 1) - - (*pskb)->data; - unsigned char pnum = (*pskb)->nh.ipv6h->nexthdr; + unsigned int extoff = (u8 *)(ipv6_hdr(*pskb) + 1) - (*pskb)->data; + unsigned char pnum = ipv6_hdr(*pskb)->nexthdr; /* This is where we call the helper: as the packet goes out. */ diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index bc1d09584008..d3fbb1f1caf5 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -400,8 +400,8 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, } offset = ntohs(fhdr->frag_off) & ~0x7; - end = offset + (ntohs(skb->nh.ipv6h->payload_len) - - ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1))); + end = offset + (ntohs(ipv6_hdr(skb)->payload_len) - + ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); if ((unsigned int)end > IPV6_MAXPLEN) { DEBUGP("offset is too large.\n"); @@ -652,7 +652,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) head->next = NULL; head->dev = dev; head->tstamp = fq->stamp; - head->nh.ipv6h->payload_len = htons(payload_len); + ipv6_hdr(head)->payload_len = htons(payload_len); /* Yes, and fold redundant checksum back. 8) */ if (head->ip_summed == CHECKSUM_COMPLETE) @@ -706,9 +706,9 @@ out_fail: static int find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) { - u8 nexthdr = skb->nh.ipv6h->nexthdr; - u8 prev_nhoff = (u8 *)&skb->nh.ipv6h->nexthdr - skb->data; - int start = (u8 *)(skb->nh.ipv6h+1) - skb->data; + u8 nexthdr = ipv6_hdr(skb)->nexthdr; + u8 prev_nhoff = (u8 *)&ipv6_hdr(skb)->nexthdr - skb->data; + int start = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; int len = skb->len - start; u8 prevhdr = NEXTHDR_IPV6; @@ -764,7 +764,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) struct sk_buff *ret_skb = NULL; /* Jumbo payload inhibits frag. 
header */ - if (skb->nh.ipv6h->payload_len == 0) { + if (ipv6_hdr(skb)->payload_len == 0) { DEBUGP("payload len = 0\n"); return skb; } @@ -786,7 +786,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) } clone->h.raw = clone->data + fhoff; - hdr = clone->nh.ipv6h; + hdr = ipv6_hdr(clone); fhdr = (struct frag_hdr *)clone->h.raw; if (!(fhdr->frag_off & htons(0xFFF9))) { diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 9b2bcde73f19..0e2b56ce0a56 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -152,7 +152,7 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) int delivered = 0; __u8 hash; - saddr = &skb->nh.ipv6h->saddr; + saddr = &ipv6_hdr(skb)->saddr; daddr = saddr + 1; hash = nexthdr & (MAX_INET_PROTOS - 1); @@ -363,15 +363,16 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) if (skb->ip_summed == CHECKSUM_COMPLETE) { skb_postpull_rcsum(skb, skb_network_header(skb), skb->h.raw - skb->nh.raw); - if (!csum_ipv6_magic(&skb->nh.ipv6h->saddr, - &skb->nh.ipv6h->daddr, + if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, skb->len, inet->num, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr, - &skb->nh.ipv6h->daddr, - skb->len, inet->num, 0)); + skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len, + inet->num, 0)); if (inet->hdrincl) { if (skb_checksum_complete(skb)) { @@ -438,7 +439,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, if (sin6) { sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; - ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); + ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = 0; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) @@ -578,7 +579,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, skb_put(skb, length); skb_reset_network_header(skb); - iph = skb->nh.ipv6h; + iph = ipv6_hdr(skb); skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index f85e49acb91a..156a37fe3ff3 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -430,8 +430,8 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, goto err; offset = ntohs(fhdr->frag_off) & ~0x7; - end = offset + (ntohs(skb->nh.ipv6h->payload_len) - - ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1))); + end = offset + (ntohs(ipv6_hdr(skb)->payload_len) - + ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); if ((unsigned int)end > IPV6_MAXPLEN) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), @@ -671,7 +671,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, head->next = NULL; head->dev = dev; head->tstamp = fq->stamp; - head->nh.ipv6h->payload_len = htons(payload_len); + ipv6_hdr(head)->payload_len = htons(payload_len); IP6CB(head)->nhoff = nhoff; *skb_in = head; @@ -708,9 +708,7 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) struct net_device *dev = skb->dev; struct frag_hdr *fhdr; struct frag_queue *fq; - struct ipv6hdr *hdr; - - hdr = skb->nh.ipv6h; + struct ipv6hdr *hdr = ipv6_hdr(skb); IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS); @@ -726,7 +724,7 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) return -1; } - hdr = skb->nh.ipv6h; + hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb->h.raw; if (!(fhdr->frag_off & htons(0xFFF9))) { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 
0aa4762f53f7..52cbe1cd4045 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -726,7 +726,7 @@ out2: void ip6_route_input(struct sk_buff *skb) { - struct ipv6hdr *iph = skb->nh.ipv6h; + struct ipv6hdr *iph = ipv6_hdr(skb); int flags = RT6_LOOKUP_F_HAS_SADDR; struct flowi fl = { .iif = skb->dev->ifindex, @@ -1775,7 +1775,7 @@ static inline int ip6_pkt_drop(struct sk_buff *skb, int code, int type; switch (ipstats_mib_noroutes) { case IPSTATS_MIB_INNOROUTES: - type = ipv6_addr_type(&skb->nh.ipv6h->daddr); + type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) { IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS); break; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e33ac3c3a9ca..18ec86f177d9 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -366,7 +366,7 @@ out: static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) { if (INET_ECN_is_ce(iph->tos)) - IP6_ECN_set_ce(skb->nh.ipv6h); + IP6_ECN_set_ce(ipv6_hdr(skb)); } static int ipip6_rcv(struct sk_buff *skb) @@ -430,7 +430,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) struct ip_tunnel *tunnel = netdev_priv(dev); struct net_device_stats *stats = &tunnel->stat; struct iphdr *tiph = &tunnel->parms.iph; - struct ipv6hdr *iph6 = skb->nh.ipv6h; + struct ipv6hdr *iph6 = ipv6_hdr(skb); u8 tos = tunnel->parms.iph.tos; struct rtable *rt; /* Route to the other host */ struct net_device *tdev; /* Device to other host */ @@ -468,7 +468,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) addr_type = ipv6_addr_type(addr6); if (addr_type == IPV6_ADDR_ANY) { - addr6 = &skb->nh.ipv6h->daddr; + addr6 = &ipv6_hdr(skb)->daddr; addr_type = ipv6_addr_type(addr6); } @@ -550,7 +550,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb(skb); skb = new_skb; - iph6 = skb->nh.ipv6h; + iph6 = ipv6_hdr(skb); } skb->h.raw = skb->nh.raw; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 80a52ab1e384..85b3e89110f9 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -115,8 +115,8 @@ static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len, static __u32 tcp_v6_init_sequence(struct sk_buff *skb) { - return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, - skb->nh.ipv6h->saddr.s6_addr32, + return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, + ipv6_hdr(skb)->saddr.s6_addr32, skb->h.th->dest, skb->h.th->source); } @@ -837,7 +837,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) { __u8 *hash_location = NULL; struct tcp_md5sig_key *hash_expected; - struct ipv6hdr *ip6h = skb->nh.ipv6h; + struct ipv6hdr *ip6h = ipv6_hdr(skb); struct tcphdr *th = skb->h.th; int length = (th->doff << 2) - sizeof (*th); int genhash; @@ -966,7 +966,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) if (!pskb_may_pull(skb, sizeof(*th))) return -EINVAL; - ipv6h = skb->nh.ipv6h; + ipv6h = ipv6_hdr(skb); th = skb->h.th; th->check = 0; @@ -995,7 +995,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) #ifdef CONFIG_TCP_MD5SIG if (sk) - key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr); + key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr); else key = NULL; @@ -1039,20 +1039,18 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); - tcp_v6_do_calc_md5_hash((__u8*)&opt[1], - key, - 
&skb->nh.ipv6h->daddr, - &skb->nh.ipv6h->saddr, - t1, IPPROTO_TCP, - tot_len); + tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key, + &ipv6_hdr(skb)->daddr, + &ipv6_hdr(skb)->saddr, + t1, IPPROTO_TCP, tot_len); } #endif buff->csum = csum_partial((char *)t1, sizeof(*t1), 0); memset(&fl, 0, sizeof(fl)); - ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr); - ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr); + ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); + ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, sizeof(*t1), IPPROTO_TCP, @@ -1093,7 +1091,7 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, #ifdef CONFIG_TCP_MD5SIG if (!tw && skb->sk) { - key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr); + key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr); } else if (tw && tw->tw_md5_keylen) { tw_key.key = tw->tw_md5_key; tw_key.keylen = tw->tw_md5_keylen; @@ -1142,20 +1140,18 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, if (key) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); - tcp_v6_do_calc_md5_hash((__u8 *)topt, - key, - &skb->nh.ipv6h->daddr, - &skb->nh.ipv6h->saddr, - t1, IPPROTO_TCP, - tot_len); + tcp_v6_do_calc_md5_hash((__u8 *)topt, key, + &ipv6_hdr(skb)->daddr, + &ipv6_hdr(skb)->saddr, + t1, IPPROTO_TCP, tot_len); } #endif buff->csum = csum_partial((char *)t1, tot_len, 0); memset(&fl, 0, sizeof(fl)); - ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr); - ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr); + ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); + ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, tot_len, IPPROTO_TCP, @@ -1204,13 +1200,13 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) /* Find possible connection requests. 
*/ req = inet6_csk_search_req(sk, &prev, th->source, - &skb->nh.ipv6h->saddr, - &skb->nh.ipv6h->daddr, inet6_iif(skb)); + &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, inet6_iif(skb)); if (req) return tcp_check_req(sk, skb, req, prev); - nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr, - th->source, &skb->nh.ipv6h->daddr, + nsk = __inet6_lookup_established(&tcp_hashinfo, &ipv6_hdr(skb)->saddr, + th->source, &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb)); if (nsk) { @@ -1277,8 +1273,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) tcp_openreq_init(req, &tmp_opt, skb); treq = inet6_rsk(req); - ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr); - ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr); + ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); + ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); TCP_ECN_create_request(req, skb->h.th); treq->pktopts = NULL; if (ipv6_opt_accepted(sk, skb) || @@ -1365,7 +1361,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); - newnp->mcast_hops = skb->nh.ipv6h->hop_limit; + newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count @@ -1473,7 +1469,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, } newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); - newnp->mcast_hops = skb->nh.ipv6h->hop_limit; + newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* Clone native IPv6 options from listening socket (if any) @@ -1532,15 +1528,16 @@ out: static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_COMPLETE) { - if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr, - &skb->nh.ipv6h->daddr,skb->csum)) { + if (!tcp_v6_check(skb->h.th, skb->len, &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; return 0; } } - skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr, - &skb->nh.ipv6h->daddr, 0)); + skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th, skb->len, + &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, 0)); if (skb->len <= 76) { return __skb_checksum_complete(skb); @@ -1668,7 +1665,7 @@ ipv6_pktoptions: if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) np->mcast_oif = inet6_iif(opt_skb); if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) - np->mcast_hops = opt_skb->nh.ipv6h->hop_limit; + np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; if (ipv6_opt_accepted(sk, opt_skb)) { skb_set_owner_r(opt_skb, sk); opt_skb = xchg(&np->pktoptions, opt_skb); @@ -1718,11 +1715,11 @@ static int tcp_v6_rcv(struct sk_buff **pskb) skb->len - th->doff*4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->when = 0; - TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h); + TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); TCP_SKB_CB(skb)->sacked = 0; - sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source, - &skb->nh.ipv6h->daddr, ntohs(th->dest), + sk = __inet6_lookup(&tcp_hashinfo, &ipv6_hdr(skb)->saddr, th->source, + &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb)); if (!sk) @@ -1802,7 +1799,7 @@ do_time_wait: struct sock *sk2; sk2 = inet6_lookup_listener(&tcp_hashinfo, - &skb->nh.ipv6h->daddr, + &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb)); if (sk2 != NULL) { struct inet_timewait_sock *tw = inet_twsk(sk); diff --git 
a/net/ipv6/udp.c b/net/ipv6/udp.c index e991e606ab1f..55affe39b2eb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -180,7 +180,8 @@ try_again: ipv6_addr_set(&sin6->sin6_addr, 0, 0, htonl(0xffff), ip_hdr(skb)->saddr); else { - ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); + ipv6_addr_copy(&sin6->sin6_addr, + &ipv6_hdr(skb)->saddr); if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6->sin6_scope_id = IP6CB(skb)->iif; } @@ -392,13 +393,13 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, return 1; } if (skb->ip_summed == CHECKSUM_COMPLETE && - !csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, + !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len, proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr, - &skb->nh.ipv6h->daddr, + skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, skb->len, proto, 0)); return 0; @@ -417,8 +418,8 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto short_packet; - saddr = &skb->nh.ipv6h->saddr; - daddr = &skb->nh.ipv6h->daddr; + saddr = &ipv6_hdr(skb)->saddr; + daddr = &ipv6_hdr(skb)->daddr; uh = skb->h.uh; ulen = ntohs(uh->len); @@ -438,8 +439,8 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], if (ulen < skb->len) { if (pskb_trim_rcsum(skb, ulen)) goto short_packet; - saddr = &skb->nh.ipv6h->saddr; - daddr = &skb->nh.ipv6h->daddr; + saddr = &ipv6_hdr(skb)->saddr; + daddr = &ipv6_hdr(skb)->daddr; uh = skb->h.uh; } } diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 5c929f886129..f30ef16072f6 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -35,7 +35,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi) goto drop; do { - struct ipv6hdr *iph = skb->nh.ipv6h; + struct ipv6hdr *iph = ipv6_hdr(skb); if (xfrm_nr == XFRM_MAX_DEPTH) goto drop; @@ -112,7 +112,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi) return -1; } else { #ifdef CONFIG_NETFILTER - skb->nh.ipv6h->payload_len = htons(skb->len); + ipv6_hdr(skb)->payload_len = htons(skb->len); __skb_push(skb, skb->data - skb_network_header(skb)); NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL, diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index 247e2d5d2acf..b5a48c255f07 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ b/net/ipv6/xfrm6_mode_beet.c @@ -38,7 +38,7 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) int hdr_len; skb_push(skb, x->props.header_len); - iph = skb->nh.ipv6h; + iph = ipv6_hdr(skb); hdr_len = ip6_find_1stfragopt(skb, &prevhdr); skb->nh.raw = prevhdr - x->props.header_len; @@ -46,7 +46,7 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) memmove(skb->data, iph, hdr_len); skb_reset_network_header(skb); - top_iph = skb->nh.ipv6h; + top_iph = ipv6_hdr(skb); skb->nh.raw = &top_iph->nexthdr; skb->h.ipv6h = top_iph + 1; @@ -74,7 +74,7 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) skb_set_mac_header(skb, -skb->mac_len); memmove(skb_mac_header(skb), old_mac, skb->mac_len); - ip6h = skb->nh.ipv6h; + ip6h = ipv6_hdr(skb); ip6h->payload_len = htons(skb->len - size); ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6); ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6); diff --git 
a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c index 6031c16d46ca..d01958d921ac 100644 --- a/net/ipv6/xfrm6_mode_ro.c +++ b/net/ipv6/xfrm6_mode_ro.c @@ -50,7 +50,7 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb) int hdr_len; skb_push(skb, x->props.header_len); - iph = skb->nh.ipv6h; + iph = ipv6_hdr(skb); hdr_len = x->type->hdr_offset(x, skb, &prevhdr); skb->nh.raw = prevhdr - x->props.header_len; diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index ace0bbf4f25d..358b60d9d912 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c @@ -32,7 +32,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) int hdr_len; skb_push(skb, x->props.header_len); - iph = skb->nh.ipv6h; + iph = ipv6_hdr(skb); hdr_len = x->type->hdr_offset(x, skb, &prevhdr); skb->nh.raw = prevhdr - x->props.header_len; @@ -57,7 +57,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) memmove(skb->h.raw, skb_network_header(skb), ihl); skb->nh.raw = skb->h.raw; } - skb->nh.ipv6h->payload_len = htons(skb->len + ihl - + ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - sizeof(struct ipv6hdr)); skb->h.raw = skb->data; return 0; diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 498f17b5c42f..28f36b363d1f 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -18,7 +18,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) { - struct ipv6hdr *outer_iph = skb->nh.ipv6h; + struct ipv6hdr *outer_iph = ipv6_hdr(skb); struct ipv6hdr *inner_iph = skb->h.ipv6h; if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph))) @@ -27,7 +27,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) static inline void ip6ip_ecn_decapsulate(struct sk_buff *skb) { - if (INET_ECN_is_ce(ipv6_get_dsfield(skb->nh.ipv6h))) + if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6_hdr(skb)))) IP_ECN_set_ce(skb->h.ipiph); } @@ -51,10 +51,10 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) int dsfield; skb_push(skb, x->props.header_len); - iph = skb->nh.ipv6h; + iph = ipv6_hdr(skb); skb_reset_network_header(skb); - top_iph = skb->nh.ipv6h; + top_iph = ipv6_hdr(skb); skb->nh.raw = &top_iph->nexthdr; skb->h.ipv6h = top_iph + 1; @@ -102,7 +102,7 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) nh = skb_network_header(skb); if (nh[IP6CB(skb)->nhoff] == IPPROTO_IPV6) { if (x->props.flags & XFRM_STATE_DECAP_DSCP) - ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h); + ipv6_copy_dscp(ipv6_hdr(skb), skb->h.ipv6h); if (!(x->props.flags & XFRM_STATE_NOECN)) ipip6_ecn_decapsulate(skb); } else { diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index cb5a723d4cb4..b93bfb87f494 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -271,7 +271,7 @@ static inline void _decode_session6(struct sk_buff *skb, struct flowi *fl) { u16 offset = skb->h.raw - skb->nh.raw; - struct ipv6hdr *hdr = skb->nh.ipv6h; + struct ipv6hdr *hdr = ipv6_hdr(skb); struct ipv6_opt_hdr *exthdr; const unsigned char *nh = skb_network_header(skb); u8 nexthdr = nh[IP6CB(skb)->nhoff]; diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 93c42232aa39..538499a89975 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -257,7 +257,7 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) static int xfrm6_tunnel_rcv(struct sk_buff *skb) { - struct ipv6hdr *iph = skb->nh.ipv6h; + struct 
ipv6hdr *iph = ipv6_hdr(skb); __be32 spi; spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr); diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c index de647bd54893..a97704a3f95c 100644 --- a/net/netfilter/xt_DSCP.c +++ b/net/netfilter/xt_DSCP.c @@ -56,13 +56,13 @@ static unsigned int target6(struct sk_buff **pskb, const void *targinfo) { const struct xt_DSCP_info *dinfo = targinfo; - u_int8_t dscp = ipv6_get_dsfield((*pskb)->nh.ipv6h) >> XT_DSCP_SHIFT; + u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(*pskb)) >> XT_DSCP_SHIFT; if (dscp != dinfo->dscp) { if (!skb_make_writable(pskb, sizeof(struct ipv6hdr))) return NF_DROP; - ipv6_change_dsfield((*pskb)->nh.ipv6h, (__u8)(~XT_DSCP_MASK), + ipv6_change_dsfield(ipv6_hdr(*pskb), (__u8)(~XT_DSCP_MASK), dinfo->dscp << XT_DSCP_SHIFT); } return XT_CONTINUE; diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 9e948ce27600..15fe8f649510 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -171,7 +171,7 @@ xt_tcpmss_target6(struct sk_buff **pskb, const struct xt_target *target, const void *targinfo) { - struct ipv6hdr *ipv6h = (*pskb)->nh.ipv6h; + struct ipv6hdr *ipv6h = ipv6_hdr(*pskb); u8 nexthdr; int tcphoff; int ret; @@ -187,7 +187,7 @@ xt_tcpmss_target6(struct sk_buff **pskb, if (ret < 0) return NF_DROP; if (ret > 0) { - ipv6h = (*pskb)->nh.ipv6h; + ipv6h = ipv6_hdr(*pskb); ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret); } return XT_CONTINUE; diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c index 9ec294cd2436..000e9c2f8d1f 100644 --- a/net/netfilter/xt_dscp.c +++ b/net/netfilter/xt_dscp.c @@ -49,7 +49,7 @@ static int match6(const struct sk_buff *skb, int *hotdrop) { const struct xt_dscp_info *info = matchinfo; - u_int8_t dscp = ipv6_get_dsfield(skb->nh.ipv6h) >> XT_DSCP_SHIFT; + u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT; return (dscp == info->dscp) ^ !!info->invert; } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 47af19ab03cf..eb932913693b 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -392,10 +392,10 @@ hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst, #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) case AF_INET6: if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) - memcpy(&dst->addr.ip6.dst, &skb->nh.ipv6h->daddr, + memcpy(&dst->addr.ip6.dst, &ipv6_hdr(skb)->daddr, sizeof(dst->addr.ip6.dst)); if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) - memcpy(&dst->addr.ip6.src, &skb->nh.ipv6h->saddr, + memcpy(&dst->addr.ip6.src, &ipv6_hdr(skb)->saddr, sizeof(dst->addr.ip6.src)); if (!(hinfo->cfg.mode & diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c index 65fdb2166996..77288c5ada78 100644 --- a/net/netfilter/xt_length.c +++ b/net/netfilter/xt_length.c @@ -47,7 +47,8 @@ match6(const struct sk_buff *skb, int *hotdrop) { const struct xt_length_info *info = matchinfo; - u_int16_t pktlen = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr); + const u_int16_t pktlen = (ntohs(ipv6_hdr(skb)->payload_len) + + sizeof(struct ipv6hdr)); return (pktlen >= info->min && pktlen <= info->max) ^ info->invert; } diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index cb8cf5bfa053..6f373b020eb4 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -143,7 +143,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, u8 tunnelid = 0; u8 *xprt; #if RSVP_DST_LEN == 4 - struct ipv6hdr *nhptr = 
skb->nh.ipv6h; + struct ipv6hdr *nhptr = ipv6_hdr(skb); #else struct iphdr *nhptr = ip_hdr(skb); #endif diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 45b5734dd72a..2c857af79a1e 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -220,7 +220,7 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch) & ~INET_ECN_MASK; break; case __constant_htons(ETH_P_IPV6): - skb->tc_index = ipv6_get_dsfield(skb->nh.ipv6h) + skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK; break; default: @@ -296,7 +296,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) p->value[index]); break; case __constant_htons(ETH_P_IPV6): - ipv6_change_dsfield(skb->nh.ipv6h, p->mask[index], + ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index], p->value[index]); break; default: diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 02081bc9e0d1..e3695407afc6 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -152,7 +152,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) } case __constant_htons(ETH_P_IPV6): { - struct ipv6hdr *iph = skb->nh.ipv6h; + struct ipv6hdr *iph = ipv6_hdr(skb); h = iph->daddr.s6_addr32[3]; h2 = iph->saddr.s6_addr32[3]^iph->nexthdr; if (iph->nexthdr == IPPROTO_TCP || diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 742f9ff42fbf..001be2de0b3c 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -393,10 +393,10 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, sh = (struct sctphdr *) skb->h.raw; if (is_saddr) { *port = sh->source; - from = &skb->nh.ipv6h->saddr; + from = &ipv6_hdr(skb)->saddr; } else { *port = sh->dest; - from = &skb->nh.ipv6h->daddr; + from = &ipv6_hdr(skb)->daddr; } ipv6_addr_copy(&addr->v6.sin6_addr, from); } @@ -698,7 +698,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb) /* Was this packet marked by Explicit Congestion Notification? */ static int sctp_v6_is_ce(const struct sk_buff *skb) { - return *((__u32 *)(skb->nh.ipv6h)) & htonl(1<<20); + return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20); } /* Dump the v6 addr to the seq file. */ @@ -777,7 +777,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, } /* Otherwise, just copy the v6 address. */ - ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); + ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) { struct sctp_ulpevent *ev = sctp_skb2event(skb); sin6->sin6_scope_id = ev->iif; -- cgit v1.2.3-59-g8ed1b From badff6d01a8589a1c828b0bf118903ca38627f4e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 13:06:52 -0300 Subject: [SK_BUFF]: Introduce skb_reset_transport_header(skb) For the common, open coded 'skb->h.raw = skb->data' operation, so that we can later turn skb->h.raw into an offset, reducing the size of struct sk_buff in 64bit land while possibly keeping it as a pointer on 32bit. This one touches just the most simple cases: skb->h.raw = skb->data; skb->h.raw = {skb_push|[__]skb_pull}() The next ones will handle the slightly more "complex" cases. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S.
Miller --- drivers/infiniband/hw/cxgb3/iwch_cm.c | 6 +++--- drivers/net/appletalk/cops.c | 2 +- drivers/net/appletalk/ltpc.c | 4 ++-- drivers/net/cxgb3/sge.c | 2 +- include/linux/dccp.h | 6 +++--- include/linux/skbuff.h | 5 +++++ net/appletalk/aarp.c | 6 +++--- net/appletalk/ddp.c | 4 ++-- net/ax25/af_ax25.c | 4 ++-- net/ax25/ax25_in.c | 8 ++++---- net/bluetooth/af_bluetooth.c | 2 +- net/bluetooth/hci_core.c | 9 +++++---- net/bluetooth/hci_sock.c | 2 +- net/core/dev.c | 2 +- net/core/netpoll.c | 2 +- net/decnet/dn_nsp_in.c | 2 +- net/decnet/dn_nsp_out.c | 2 +- net/decnet/dn_route.c | 4 ++-- net/ipv4/af_inet.c | 6 ++++-- net/ipv4/ah4.c | 3 ++- net/ipv4/ip_input.c | 2 +- net/ipv4/ip_output.c | 2 +- net/ipv4/ipmr.c | 2 +- net/ipv4/udp.c | 3 ++- net/ipv4/xfrm4_mode_transport.c | 2 +- net/ipv6/ip6_input.c | 2 +- net/ipv6/ip6_output.c | 8 ++++---- net/ipv6/ipv6_sockglue.c | 4 ++-- net/ipv6/netfilter/nf_conntrack_reasm.c | 2 +- net/ipv6/reassembly.c | 2 +- net/ipv6/xfrm6_mode_transport.c | 2 +- net/ipx/af_ipx.c | 2 +- net/ipx/ipx_route.c | 2 +- net/irda/af_irda.c | 4 ++-- net/irda/irlap_frame.c | 2 +- net/iucv/af_iucv.c | 2 +- net/key/af_key.c | 2 +- net/llc/llc_sap.c | 2 +- net/netlink/af_netlink.c | 2 +- net/netrom/af_netrom.c | 6 +++--- net/netrom/nr_in.c | 2 +- net/netrom/nr_loopback.c | 2 +- net/rose/af_rose.c | 2 +- net/rose/rose_loopback.c | 2 +- net/rose/rose_route.c | 2 +- net/unix/af_unix.c | 2 +- net/x25/af_x25.c | 3 +-- net/x25/x25_dev.c | 2 +- net/x25/x25_in.c | 2 +- 49 files changed, 82 insertions(+), 73 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 2d2de9b8b729..66ad4d40ba1d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -507,7 +507,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) */ skb_get(skb); set_arp_failure_handler(skb, arp_failure_discard); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); len = skb->len; req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); @@ -559,7 +559,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) skb_get(skb); skb->priority = CPL_PRIORITY_DATA; set_arp_failure_handler(skb, arp_failure_discard); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); req->wr_lo = htonl(V_WR_TID(ep->hwtid)); @@ -610,7 +610,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) */ skb_get(skb); set_arp_failure_handler(skb, arp_failure_discard); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); len = skb->len; req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 28cb79cee910..da6ffa8cd81e 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -855,7 +855,7 @@ static void cops_rx(struct net_device *dev) skb_reset_mac_header(skb); /* Point to entire packet. */ skb_pull(skb,3); - skb->h.raw = skb->data; /* Point to data (Skip header). */ + skb_reset_transport_header(skb); /* Point to data (Skip header). */ /* Update the counters. 
*/ lp->stats.rx_packets++; diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 12682439f8bd..dc3bce992dcf 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -776,7 +776,7 @@ static int sendup_buffer (struct net_device *dev) /* copy ddp(s,e)hdr + contents */ memcpy(skb->data,(void*)ltdmabuf,len); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); stats->rx_packets++; stats->rx_bytes+=skb->len; @@ -923,7 +923,7 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev) cbuf.laptype = skb->data[2]; skb_pull(skb,3); /* skip past LLAP header */ cbuf.length = skb->len; /* this is host order */ - skb->h.raw=skb->data; + skb_reset_transport_header(skb); if(debug & DEBUG_UPPER) { printk("command "); diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 892e5dcafa04..a891f6f81527 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -1622,7 +1622,7 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, rq->offload_pkts++; skb_reset_mac_header(skb); skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); if (rq->polling) { rx_gather[gather_idx++] = skb; diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 1cb054bd93f2..1f4df61735f7 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -265,9 +265,9 @@ static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb) static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen) { - skb->h.raw = skb_push(skb, headlen); - memset(skb->h.raw, 0, headlen); - return dccp_hdr(skb); + skb_push(skb, headlen); + skb_reset_transport_header(skb); + return memset(skb->h.raw, 0, headlen); } static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 31806a7ce40e..7c1f1756e482 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -957,6 +957,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +static inline void skb_reset_transport_header(struct sk_buff *skb) +{ + skb->h.raw = skb->data; +} + static inline unsigned char *skb_network_header(const struct sk_buff *skb) { return skb->nh.raw; diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index de495c57aa90..5ef6a238bdbc 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c @@ -119,7 +119,7 @@ static void __aarp_send_query(struct aarp_entry *a) /* Set up the buffer */ skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length); skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb_put(skb, sizeof(*eah)); skb->protocol = htons(ETH_P_ATALK); skb->dev = dev; @@ -166,7 +166,7 @@ static void aarp_send_reply(struct net_device *dev, struct atalk_addr *us, /* Set up the buffer */ skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length); skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb_put(skb, sizeof(*eah)); skb->protocol = htons(ETH_P_ATALK); skb->dev = dev; @@ -217,7 +217,7 @@ static void aarp_send_probe(struct net_device *dev, struct atalk_addr *us) /* Set up the buffer */ skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length); skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb_put(skb, sizeof(*eah)); skb->protocol = htons(ETH_P_ATALK); skb->dev = dev; diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 
934f25993ce8..137341b4d833 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1275,7 +1275,7 @@ static int handle_ip_over_ddp(struct sk_buff *skb) skb->protocol = htons(ETH_P_IP); skb_pull(skb, 13); skb->dev = dev; - skb->h.raw = skb->data; + skb_reset_transport_header(skb); stats = dev->priv; stats->rx_packets++; @@ -1522,7 +1522,7 @@ static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, /* Non routable, so force a drop if we slip up later */ ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10)); } - skb->h.raw = skb->data; + skb_reset_transport_header(skb); return atalk_rcv(skb, dev, pt, orig_dev); freeit: diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index b1a4d60ce9a8..14db01a4ff63 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1631,8 +1631,8 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, if (!ax25_sk(sk)->pidincl) skb_pull(skb, 1); /* Remove PID */ - skb->h.raw = skb->data; - copied = skb->len; + skb_reset_transport_header(skb); + copied = skb->len; if (copied > size) { copied = size; diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 3b2aac670266..724ad5ce75d4 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -62,7 +62,7 @@ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) skbn->dev = ax25->ax25_dev->dev; skb_reset_network_header(skbn); - skbn->h.raw = skbn->data; + skb_reset_transport_header(skbn); /* Copy data from the fragments */ while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) { @@ -196,7 +196,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, * Process the AX.25/LAPB frame. */ - skb->h.raw = skb->data; + skb_reset_transport_header(skb); if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) { kfree_skb(skb); @@ -246,7 +246,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, switch (skb->data[1]) { case AX25_P_IP: skb_pull(skb,2); /* drop PID/CTRL */ - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb_reset_network_header(skb); skb->dev = dev; skb->pkt_type = PACKET_HOST; @@ -256,7 +256,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, case AX25_P_ARP: skb_pull(skb,2); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb_reset_network_header(skb); skb->dev = dev; skb->pkt_type = PACKET_HOST; diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index c7228cfc6218..d942b946ba07 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -221,7 +221,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, copied = len; } - skb->h.raw = skb->data; + skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); skb_free_datagram(sk, skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 64fea0903fd7..c11ceb6b3f79 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1074,11 +1074,11 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) struct hci_acl_hdr *hdr; int len = skb->len; - hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE); + skb_push(skb, HCI_ACL_HDR_SIZE); + skb_reset_transport_header(skb); + hdr = (struct hci_acl_hdr *)skb->h.raw; hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); hdr->dlen = cpu_to_le16(len); - - skb->h.raw = (void *) hdr; } int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) @@ -1143,7 +1143,8 @@ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) hdr.handle = 
cpu_to_le16(conn->handle); hdr.dlen = skb->len; - skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE); + skb_push(skb, HCI_SCO_HDR_SIZE); + skb_reset_transport_header(skb); memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE); skb->dev = (void *) hdev; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 71f5cfbbebb8..832b5f44be5c 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -375,7 +375,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, copied = len; } - skb->h.raw = skb->data; + skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); hci_sock_cmsg(sk, msg, skb); diff --git a/net/core/dev.c b/net/core/dev.c index 3af0bdc86491..99f15728d9cb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1773,7 +1773,7 @@ int netif_receive_skb(struct sk_buff *skb) __get_cpu_var(netdev_rx_stat).total++; skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb->mac_len = skb->nh.raw - skb->mac.raw; pt_prev = NULL; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 496b06244a8e..8b22723d6436 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -362,7 +362,7 @@ static void arp_reply(struct sk_buff *skb) return; skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); arp = arp_hdr(skb); if ((arp->ar_hrd != htons(ARPHRD_ETHER) && diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index 9d20904f6f52..d9498a165acf 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -725,7 +725,7 @@ static int dn_nsp_rx_packet(struct sk_buff *skb) if (!pskb_may_pull(skb, 2)) goto free_out; - skb->h.raw = skb->data; + skb_reset_transport_header(skb); cb->nsp_flags = *ptr++; if (decnet_debug_level & 2) diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index 2d2cda82c7db..84b8c5b45fef 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -79,7 +79,7 @@ static void dn_nsp_send(struct sk_buff *skb) struct dst_entry *dst; struct flowi fl; - skb->h.raw = skb->data; + skb_reset_transport_header(skb); scp->stamp = jiffies; dst = sk_dst_check(sk, 0); diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 32a7db36c9e5..bb73bf16630f 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -504,7 +504,7 @@ static int dn_route_rx_long(struct sk_buff *skb) goto drop_it; skb_pull(skb, 20); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); /* Destination info */ ptr += 2; @@ -542,7 +542,7 @@ static int dn_route_rx_short(struct sk_buff *skb) goto drop_it; skb_pull(skb, 5); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); cb->dst = *(__le16 *)ptr; ptr += 2; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index e7720c72a6e2..f011390f19c9 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1120,7 +1120,8 @@ static int inet_gso_send_check(struct sk_buff *skb) if (unlikely(!pskb_may_pull(skb, ihl))) goto out; - skb->h.raw = __skb_pull(skb, ihl); + __skb_pull(skb, ihl); + skb_reset_transport_header(skb); iph = ip_hdr(skb); proto = iph->protocol & (MAX_INET_PROTOS - 1); err = -EPROTONOSUPPORT; @@ -1163,7 +1164,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) if (unlikely(!pskb_may_pull(skb, ihl))) goto out; - skb->h.raw = __skb_pull(skb, ihl); + __skb_pull(skb, ihl); + skb_reset_transport_header(skb); iph = ip_hdr(skb); id = ntohs(iph->id); proto = iph->protocol & (MAX_INET_PROTOS - 1); diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 
00fd31da252e..ebcc797e1c13 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -182,7 +182,8 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) } ((struct iphdr*)work_buf)->protocol = ah->nexthdr; skb->nh.raw += ah_hlen; - skb->h.raw = memcpy(skb_network_header(skb), work_buf, ihl); + memcpy(skb_network_header(skb), work_buf, ihl); + skb->h.raw = skb->nh.raw; __skb_pull(skb, ah_hlen + ihl); return 0; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 237880a80432..324e7e0fdb2a 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -201,7 +201,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb) __skb_pull(skb, ip_hdrlen(skb)); /* Point into the IP datagram, just past the header. */ - skb->h.raw = skb->data; + skb_reset_transport_header(skb); rcu_read_lock(); { diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 1abc48899f2d..63c05be0764d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -500,7 +500,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) * before previous one went down. */ if (frag) { frag->ip_summed = CHECKSUM_NONE; - frag->h.raw = frag->data; + skb_reset_transport_header(frag); __skb_push(frag, hlen); skb_reset_network_header(frag); memcpy(skb_network_header(frag), iph, hlen); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index e0021499093f..03869d91f6f0 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -563,7 +563,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) */ skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); msg = (struct igmpmsg *)skb_network_header(skb); memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); msg->im_msgtype = IGMPMSG_WHOLEPKT; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index b4cad50c18e9..13739cd8206f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1002,7 +1002,8 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) * transport header to point to ESP. Keep UDP on the stack * for later. */ - skb->h.raw = __skb_pull(skb, len); + __skb_pull(skb, len); + skb_reset_transport_header(skb); /* modify the protocol (it's ESP!) */ iph->protocol = IPPROTO_ESP; diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index 124f24bc4dbc..2c46cbb3bbb5 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -52,7 +52,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) skb->nh.raw = skb->h.raw; } ip_hdr(skb)->tot_len = htons(skb->len + ihl); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); return 0; } diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 9c3c787a21c1..2dd32a2ca056 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -101,7 +101,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (hdr->version != 6) goto err; - skb->h.raw = (u8 *)(hdr + 1); + skb->h.raw = skb->nh.raw + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); pkt_len = ntohs(hdr->payload_len); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 1900c6226866..0f4434eff66a 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -654,7 +654,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) * before previous one went down. 
*/ if (frag) { frag->ip_summed = CHECKSUM_NONE; - frag->h.raw = frag->data; + skb_reset_transport_header(frag); fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); __skb_push(frag, hlen); skb_reset_network_header(frag); @@ -747,8 +747,8 @@ slow_path: skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev)); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); - fh = (struct frag_hdr*)(frag->data + hlen); - frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr); + fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); + frag->h.raw = frag->nh.raw + hlen + sizeof(struct frag_hdr); /* * Charge the memory for the fragment to any owner @@ -991,7 +991,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, skb_reset_network_header(skb); /* initialize protocol header pointer */ - skb->h.raw = skb->data + fragheaderlen; + skb->h.raw = skb->nh.raw + fragheaderlen; skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index d16e0fd2cd89..da930fa089c9 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -108,7 +108,7 @@ static int ipv6_gso_send_check(struct sk_buff *skb) rcu_read_lock(); ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); if (likely(ops && ops->gso_send_check)) { - skb->h.raw = skb->data; + skb_reset_transport_header(skb); err = ops->gso_send_check(skb); } rcu_read_unlock(); @@ -144,7 +144,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) rcu_read_lock(); ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); if (likely(ops && ops->gso_segment)) { - skb->h.raw = skb->data; + skb_reset_transport_header(skb); segs = ops->gso_segment(skb, features); } rcu_read_unlock(); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index d3fbb1f1caf5..75138cf1fa61 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -634,7 +634,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) head->nh.raw += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; - head->h.raw = head->data; + skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); atomic_sub(head->truesize, &nf_ct_frag6_mem); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 156a37fe3ff3..2594f0fb290d 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -653,7 +653,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, head->nh.raw += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; - head->h.raw = head->data; + skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); atomic_sub(head->truesize, &ip6_frag_mem); diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index 358b60d9d912..cae6cacd58c4 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c @@ -59,7 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) } ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - sizeof(struct ipv6hdr)); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); return 0; } diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 9aa7b961b3eb..392f8bc92691 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -577,7 +577,7 @@ static struct sk_buff *ipxitf_adjust_skbuff(struct ipx_interface *intrfc, if (skb2) { skb_reserve(skb2, out_offset); 
skb_reset_network_header(skb2); - skb2->h.raw = skb2->data; + skb_reset_transport_header(skb2); skb_put(skb2, skb->len); memcpy(ipx_hdr(skb2), ipx_hdr(skb), skb->len); memcpy(skb2->cb, skb->cb, sizeof(skb->cb)); diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c index e8277f544e72..e16c11423527 100644 --- a/net/ipx/ipx_route.c +++ b/net/ipx/ipx_route.c @@ -204,7 +204,7 @@ int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, /* Fill in IPX header */ skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb_put(skb, sizeof(struct ipxhdr)); ipx = ipx_hdr(skb); ipx->ipx_pktsize = htons(len + sizeof(struct ipxhdr)); diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 0eb7d596d470..c3cd2ba123e5 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1363,8 +1363,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, if (!skb) return err; - skb->h.raw = skb->data; - copied = skb->len; + skb_reset_transport_header(skb); + copied = skb->len; if (copied > size) { IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n", diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 7c815de4a3bf..803ac4187485 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -95,7 +95,7 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) skb->dev = self->netdev; skb_reset_mac_header(skb); skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb->protocol = htons(ETH_P_IRDA); skb->priority = TC_PRIO_BESTEFFORT; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a485496059c6..55632883d17b 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -953,7 +953,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) return; } - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb_reset_network_header(skb); skb->len = msg->length; } diff --git a/net/key/af_key.c b/net/key/af_key.c index 345019345f09..3cd228aacfe8 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3667,7 +3667,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb, copied = len; } - skb->h.raw = skb->data; + skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free; diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index e76bbbfb64bd..2525165e2e8f 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -39,7 +39,7 @@ struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev) skb_reset_mac_header(skb); skb_reserve(skb, 50); skb_reset_network_header(skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); skb->protocol = htons(ETH_P_802_2); skb->dev = dev; if (sk != NULL) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 350ed1c0e702..50dc5edb7752 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1215,7 +1215,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, copied = len; } - skb->h.raw = skb->data; + skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (msg->msg_name) { diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index a54e7ef2568a..8d0f30a015df 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -878,7 +878,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) if (frametype == NR_PROTOEXT && circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) { skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); 
- skb->h.raw = skb->data; + skb_reset_transport_header(skb); return nr_rx_ip(skb, dev); } @@ -904,7 +904,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) } if (sk != NULL) { - skb->h.raw = skb->data; + skb_reset_transport_header(skb); if (frametype == NR_CONNACK && skb->len == 22) nr_sk(sk)->bpqext = 1; @@ -1149,7 +1149,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, return er; } - skb->h.raw = skb->data; + skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c index 5560acbaaa95..e6dc749e14be 100644 --- a/net/netrom/nr_in.c +++ b/net/netrom/nr_in.c @@ -51,7 +51,7 @@ static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL) return 1; - skbn->h.raw = skbn->data; + skb_reset_transport_header(skbn); while ((skbo = skb_dequeue(&nr->frag_queue)) != NULL) { memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len); diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c index e856ae1b360a..99fdab16ded0 100644 --- a/net/netrom/nr_loopback.c +++ b/net/netrom/nr_loopback.c @@ -35,7 +35,7 @@ int nr_loopback_queue(struct sk_buff *skb) if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) { memcpy(skb_put(skbn, skb->len), skb->data, skb->len); - skbn->h.raw = skbn->data; + skb_reset_transport_header(skbn); skb_queue_tail(&loopback_queue, skbn); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index f64be9369ef7..6d8684a11ac6 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1234,7 +1234,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, *asmptr = qbit; } - skb->h.raw = skb->data; + skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index 3e41bd93ab9f..cd01642f0491 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c @@ -77,7 +77,7 @@ static void rose_loopback_timer(unsigned long param) dest = (rose_address *)(skb->data + 4); lci_o = 0xFFF - lci_i; - skb->h.raw = skb->data; + skb_reset_transport_header(skb); sk = rose_find_socket(lci_o, &rose_loopback_neigh); if (sk) { diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index a1233e1b1ab6..1f9aefd95a99 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -906,7 +906,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) } } else { - skb->h.raw = skb->data; + skb_reset_transport_header(skb); res = rose_process_rx_frame(sk, skb); goto out; } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 606971645b33..aec8cf165e1a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1319,7 +1319,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, unix_attach_fds(siocb->scm, skb); unix_get_secdata(siocb->scm, skb); - skb->h.raw = skb->data; + skb_reset_transport_header(skb); err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); if (err) goto out_free; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index a19884315622..fc713059ccdd 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1210,8 +1210,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, } } - skb->h.raw = skb->data; - + skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 94fd12f3a909..848a6b6f90a6 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -48,7 +48,7 @@ static int x25_receive_data(struct sk_buff 
*skb, struct x25_neigh *nb) if ((sk = x25_find_socket(lci, nb)) != NULL) { int queued = 1; - skb->h.raw = skb->data; + skb_reset_transport_header(skb); bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { queued = x25_process_rx_frame(sk, skb); diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index c5239fcdefa0..b2bbe552a89d 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -53,7 +53,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) skb_queue_tail(&x25->fragment_queue, skb); - skbn->h.raw = skbn->data; + skb_reset_transport_header(skbn); skbo = skb_dequeue(&x25->fragment_queue); memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len); -- cgit v1.2.3-59-g8ed1b From ea2ae17d6443abddc79480dc9f7af8feacabddc4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 25 Apr 2007 17:55:53 -0700 Subject: [SK_BUFF]: Introduce skb_transport_offset() For the quite common 'skb->h.raw - skb->data' sequence. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- drivers/net/atl1/atl1_main.c | 10 +++++----- drivers/net/cassini.c | 6 ++---- drivers/net/cxgb3/sge.c | 7 ++++--- drivers/net/e1000/e1000_main.c | 10 +++++----- drivers/net/ixgb/ixgb_main.c | 8 ++++---- drivers/net/myri10ge/myri10ge.c | 5 +++-- drivers/net/netxen/netxen_nic_hw.c | 2 +- drivers/net/sk98lin/skge.c | 4 ++-- drivers/net/skge.c | 2 +- drivers/net/sky2.c | 2 +- drivers/net/sungem.c | 6 ++---- drivers/net/sunhme.c | 6 ++---- include/linux/skbuff.h | 5 +++++ include/net/udplite.h | 6 +++--- net/core/dev.c | 2 +- net/core/skbuff.c | 2 +- net/ipv4/esp4.c | 2 +- net/ipv4/udp.c | 2 +- net/ipv6/esp6.c | 9 +++------ net/ipv6/exthdrs.c | 12 +++++++----- net/ipv6/ip6_input.c | 2 +- net/ipv6/ipcomp6.c | 4 +--- net/ipv6/mip6.c | 5 +++-- net/ipv6/raw.c | 4 ++-- net/ipv6/reassembly.c | 3 ++- net/sctp/input.c | 2 +- 26 files changed, 64 insertions(+), 64 deletions(-) (limited to 'include') diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index d2be79a30f8a..c26f8ce320e6 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c @@ -1326,8 +1326,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, u8 css, cso; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - cso = skb->h.raw - skb->data; - css = (skb->h.raw + skb->csum_offset) - skb->data; + cso = skb_transport_offset(skb); + css = cso + skb->csum; if (unlikely(cso & 0x1)) { printk(KERN_DEBUG "%s: payload offset != even number\n", atl1_driver_name); @@ -1369,8 +1369,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, if (tcp_seg) { /* TSO/GSO */ - proto_hdr_len = - ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); + proto_hdr_len = (skb_transport_offset(skb) + + (skb->h.th->doff << 2)); buffer_info->length = proto_hdr_len; page = virt_to_page(skb->data); offset = (unsigned long)skb->data & ~PAGE_MASK; @@ -1562,7 +1562,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) mss = skb_shinfo(skb)->gso_size; if (mss) { if (skb->protocol == htons(ETH_P_IP)) { - proto_hdr_len = ((skb->h.raw - skb->data) + + proto_hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2)); if (unlikely(proto_hdr_len > len)) { dev_kfree_skb_any(skb); diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 68e37a655fe2..bd3ab6493e39 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -2821,10 +2821,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, ctrl = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { - u64 
csum_start_off, csum_stuff_off; - - csum_start_off = (u64) (skb->h.raw - skb->data); - csum_stuff_off = csum_start_off + skb->csum_offset; + const u64 csum_start_off = skb_transport_offset(skb); + const u64 csum_stuff_off = csum_start_off + skb->csum_offset; ctrl = TX_DESC_CSUM_EN | CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index a891f6f81527..d38b1bcd138e 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -1319,9 +1319,10 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, /* Only TX_DATA builds SGLs */ from = (struct work_request_hdr *)skb->data; - memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from)); + memcpy(&d->flit[1], &from[1], + skb_transport_offset(skb) - sizeof(*from)); - flits = (skb->h.raw - skb->data) / 8; + flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw, adap->pdev); @@ -1349,7 +1350,7 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) if (skb->len <= WR_LEN && cnt == 0) return 1; /* packet fits as immediate data */ - flits = (skb->h.raw - skb->data) / 8; /* headers */ + flits = skb_transport_offset(skb) / 8; /* headers */ if (skb->tail != skb->h.raw) cnt++; return flits_to_desc(flits + sgl_len(cnt)); diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index a3d9986b4170..78cf417cf236 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2887,7 +2887,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, return err; } - hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); + hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2)); mss = skb_shinfo(skb)->gso_size; if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); @@ -2897,7 +2897,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, iph->daddr, 0, IPPROTO_TCP, 0); cmd_length = E1000_TXD_CMD_IP; - ipcse = skb->h.raw - skb->data - 1; + ipcse = skb_transport_offset(skb) - 1; } else if (skb->protocol == htons(ETH_P_IPV6)) { ipv6_hdr(skb)->payload_len = 0; skb->h.th->check = @@ -2908,7 +2908,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, } ipcss = skb_network_offset(skb); ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; - tucss = skb->h.raw - skb->data; + tucss = skb_transport_offset(skb); tucso = (void *)&(skb->h.th->check) - (void *)skb->data; tucse = 0; @@ -2950,7 +2950,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, uint8_t css; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - css = skb->h.raw - skb->data; + css = skb_transport_offset(skb); i = tx_ring->next_to_use; buffer_info = &tx_ring->buffer_info[i]; @@ -3292,7 +3292,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* TSO Workaround for 82571/2/3 Controllers -- if skb->data * points to just header, pull a few bytes of payload from * frags into skb->data */ - hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); + hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2)); if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { switch (adapter->hw.mac_type) { unsigned int pull_size; diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index bba4dcaf92e9..ceea6e45792d 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ 
b/drivers/net/ixgb/ixgb_main.c @@ -1190,7 +1190,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) return err; } - hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); + hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2)); mss = skb_shinfo(skb)->gso_size; iph = ip_hdr(skb); iph->tot_len = 0; @@ -1199,8 +1199,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) 0, IPPROTO_TCP, 0); ipcss = skb_network_offset(skb); ipcso = (void *)&(iph->check) - (void *)skb->data; - ipcse = skb->h.raw - skb->data - 1; - tucss = skb->h.raw - skb->data; + ipcse = skb_transport_offset(skb) - 1; + tucss = skb_transport_offset(skb); tucso = (void *)&(skb->h.th->check) - (void *)skb->data; tucse = 0; @@ -1245,7 +1245,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { struct ixgb_buffer *buffer_info; - css = skb->h.raw - skb->data; + css = skb_transport_offset(skb); cso = css + skb->csum_offset; i = adapter->tx_ring.next_to_use; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 7c04179c7b13..e04228c7b14f 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -2029,7 +2029,7 @@ again: odd_flag = 0; flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { - cksum_offset = (skb->h.raw - skb->data); + cksum_offset = skb_transport_offset(skb); pseudo_hdr_offset = cksum_offset + skb->csum_offset; /* If the headers are excessively large, then we must * fall back to a software checksum */ @@ -2054,7 +2054,8 @@ again: * send loop that we are still in the * header portion of the TSO packet. * TSO header must be at most 134 bytes long */ - cum_len = -((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); + cum_len = -(skb_transport_offset(skb) + + (skb->h.th->doff << 2)); /* for TSO, pseudo_hdr_offset holds mss. * The firmware figures out where to put diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 28d68c3550ef..09ca2192cbfa 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -387,7 +387,7 @@ void netxen_tso_check(struct netxen_adapter *adapter, } } adapter->stats.xmitcsummed++; - desc->tcp_hdr_offset = skb->h.raw - skb->data; + desc->tcp_hdr_offset = skb_transport_offset(skb); desc->ip_hdr_offset = skb_network_offset(skb); } diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index 9ac1fe659dc9..e4ab7a8acc1a 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c @@ -1562,7 +1562,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ pTxd->pMBuf = pMessage; if (pMessage->ip_summed == CHECKSUM_PARTIAL) { - u16 hdrlen = pMessage->h.raw - pMessage->data; + u16 hdrlen = skb_transport_offset(pMessage); u16 offset = hdrlen + pMessage->csum_offset; if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && @@ -1681,7 +1681,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ ** Does the HW need to evaluate checksum for TCP or UDP packets? 
*/ if (pMessage->ip_summed == CHECKSUM_PARTIAL) { - u16 hdrlen = pMessage->h.raw - pMessage->data; + u16 hdrlen = skb_transport_offset(pMessage); u16 offset = hdrlen + pMessage->csum_offset; Control = BMU_STFWD; diff --git a/drivers/net/skge.c b/drivers/net/skge.c index d476a3cc2e94..ca7a0e039849 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -2654,7 +2654,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) td->dma_hi = map >> 32; if (skb->ip_summed == CHECKSUM_PARTIAL) { - int offset = skb->h.raw - skb->data; + const int offset = skb_transport_offset(skb); /* This seems backwards, but it is what the sk98lin * does. Looks like hardware is wrong? diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index a37bb205f3d3..a35f2f2784ae 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1421,7 +1421,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* Handle TCP checksum offload */ if (skb->ip_summed == CHECKSUM_PARTIAL) { - unsigned offset = skb->h.raw - skb->data; + const unsigned offset = skb_transport_offset(skb); u32 tcpsum; tcpsum = offset << 16; /* sum start */ diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index db2e1a6b7231..4bb89dec5650 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -1028,10 +1028,8 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) ctrl = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { - u64 csum_start_off, csum_stuff_off; - - csum_start_off = (u64) (skb->h.raw - skb->data); - csum_stuff_off = csum_start_off + skb->csum_offset; + const u64 csum_start_off = skb_transport_offset(skb); + const u64 csum_stuff_off = csum_start_off + skb->csum_offset; ctrl = (TXDCTRL_CENAB | (csum_start_off << 15) | diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index aca592bc0329..4b69c1deb9f3 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -2269,10 +2269,8 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_flags = TXFLAG_OWN; if (skb->ip_summed == CHECKSUM_PARTIAL) { - u32 csum_start_off, csum_stuff_off; - - csum_start_off = (u32) (skb->h.raw - skb->data); - csum_stuff_off = csum_start_off + skb->csum_offset; + const u32 csum_start_off = skb_transport_offset(skb); + const u32 csum_stuff_off = csum_start_off + skb->csum_offset; tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7c1f1756e482..64c3c1687e49 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -962,6 +962,11 @@ static inline void skb_reset_transport_header(struct sk_buff *skb) skb->h.raw = skb->data; } +static inline int skb_transport_offset(const struct sk_buff *skb) +{ + return skb->h.raw - skb->data; +} + static inline unsigned char *skb_network_header(const struct sk_buff *skb) { return skb->nh.raw; diff --git a/include/net/udplite.h b/include/net/udplite.h index d99df75fe54c..765032036657 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -101,14 +101,14 @@ static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh) static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) { - int off, len, cscov = udplite_sender_cscov(udp_sk(sk), skb->h.uh); + int cscov = udplite_sender_cscov(udp_sk(sk), skb->h.uh); __wsum csum = 0; skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ skb_queue_walk(&sk->sk_write_queue, skb) { - off = skb->h.raw - skb->data; - len = 
skb->len - off; + const int off = skb_transport_offset(skb); + const int len = skb->len - off; csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum); diff --git a/net/core/dev.c b/net/core/dev.c index 99f15728d9cb..f7f7e5687e46 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1156,7 +1156,7 @@ EXPORT_SYMBOL(netif_device_attach); int skb_checksum_help(struct sk_buff *skb) { __wsum csum; - int ret = 0, offset = skb->h.raw - skb->data; + int ret = 0, offset = skb_transport_offset(skb); if (skb->ip_summed == CHECKSUM_COMPLETE) goto out_set_summed; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 1493c95f633e..b242020c02f7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1348,7 +1348,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) long csstart; if (skb->ip_summed == CHECKSUM_PARTIAL) - csstart = skb->h.raw - skb->data; + csstart = skb_transport_offset(skb); else csstart = skb_headlen(skb); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index ed3deed66445..957674562801 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -27,7 +27,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) int nfrags; /* Strip IP+ESP header. */ - __skb_pull(skb, skb->h.raw - skb->data); + __skb_pull(skb, skb_transport_offset(skb)); /* Now skb is pure payload to encrypt */ err = -ENOMEM; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 13739cd8206f..13875e8419a7 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -435,7 +435,7 @@ static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, * fragments on the socket so that all csums of sk_buffs * should be together */ - offset = skb->h.raw - skb->data; + offset = skb_transport_offset(skb); skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 7aff380e74ef..35905867ded1 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -42,21 +42,18 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; - int hdr_len; struct ipv6hdr *top_iph; struct ipv6_esp_hdr *esph; struct crypto_blkcipher *tfm; struct blkcipher_desc desc; - struct esp_data *esp; struct sk_buff *trailer; int blksize; int clen; int alen; int nfrags; - - esp = x->data; - hdr_len = skb->h.raw - skb->data + - sizeof(*esph) + esp->conf.ivlen; + struct esp_data *esp = x->data; + int hdr_len = (skb_transport_offset(skb) + + sizeof(*esph) + esp->conf.ivlen); /* Strip IP+ESP header. 
*/ __skb_pull(skb, hdr_len); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index dab069b0b3f6..1bda0299890e 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -146,7 +146,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp) int off = skb->h.raw - skb->nh.raw; int len = ((skb->h.raw[1]+1)<<3); - if ((skb->h.raw + len) - skb->data > skb_headlen(skb)) + if (skb_transport_offset(skb) + len > skb_headlen(skb)) goto bad; off += 2; @@ -288,8 +288,9 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp) #endif struct dst_entry *dst; - if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) || - !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) { + if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || + !pskb_may_pull(skb, (skb_transport_offset(skb) + + ((skb->h.raw[1] + 1) << 3)))) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); @@ -387,8 +388,9 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp) in6_dev_put(idev); - if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) || - !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) { + if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || + !pskb_may_pull(skb, (skb_transport_offset(skb) + + ((skb->h.raw[1] + 1) << 3)))) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 2dd32a2ca056..44275411d1a8 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -160,7 +160,7 @@ static inline int ip6_input_finish(struct sk_buff *skb) rcu_read_lock(); resubmit: idev = ip6_dst_idev(skb->dst); - if (!pskb_pull(skb, skb->h.raw - skb->data)) + if (!pskb_pull(skb, skb_transport_offset(skb))) goto discard; nhoff = IP6CB(skb)->nhoff; nexthdr = skb_network_header(skb)[nhoff]; diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index e2404a629680..4a6501695e98 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -124,15 +124,13 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; struct ipv6hdr *top_iph; - int hdr_len; struct ipv6_comp_hdr *ipch; struct ipcomp_data *ipcd = x->data; int plen, dlen; u8 *start, *scratch; struct crypto_comp *tfm; int cpu; - - hdr_len = skb->h.raw - skb->data; + int hdr_len = skb_transport_offset(skb); /* check whether datagram len is larger than threshold */ if ((skb->len - hdr_len) < ipcd->threshold) { diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 7b5f9d82e801..85202891644e 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -90,8 +90,9 @@ int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) { struct ip6_mh *mh; - if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + 8) || - !pskb_may_pull(skb, (skb->h.raw - skb->data) + ((skb->h.raw[1] + 1) << 3))) + if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) || + !pskb_may_pull(skb, (skb_transport_offset(skb) + + ((skb->h.raw[1] + 1) << 3)))) return -1; mh = (struct ip6_mh *)skb->h.raw; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 0e2b56ce0a56..bb049f1c2679 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -513,7 +513,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, if (csum_skb) continue; - len = skb->len - (skb->h.raw - skb->data); + len = skb->len - skb_transport_offset(skb); if (offset >= len) { offset -= len; continue; @@ -525,7 +525,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, skb = csum_skb; } - offset += skb->h.raw - skb->data; + offset += 
skb_transport_offset(skb); if (skb_copy_bits(skb, offset, &csum, 2)) BUG(); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 2594f0fb290d..ef29a7bb97ce 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -718,7 +718,8 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); return -1; } - if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) { + if (!pskb_may_pull(skb, (skb_transport_offset(skb) + + sizeof(struct frag_hdr)))) { IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); return -1; diff --git a/net/sctp/input.c b/net/sctp/input.c index 595fe32b3d41..9311b5ddf5c0 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -141,7 +141,7 @@ int sctp_rcv(struct sk_buff *skb) sh = (struct sctphdr *) skb->h.raw; /* Pull up the IP and SCTP headers. */ - __skb_pull(skb, skb->h.raw - skb->data); + __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; if ((skb->ip_summed != CHECKSUM_UNNECESSARY) && -- cgit v1.2.3-59-g8ed1b From 967b05f64e27d04a4c8879addd0e1c52137e2c9e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 13:51:52 -0300 Subject: [SK_BUFF]: Introduce skb_set_transport_header For the cases where the transport header is being set to a offset from skb->data. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/skbuff.h | 6 ++++++ net/ax25/af_ax25.c | 20 ++++++++------------ net/ax25/ax25_in.c | 2 +- net/ipv4/esp4.c | 3 ++- net/ipv4/ip_output.c | 19 ++++++++----------- net/ipv4/tcp_input.c | 2 +- net/ipv6/ah6.c | 2 +- net/ipv6/esp6.c | 4 ++-- net/ipv6/netfilter/nf_conntrack_reasm.c | 2 +- net/ipv6/xfrm6_mode_beet.c | 2 +- net/ipv6/xfrm6_mode_ro.c | 2 +- net/ipv6/xfrm6_mode_transport.c | 2 +- 12 files changed, 33 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 64c3c1687e49..684292efa823 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -962,6 +962,12 @@ static inline void skb_reset_transport_header(struct sk_buff *skb) skb->h.raw = skb->data; } +static inline void skb_set_transport_header(struct sk_buff *skb, + const int offset) +{ + skb->h.raw = skb->data + offset; +} + static inline int skb_transport_offset(const struct sk_buff *skb) { return skb->h.raw - skb->data; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 14db01a4ff63..75d4d695edec 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1425,7 +1425,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock, struct sockaddr_ax25 sax; struct sk_buff *skb; ax25_digi dtmp, *dp; - unsigned char *asmptr; ax25_cb *ax25; size_t size; int lv, err, addr_len = msg->msg_namelen; @@ -1551,10 +1550,8 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock, skb_reset_network_header(skb); /* Add the PID if one is not supplied by the user in the skb */ - if (!ax25->pidincl) { - asmptr = skb_push(skb, 1); - *asmptr = sk->sk_protocol; - } + if (!ax25->pidincl) + *skb_push(skb, 1) = sk->sk_protocol; SOCK_DEBUG(sk, "AX.25: Transmitting buffer\n"); @@ -1573,7 +1570,7 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock, goto out; } - asmptr = skb_push(skb, 1 + ax25_addr_size(dp)); + skb_push(skb, 1 + ax25_addr_size(dp)); SOCK_DEBUG(sk, "Building AX.25 Header (dp=%p).\n", dp); @@ -1581,17 +1578,16 @@ static int 
ax25_sendmsg(struct kiocb *iocb, struct socket *sock, SOCK_DEBUG(sk, "Num digipeaters=%d\n", dp->ndigi); /* Build an AX.25 header */ - asmptr += (lv = ax25_addr_build(asmptr, &ax25->source_addr, - &sax.sax25_call, dp, - AX25_COMMAND, AX25_MODULUS)); + lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call, + dp, AX25_COMMAND, AX25_MODULUS); SOCK_DEBUG(sk, "Built header (%d bytes)\n",lv); - skb->h.raw = asmptr; + skb_set_transport_header(skb, lv); - SOCK_DEBUG(sk, "base=%p pos=%p\n", skb->data, asmptr); + SOCK_DEBUG(sk, "base=%p pos=%p\n", skb->data, skb->h.raw); - *asmptr = AX25_UI; + *skb->h.raw = AX25_UI; /* Datagram frames go straight out of the door as UI */ ax25_queue_xmit(skb, ax25->ax25_dev->dev); diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 724ad5ce75d4..31c59387a6fc 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -233,7 +233,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, /* UI frame - bypass LAPB processing */ if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) { - skb->h.raw = skb->data + 2; /* skip control and pid */ + skb_set_transport_header(skb, 2); /* skip control and pid */ ax25_send_to_raw(&dest, skb, skb->data[1]); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 957674562801..82543eebfa52 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -261,7 +261,8 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) iph->protocol = nexthdr[1]; pskb_trim(skb, skb->len - alen - padlen - 2); - skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - ihl; + __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen); + skb_set_transport_header(skb, -ihl); return 0; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 63c05be0764d..6d92358fc513 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -593,7 +593,7 @@ slow_path: skb_reserve(skb2, ll_rs); skb_put(skb2, len + hlen); skb_reset_network_header(skb2); - skb2->h.raw = skb2->data + hlen; + skb2->h.raw = skb2->nh.raw + hlen; /* * Charge the memory for the fragment to any owner @@ -724,7 +724,7 @@ static inline int ip_ufo_append_data(struct sock *sk, skb_reset_network_header(skb); /* initialize protocol header pointer */ - skb->h.raw = skb->data + fragheaderlen; + skb->h.raw = skb->nh.raw + fragheaderlen; skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; @@ -1099,8 +1099,6 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, } if (len <= 0) { struct sk_buff *skb_prev; - char *data; - struct iphdr *iph; int alloclen; skb_prev = skb; @@ -1123,16 +1121,15 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, /* * Find where to start putting bytes. 
*/ - data = skb_put(skb, fragheaderlen + fraggap); + skb_put(skb, fragheaderlen + fraggap); skb_reset_network_header(skb); - iph = ip_hdr(skb); - data += fragheaderlen; - skb->h.raw = data; + skb->h.raw = skb->nh.raw + fragheaderlen; if (fraggap) { - skb->csum = skb_copy_and_csum_bits( - skb_prev, maxfraglen, - data, fraggap, 0); + skb->csum = skb_copy_and_csum_bits(skb_prev, + maxfraglen, + skb->h.raw, + fraggap, 0); skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); pskb_trim_unique(skb_prev, maxfraglen); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5da823a32250..2776a8b01339 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3636,7 +3636,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); skb_set_network_header(nskb, skb_network_header(skb) - skb->head); - nskb->h.raw = nskb->data + (skb->h.raw - skb->head); + skb_set_transport_header(nskb, skb->h.raw - skb->head); skb_reserve(nskb, header); memcpy(nskb->head, skb->head, header); diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index b682d2368c2a..e5ee981d3e10 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -247,7 +247,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) memcpy(tmp_base, top_iph, sizeof(tmp_base)); tmp_ext = NULL; - extlen = skb->h.raw - (unsigned char *)(top_iph + 1); + extlen = skb_transport_offset(skb) + sizeof(struct ipv6hdr); if (extlen) { extlen += sizeof(*tmp_ext); tmp_ext = kmalloc(extlen, GFP_ATOMIC); diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 35905867ded1..ad522b7b5771 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -228,8 +228,8 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) ret = nexthdr[1]; } - skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - hdr_len; - + __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen); + skb_set_transport_header(skb, -hdr_len); out: return ret; } diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 75138cf1fa61..015950522c8b 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -785,7 +785,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) goto ret_orig; } - clone->h.raw = clone->data + fhoff; + skb_set_transport_header(clone, fhoff); hdr = ipv6_hdr(clone); fhdr = (struct frag_hdr *)clone->h.raw; diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index b5a48c255f07..abac09409ded 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ b/net/ipv6/xfrm6_mode_beet.c @@ -42,7 +42,7 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) hdr_len = ip6_find_1stfragopt(skb, &prevhdr); skb->nh.raw = prevhdr - x->props.header_len; - skb->h.raw = skb->data + hdr_len; + skb_set_transport_header(skb, hdr_len); memmove(skb->data, iph, hdr_len); skb_reset_network_header(skb); diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c index d01958d921ac..da48ecf3fe96 100644 --- a/net/ipv6/xfrm6_mode_ro.c +++ b/net/ipv6/xfrm6_mode_ro.c @@ -54,7 +54,7 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb) hdr_len = x->type->hdr_offset(x, skb, &prevhdr); skb->nh.raw = prevhdr - x->props.header_len; - skb->h.raw = skb->data + hdr_len; + skb_set_transport_header(skb, hdr_len); memmove(skb->data, iph, hdr_len); return 0; } diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index cae6cacd58c4..0134d74ef087 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ 
b/net/ipv6/xfrm6_mode_transport.c @@ -36,7 +36,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) hdr_len = x->type->hdr_offset(x, skb, &prevhdr); skb->nh.raw = prevhdr - x->props.header_len; - skb->h.raw = skb->data + hdr_len; + skb_set_transport_header(skb, hdr_len); memmove(skb->data, iph, hdr_len); return 0; } -- cgit v1.2.3-59-g8ed1b From 2c0fd387b00a6758550b5ca1aae4408374483fe7 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 13:59:32 -0300 Subject: [SCTP]: Introduce sctp_hdr() For consistency with all the other skb->h.raw accessors. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/sctp.h | 9 +++++++++ net/sctp/input.c | 14 +++++--------- net/sctp/ipv6.c | 4 ++-- net/sctp/protocol.c | 10 ++++------ 4 files changed, 20 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/sctp.h b/include/linux/sctp.h index d4f86560bfff..d76767dfe59e 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -63,6 +63,15 @@ typedef struct sctphdr { __be32 checksum; } __attribute__((packed)) sctp_sctphdr_t; +#ifdef __KERNEL__ +#include + +static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) +{ + return (struct sctphdr *)skb->h.raw; +} +#endif + /* Section 3.2. Chunk Field Descriptions. */ typedef struct sctp_chunkhdr { __u8 type; diff --git a/net/sctp/input.c b/net/sctp/input.c index 9311b5ddf5c0..3a322c584c74 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -79,14 +79,10 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); /* Calculate the SCTP checksum of an SCTP packet. */ static inline int sctp_rcv_checksum(struct sk_buff *skb) { - struct sctphdr *sh; - __u32 cmp, val; struct sk_buff *list = skb_shinfo(skb)->frag_list; - - sh = (struct sctphdr *) skb->h.raw; - cmp = ntohl(sh->checksum); - - val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); + struct sctphdr *sh = sctp_hdr(skb); + __u32 cmp = ntohl(sh->checksum); + __u32 val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); for (; list; list = list->next) val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), @@ -138,7 +134,7 @@ int sctp_rcv(struct sk_buff *skb) if (skb_linearize(skb)) goto discard_it; - sh = (struct sctphdr *) skb->h.raw; + sh = sctp_hdr(skb); /* Pull up the IP and SCTP headers. */ __skb_pull(skb, skb_transport_offset(skb)); @@ -905,7 +901,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, struct sctp_association *asoc; union sctp_addr addr; union sctp_addr *paddr = &addr; - struct sctphdr *sh = (struct sctphdr *) skb->h.raw; + struct sctphdr *sh = sctp_hdr(skb); sctp_chunkhdr_t *ch; union sctp_params params; sctp_init_chunk_t *init; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 001be2de0b3c..0992bc5bb528 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -390,7 +390,7 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, addr->v6.sin6_flowinfo = 0; /* FIXME */ addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif; - sh = (struct sctphdr *) skb->h.raw; + sh = sctp_hdr(skb); if (is_saddr) { *port = sh->source; from = &ipv6_hdr(skb)->saddr; @@ -765,7 +765,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, if (msgname) { sctp_inet6_msgname(msgname, addr_len); sin6 = (struct sockaddr_in6 *)msgname; - sh = (struct sctphdr *)skb->h.raw; + sh = sctp_hdr(skb); sin6->sin6_port = sh->source; /* Map ipv4 address into v4-mapped-on-v6 address. 
*/ diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 08f92ba4ebd7..7c28c9b959e2 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -235,7 +235,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, port = &addr->v4.sin_port; addr->v4.sin_family = AF_INET; - sh = (struct sctphdr *) skb->h.raw; + sh = sctp_hdr(skb); if (is_saddr) { *port = sh->source; from = &ip_hdr(skb)->saddr; @@ -731,13 +731,11 @@ static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname, /* Initialize and copy out a msgname from an inbound skb. */ static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len) { - struct sctphdr *sh; - struct sockaddr_in *sin; - if (msgname) { + struct sctphdr *sh = sctp_hdr(skb); + struct sockaddr_in *sin = (struct sockaddr_in *)msgname; + sctp_inet_msgname(msgname, len); - sin = (struct sockaddr_in *)msgname; - sh = (struct sctphdr *)skb->h.raw; sin->sin_port = sh->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; } -- cgit v1.2.3-59-g8ed1b From cc70ab261c9f997589546100ddec5da6bfd89c4e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 14:03:22 -0300 Subject: [ICMP6]: Introduce icmp6_hdr() For consistency with all the other skb->h.raw accessors. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/icmpv6.h | 9 +++++++++ net/ipv6/datagram.c | 2 +- net/ipv6/icmp.c | 8 ++++---- net/ipv6/mcast.c | 6 +++--- net/ipv6/ndisc.c | 2 +- 5 files changed, 18 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 68d3526c3a05..0b5ba5eb7ed2 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -75,6 +75,15 @@ struct icmp6hdr { #define icmp6_router_pref icmp6_dataun.u_nd_ra.router_pref }; +#ifdef __KERNEL__ +#include + +static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) +{ + return (struct icmp6hdr *)skb->h.raw; +} +#endif + #define ICMPV6_ROUTER_PREF_LOW 0x3 #define ICMPV6_ROUTER_PREF_MEDIUM 0x0 #define ICMPV6_ROUTER_PREF_HIGH 0x1 diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index f429290c2c37..feba6b197fe9 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -209,7 +209,7 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { struct ipv6_pinfo *np = inet6_sk(sk); - struct icmp6hdr *icmph = (struct icmp6hdr *)skb->h.raw; + struct icmp6hdr *icmph = icmp6_hdr(skb); struct sock_exterr_skb *serr; if (!np->recverr) diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 3a01effda695..d3edc3cf1ce9 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -222,7 +222,7 @@ static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; - icmp6h = (struct icmp6hdr*) skb->h.raw; + icmp6h = icmp6_hdr(skb); memcpy(icmp6h, thdr, sizeof(struct icmp6hdr)); icmp6h->icmp6_cksum = 0; @@ -476,7 +476,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) struct inet6_dev *idev; struct ipv6_pinfo *np; struct in6_addr *saddr = NULL; - struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw; + struct icmp6hdr *icmph = icmp6_hdr(skb); struct icmp6hdr tmp_hdr; struct flowi fl; struct icmpv6_msg msg; @@ -651,7 +651,7 @@ static int icmpv6_rcv(struct sk_buff **pskb) if (!pskb_pull(skb, sizeof(struct icmp6hdr))) goto discard_it; - hdr = (struct icmp6hdr *) skb->h.raw; + hdr = icmp6_hdr(skb); type = hdr->icmp6_type; @@ -677,7 +677,7 @@ static int 
icmpv6_rcv(struct sk_buff **pskb) */ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto discard_it; - hdr = (struct icmp6hdr *) skb->h.raw; + hdr = icmp6_hdr(skb); orig_hdr = (struct ipv6hdr *) (hdr + 1); rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev, ntohl(hdr->icmp6_mtu)); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index b2b37ba48b9c..1f2a3be9308a 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -988,7 +988,7 @@ int ipv6_is_mld(struct sk_buff *skb, int nexthdr) if (!pskb_may_pull(skb, sizeof(struct icmp6hdr))) return 0; - pic = (struct icmp6hdr *)skb->h.raw; + pic = icmp6_hdr(skb); switch (pic->icmp6_type) { case ICMPV6_MGM_QUERY: @@ -1179,7 +1179,7 @@ int igmp6_event_query(struct sk_buff *skb) if (idev == NULL) return 0; - hdr = (struct icmp6hdr *) skb->h.raw; + hdr = icmp6_hdr(skb); group = (struct in6_addr *) (hdr + 1); group_type = ipv6_addr_type(group); @@ -1300,7 +1300,7 @@ int igmp6_event_report(struct sk_buff *skb) if (!pskb_may_pull(skb, sizeof(struct in6_addr))) return -EINVAL; - hdr = (struct icmp6hdr*) skb->h.raw; + hdr = icmp6_hdr(skb); /* Drop reports with not link local source */ addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 00feb4c4d98b..8b946f56287a 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1366,7 +1366,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) return; } - icmph = (struct icmp6hdr *) skb->h.raw; + icmph = icmp6_hdr(skb); target = (struct in6_addr *) (icmph + 1); dest = target + 1; -- cgit v1.2.3-59-g8ed1b From d9edf9e2be0f7661558984c32bd53867a7037fd3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 14:19:23 -0300 Subject: [SK_BUFF]: Introduce igmp_hdr() & friends, remove skb->h.igmph Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- include/linux/igmp.h | 21 +++++++++++++++++++++ include/linux/skbuff.h | 1 - net/ipv4/igmp.c | 22 +++++++++++----------- net/ipv4/ipmr.c | 2 +- 4 files changed, 33 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/igmp.h b/include/linux/igmp.h index a113fe68d8a1..ca285527b879 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -80,6 +80,27 @@ struct igmpv3_query { __be32 srcs[0]; }; +#ifdef __KERNEL__ +#include + +static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb) +{ + return (struct igmphdr *)skb->h.raw; +} + +static inline struct igmpv3_report * + igmpv3_report_hdr(const struct sk_buff *skb) +{ + return (struct igmpv3_report *)skb->h.raw; +} + +static inline struct igmpv3_query * + igmpv3_query_hdr(const struct sk_buff *skb) +{ + return (struct igmpv3_query *)skb->h.raw; +} +#endif + #define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */ #define IGMP_HOST_MEMBERSHIP_REPORT 0x12 /* Ditto */ #define IGMP_DVMRP 0x13 /* DVMRP routing */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 684292efa823..0a4a7ac034fa 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -240,7 +240,6 @@ struct sk_buff { struct tcphdr *th; struct udphdr *uh; struct icmphdr *icmph; - struct igmphdr *igmph; struct iphdr *ipiph; struct ipv6hdr *ipv6h; unsigned char *raw; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index f511d03e2439..292516bb1eca 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -333,8 +333,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) ((u8*)&pip[1])[2] = 0; ((u8*)&pip[1])[3] = 0; - pig =(struct igmpv3_report *)skb_put(skb, sizeof(*pig)); - skb->h.igmph = (struct igmphdr *)pig; + skb->h.raw = skb_put(skb, sizeof(*pig)); + pig = igmpv3_report_hdr(skb); pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT; pig->resv1 = 0; pig->csum = 0; @@ -346,13 +346,13 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) static int igmpv3_sendpack(struct sk_buff *skb) { struct iphdr *pip = ip_hdr(skb); - struct igmphdr *pig = skb->h.igmph; + struct igmphdr *pig = igmp_hdr(skb); const int iplen = skb->tail - skb->nh.raw; const int igmplen = skb->tail - skb->h.raw; pip->tot_len = htons(iplen); ip_send_check(pip); - pig->csum = ip_compute_csum(skb->h.igmph, igmplen); + pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen); return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, skb->dev, dst_output); @@ -379,7 +379,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, pgr->grec_auxwords = 0; pgr->grec_nsrcs = 0; pgr->grec_mca = pmc->multiaddr; - pih = (struct igmpv3_report *)skb->h.igmph; + pih = igmpv3_report_hdr(skb); pih->ngrec = htons(ntohs(pih->ngrec)+1); *ppgr = pgr; return skb; @@ -412,7 +412,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, if (!*psf_list) goto empty_source; - pih = skb ? (struct igmpv3_report *)skb->h.igmph : NULL; + pih = skb ? 
igmpv3_report_hdr(skb) : NULL; /* EX and TO_EX get a fresh packet, if needed */ if (truncate) { @@ -829,8 +829,8 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group) static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, int len) { - struct igmphdr *ih = skb->h.igmph; - struct igmpv3_query *ih3 = (struct igmpv3_query *)ih; + struct igmphdr *ih = igmp_hdr(skb); + struct igmpv3_query *ih3 = igmpv3_query_hdr(skb); struct ip_mc_list *im; __be32 group = ih->group; int max_delay; @@ -863,12 +863,12 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) return; - ih3 = (struct igmpv3_query *) skb->h.raw; + ih3 = igmpv3_query_hdr(skb); if (ih3->nsrcs) { if (!pskb_may_pull(skb, sizeof(struct igmpv3_query) + ntohs(ih3->nsrcs)*sizeof(__be32))) return; - ih3 = (struct igmpv3_query *) skb->h.raw; + ih3 = igmpv3_query_hdr(skb); } max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE); @@ -945,7 +945,7 @@ int igmp_rcv(struct sk_buff *skb) goto drop; } - ih = skb->h.igmph; + ih = igmp_hdr(skb); switch (ih->type) { case IGMP_HOST_MEMBERSHIP_QUERY: igmp_heard_query(in_dev, skb, len); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 03869d91f6f0..05bc27002def 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1430,7 +1430,7 @@ int pim_rcv_v1(struct sk_buff * skb) if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) goto drop; - pim = (struct igmphdr*)skb->h.raw; + pim = igmp_hdr(skb); if (!mroute_do_pim || skb->len < sizeof(*pim) + sizeof(*encap) || -- cgit v1.2.3-59-g8ed1b From 4bedb45203eab92a87b4c863fe2d0cded633427f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 14:28:48 -0300 Subject: [SK_BUFF]: Introduce udp_hdr(), remove skb->h.uh Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- drivers/net/gianfar.c | 4 ++-- drivers/net/ioc3-eth.c | 2 +- drivers/net/mv643xx_eth.c | 2 +- include/linux/skbuff.h | 1 - include/linux/udp.h | 9 +++++++++ include/net/udplite.h | 2 +- net/core/netpoll.c | 4 +++- net/core/pktgen.c | 4 ++-- net/ipv4/udp.c | 12 ++++++------ net/ipv6/udp.c | 10 +++++----- net/rxrpc/connection.c | 4 ++-- net/rxrpc/transport.c | 4 ++-- 12 files changed, 34 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index c9abc96a0919..b9f44602c5e1 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -944,9 +944,9 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) /* And provide the already calculated phcs */ if (ip_hdr(skb)->protocol == IPPROTO_UDP) { flags |= TXFCB_UDP; - fcb->phcs = skb->h.uh->check; + fcb->phcs = udp_hdr(skb)->check; } else - fcb->phcs = skb->h.th->check; + fcb->phcs = udp_hdr(skb)->check; /* l3os is the distance between the start of the * frame (skb->data) and the start of the IP hdr. 
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index d375e786b4b3..ba012e10d79a 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -1422,7 +1422,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) csoff = ETH_HLEN + (ih->ihl << 2); if (proto == IPPROTO_UDP) { csoff += offsetof(struct udphdr, check); - skb->h.uh->check = csum; + udp_hdr(skb)->check = csum; } if (proto == IPPROTO_TCP) { csoff += offsetof(struct tcphdr, check); diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 6b39a268ec29..43723839e934 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -1166,7 +1166,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: cmd_sts |= ETH_UDP_FRAME; - desc->l4i_chk = skb->h.uh->check; + desc->l4i_chk = udp_hdr(skb)->check; break; case IPPROTO_TCP: desc->l4i_chk = skb->h.th->check; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0a4a7ac034fa..cb1ac48cc808 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -238,7 +238,6 @@ struct sk_buff { union { struct tcphdr *th; - struct udphdr *uh; struct icmphdr *icmph; struct iphdr *ipiph; struct ipv6hdr *ipv6h; diff --git a/include/linux/udp.h b/include/linux/udp.h index 7e08c07efe0f..1f58503af9a6 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -26,6 +26,15 @@ struct udphdr { __sum16 check; }; +#ifdef __KERNEL__ +#include + +static inline struct udphdr *udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb->h.raw; +} +#endif + /* UDP socket options */ #define UDP_CORK 1 /* Never send partially complete segments */ #define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */ diff --git a/include/net/udplite.h b/include/net/udplite.h index 765032036657..635b0eafca95 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -101,7 +101,7 @@ static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh) static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) { - int cscov = udplite_sender_cscov(udp_sk(sk), skb->h.uh); + int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb)); __wsum csum = 0; skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 8b22723d6436..57a82445c465 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -296,7 +296,9 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) memcpy(skb->data, msg, len); skb->len += len; - skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph)); + skb_push(skb, sizeof(*udph)); + skb_reset_transport_header(skb); + udph = udp_hdr(skb); udph->source = htons(np->local_port); udph->dest = htons(np->remote_port); udph->len = htons(udp_len); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index ee82364c8f31..160d4f01c46e 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2392,7 +2392,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, skb->dev = odev; skb->pkt_type = PACKET_HOST; skb->nh.raw = (unsigned char *)iph; - skb->h.uh = udph; + skb->h.raw = (unsigned char *)udph; if (pkt_dev->nfrags <= 0) pgh = (struct pktgen_hdr *)skb_put(skb, datalen); @@ -2737,7 +2737,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, skb->dev = odev; skb->pkt_type = PACKET_HOST; skb->nh.raw = (unsigned char *)iph; - skb->h.uh = udph; + skb->h.raw = (unsigned char *)udph; if (pkt_dev->nfrags 
<= 0) pgh = (struct pktgen_hdr *)skb_put(skb, datalen); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 13875e8419a7..926404c5e58c 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -420,7 +420,7 @@ static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, int len ) { unsigned int offset; - struct udphdr *uh = skb->h.uh; + struct udphdr *uh = udp_hdr(skb); __wsum csum = 0; if (skb_queue_len(&sk->sk_write_queue) == 1) { @@ -470,7 +470,7 @@ static int udp_push_pending_frames(struct sock *sk) /* * Create a UDP header */ - uh = skb->h.uh; + uh = udp_hdr(skb); uh->source = fl->fl_ip_sport; uh->dest = fl->fl_ip_dport; uh->len = htons(up->len); @@ -866,7 +866,7 @@ try_again: if (sin) { sin->sin_family = AF_INET; - sin->sin_port = skb->h.uh->source; + sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); } @@ -949,7 +949,7 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) return 1; /* Now we can get the pointers */ - uh = skb->h.uh; + uh = udp_hdr(skb); udpdata = (__u8 *)uh + sizeof(struct udphdr); udpdata32 = (__be32 *)udpdata; @@ -1207,7 +1207,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], int proto) { struct sock *sk; - struct udphdr *uh = skb->h.uh; + struct udphdr *uh = udp_hdr(skb); unsigned short ulen; struct rtable *rt = (struct rtable*)skb->dst; __be32 saddr = ip_hdr(skb)->saddr; @@ -1227,7 +1227,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], /* UDP validates ulen. */ if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) goto short_packet; - uh = skb->h.uh; + uh = udp_hdr(skb); } if (udp4_csum_init(skb, uh, proto)) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 55affe39b2eb..1e3dfb20b1cf 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -172,7 +172,7 @@ try_again: sin6 = (struct sockaddr_in6 *) msg->msg_name; sin6->sin6_family = AF_INET6; - sin6->sin6_port = skb->h.uh->source; + sin6->sin6_port = udp_hdr(skb)->source; sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = 0; @@ -346,7 +346,7 @@ static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr, struct in6_addr *daddr, struct hlist_head udptable[]) { struct sock *sk, *sk2; - const struct udphdr *uh = skb->h.uh; + const struct udphdr *uh = udp_hdr(skb); int dif; read_lock(&udp_hash_lock); @@ -420,7 +420,7 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; - uh = skb->h.uh; + uh = udp_hdr(skb); ulen = ntohs(uh->len); if (ulen > skb->len) @@ -441,7 +441,7 @@ int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], goto short_packet; saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; - uh = skb->h.uh; + uh = udp_hdr(skb); } } @@ -534,7 +534,7 @@ static int udp_v6_push_pending_frames(struct sock *sk) /* * Create a UDP header */ - uh = skb->h.uh; + uh = udp_hdr(skb); uh->source = fl->fl_ip_sport; uh->dest = fl->fl_ip_dport; uh->len = htons(up->len); diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c index e601fa87bb77..665a99952440 100644 --- a/net/rxrpc/connection.c +++ b/net/rxrpc/connection.c @@ -229,10 +229,10 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer, _enter("%p{{%hu}},%u,%hu", peer, peer->trans->port, - ntohs(pkt->h.uh->source), + ntohs(udp_hdr(pkt)->source), ntohs(msg->hdr.serviceId)); - x_port = pkt->h.uh->source; + x_port = udp_hdr(pkt)->source; x_epoch = msg->hdr.epoch; x_clflag = 
msg->hdr.flags & RXRPC_CLIENT_INITIATED; x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK); diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c index cac078b74068..62398fd01f85 100644 --- a/net/rxrpc/transport.c +++ b/net/rxrpc/transport.c @@ -479,7 +479,7 @@ void rxrpc_trans_receive_packet(struct rxrpc_transport *trans) } addr = ip_hdr(pkt)->saddr; - port = pkt->h.uh->source; + port = udp_hdr(pkt)->source; _net("Rx Received UDP packet from %08x:%04hu", ntohl(addr), ntohs(port)); @@ -625,7 +625,7 @@ int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans, memset(&sin,0,sizeof(sin)); sin.sin_family = AF_INET; - sin.sin_port = msg->pkt->h.uh->source; + sin.sin_port = udp_hdr(msg->pkt)->source; sin.sin_addr.s_addr = ip_hdr(msg->pkt)->saddr; msghdr.msg_name = &sin; -- cgit v1.2.3-59-g8ed1b From 88c7664f13bd1a36acb8566b93892a4c58759ac6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Mar 2007 14:43:18 -0300 Subject: [SK_BUFF]: Introduce icmp_hdr(), remove skb->h.icmph Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/icmp.h | 9 +++++++++ include/linux/skbuff.h | 1 - net/dccp/ipv4.c | 4 ++-- net/ipv4/ah4.c | 4 ++-- net/ipv4/esp4.c | 4 ++-- net/ipv4/icmp.c | 14 +++++++------- net/ipv4/ip_gre.c | 12 ++++++------ net/ipv4/ip_sockglue.c | 6 +++--- net/ipv4/ipcomp.c | 4 ++-- net/ipv4/ipip.c | 12 ++++++------ net/ipv4/raw.c | 6 +++--- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv4/udp.c | 4 ++-- net/ipv6/sit.c | 12 ++++++------ net/sctp/input.c | 4 ++-- 15 files changed, 54 insertions(+), 46 deletions(-) (limited to 'include') diff --git a/include/linux/icmp.h b/include/linux/icmp.h index 24da4fbc1a2f..cd3017a15789 100644 --- a/include/linux/icmp.h +++ b/include/linux/icmp.h @@ -82,6 +82,15 @@ struct icmphdr { } un; }; +#ifdef __KERNEL__ +#include + +static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) +{ + return (struct icmphdr *)skb->h.raw; +} +#endif + /* * constants for (set|get)sockopt */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index cb1ac48cc808..e580416de78a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -238,7 +238,6 @@ struct sk_buff { union { struct tcphdr *th; - struct icmphdr *icmph; struct iphdr *ipiph; struct ipv6hdr *ipv6h; unsigned char *raw; diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index b85437dae0e7..718f2fa923a1 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -207,8 +207,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) (iph->ihl << 2)); struct dccp_sock *dp; struct inet_sock *inet; - const int type = skb->h.icmph->type; - const int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; struct sock *sk; __u64 seq; int err; diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index ebcc797e1c13..e1bb9e0aa5f3 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -198,8 +198,8 @@ static void ah4_err(struct sk_buff *skb, u32 info) struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; - if (skb->h.icmph->type != ICMP_DEST_UNREACH || - skb->h.icmph->code != ICMP_FRAG_NEEDED) + if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || + icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) return; x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 82543eebfa52..de019f9fbfe1 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -304,8 +304,8 @@ static void esp4_err(struct sk_buff *skb, u32 info) struct 
ip_esp_hdr *esph = (struct ip_esp_hdr*)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; - if (skb->h.icmph->type != ICMP_DEST_UNREACH || - skb->h.icmph->code != ICMP_FRAG_NEEDED) + if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || + icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) return; x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 4d70c21c50aa..8372f8b8f0cd 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -355,7 +355,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, ipc, rt, MSG_DONTWAIT) < 0) ip_flush_pending_frames(icmp_socket->sk); else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { - struct icmphdr *icmph = skb->h.icmph; + struct icmphdr *icmph = icmp_hdr(skb); __wsum csum = 0; struct sk_buff *skb1; @@ -613,7 +613,7 @@ static void icmp_unreach(struct sk_buff *skb) if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out_err; - icmph = skb->h.icmph; + icmph = icmp_hdr(skb); iph = (struct iphdr *)skb->data; if (iph->ihl < 5) /* Mangled header, drop. */ @@ -743,7 +743,7 @@ static void icmp_redirect(struct sk_buff *skb) iph = (struct iphdr *)skb->data; - switch (skb->h.icmph->code & 7) { + switch (icmp_hdr(skb)->code & 7) { case ICMP_REDIR_NET: case ICMP_REDIR_NETTOS: /* @@ -752,7 +752,7 @@ static void icmp_redirect(struct sk_buff *skb) case ICMP_REDIR_HOST: case ICMP_REDIR_HOSTTOS: ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr, - skb->h.icmph->un.gateway, + icmp_hdr(skb)->un.gateway, iph->saddr, skb->dev); break; } @@ -780,7 +780,7 @@ static void icmp_echo(struct sk_buff *skb) if (!sysctl_icmp_echo_ignore_all) { struct icmp_bxm icmp_param; - icmp_param.data.icmph = *skb->h.icmph; + icmp_param.data.icmph = *icmp_hdr(skb); icmp_param.data.icmph.type = ICMP_ECHOREPLY; icmp_param.skb = skb; icmp_param.offset = 0; @@ -816,7 +816,7 @@ static void icmp_timestamp(struct sk_buff *skb) icmp_param.data.times[2] = icmp_param.data.times[1]; if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4)) BUG(); - icmp_param.data.icmph = *skb->h.icmph; + icmp_param.data.icmph = *icmp_hdr(skb); icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY; icmp_param.data.icmph.code = 0; icmp_param.skb = skb; @@ -943,7 +943,7 @@ int icmp_rcv(struct sk_buff *skb) if (!pskb_pull(skb, sizeof(struct icmphdr))) goto error; - icmph = skb->h.icmph; + icmph = icmp_hdr(skb); /* * 18 is the highest 'known' ICMP type. 
Anything else is a mystery diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 969fe31723a7..39216e6a59ed 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -320,8 +320,8 @@ static void ipgre_err(struct sk_buff *skb, u32 info) struct iphdr *iph = (struct iphdr*)skb->data; __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); int grehlen = (iph->ihl<<2) + 4; - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; __be16 flags; @@ -388,8 +388,8 @@ out: struct iphdr *iph = (struct iphdr*)dp; struct iphdr *eiph; __be16 *p = (__be16*)(dp+(iph->ihl<<2)); - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; int rel_type = 0; int rel_code = 0; __be32 rel_info = 0; @@ -422,7 +422,7 @@ out: default: return; case ICMP_PARAMETERPROB: - n = ntohl(skb->h.icmph->un.gateway) >> 24; + n = ntohl(icmp_hdr(skb)->un.gateway) >> 24; if (n < (iph->ihl<<2)) return; @@ -442,7 +442,7 @@ out: return; case ICMP_FRAG_NEEDED: /* And it is the only really necessary thing :-) */ - n = ntohs(skb->h.icmph->un.frag.mtu); + n = ntohs(icmp_hdr(skb)->un.frag.mtu); if (n < grehlen+68) return; n -= grehlen; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index fabc250e16dd..ccdc59df015f 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -269,12 +269,12 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, serr = SKB_EXT_ERR(skb); serr->ee.ee_errno = err; serr->ee.ee_origin = SO_EE_ORIGIN_ICMP; - serr->ee.ee_type = skb->h.icmph->type; - serr->ee.ee_code = skb->h.icmph->code; + serr->ee.ee_type = icmp_hdr(skb)->type; + serr->ee.ee_code = icmp_hdr(skb)->code; serr->ee.ee_pad = 0; serr->ee.ee_info = info; serr->ee.ee_data = 0; - serr->addr_offset = (u8 *)&(((struct iphdr *)(skb->h.icmph + 1))->daddr) - + serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) - skb_network_header(skb); serr->port = port; diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 8eb46064c525..1f13cc507a47 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -174,8 +174,8 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info) struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; - if (skb->h.icmph->type != ICMP_DEST_UNREACH || - skb->h.icmph->code != ICMP_FRAG_NEEDED) + if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH || + icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) return; spi = htonl(ntohs(ipch->cpi)); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index b7f6ff4705b0..b32b50114806 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -280,8 +280,8 @@ static int ipip_err(struct sk_buff *skb, u32 info) ICMP in the real Internet is absolutely infeasible. 
*/ struct iphdr *iph = (struct iphdr*)skb->data; - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; int err; @@ -336,8 +336,8 @@ out: struct iphdr *iph = (struct iphdr*)dp; int hlen = iph->ihl<<2; struct iphdr *eiph; - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; int rel_type = 0; int rel_code = 0; __be32 rel_info = 0; @@ -354,7 +354,7 @@ out: default: return 0; case ICMP_PARAMETERPROB: - n = ntohl(skb->h.icmph->un.gateway) >> 24; + n = ntohl(icmp_hdr(skb)->un.gateway) >> 24; if (n < hlen) return 0; @@ -373,7 +373,7 @@ out: return 0; case ICMP_FRAG_NEEDED: /* And it is the only really necessary thing :-) */ - n = ntohs(skb->h.icmph->un.frag.mtu); + n = ntohs(icmp_hdr(skb)->un.frag.mtu); if (n < hlen+68) return 0; n -= hlen; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ac57afa7c316..bf101dc1a972 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -132,7 +132,7 @@ static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb) if (!pskb_may_pull(skb, sizeof(struct icmphdr))) return 1; - type = skb->h.icmph->type; + type = icmp_hdr(skb)->type; if (type < 32) { __u32 data = raw_sk(sk)->filter.data; @@ -184,8 +184,8 @@ out: void raw_err (struct sock *sk, struct sk_buff *skb, u32 info) { struct inet_sock *inet = inet_sk(sk); - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; int err = 0; int harderr = 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3a86d6b887ac..51424df9078e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -354,8 +354,8 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); struct tcp_sock *tp; struct inet_sock *inet; - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; struct sock *sk; __u32 seq; int err; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 926404c5e58c..71b0b60ba538 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -330,8 +330,8 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[]) struct inet_sock *inet; struct iphdr *iph = (struct iphdr*)skb->data; struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2)); - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; struct sock *sk; int harderr; int err; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 18ec86f177d9..1e8827b90aa7 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -224,8 +224,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info) ICMP in the real Internet is absolutely infeasible. 
*/ struct iphdr *iph = (struct iphdr*)skb->data; - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; int err; @@ -280,8 +280,8 @@ out: struct iphdr *iph = (struct iphdr*)dp; int hlen = iph->ihl<<2; struct ipv6hdr *iph6; - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; int rel_type = 0; int rel_code = 0; int rel_info = 0; @@ -296,14 +296,14 @@ out: default: return; case ICMP_PARAMETERPROB: - if (skb->h.icmph->un.gateway < hlen) + if (icmp_hdr(skb)->un.gateway < hlen) return; /* So... This guy found something strange INSIDE encapsulated packet. Well, he is fool, but what can we do ? */ rel_type = ICMPV6_PARAMPROB; - rel_info = skb->h.icmph->un.gateway - hlen; + rel_info = icmp_hdr(skb)->un.gateway - hlen; break; case ICMP_DEST_UNREACH: diff --git a/net/sctp/input.c b/net/sctp/input.c index 3a322c584c74..40d0df80183f 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -507,8 +507,8 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl <<2)); - int type = skb->h.icmph->type; - int code = skb->h.icmph->code; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; struct sock *sk; struct sctp_association *asoc = NULL; struct sctp_transport *transport; -- cgit v1.2.3-59-g8ed1b From ab6a5bb6b28a970104a34f0f6959b73cf61bdc72 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 18 Mar 2007 17:43:48 -0700 Subject: [TCP]: Introduce tcp_hdrlen() and tcp_optlen() The ip_hdrlen() buddy, created to reduce the number of skb->h.th-> uses and to avoid the longer, open coded equivalent. Ditched a no-op in bnx2 in the process. I wonder if we should have a BUG_ON(skb->h.th->doff < 5) in tcp_optlen()... Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/net/atl1/atl1_main.c | 7 +++---- drivers/net/bnx2.c | 7 +++---- drivers/net/e1000/e1000_main.c | 4 ++-- drivers/net/ehea/ehea_main.c | 2 +- drivers/net/ixgb/ixgb_main.c | 2 +- drivers/net/myri10ge/myri10ge.c | 3 +-- drivers/net/netxen/netxen_nic_hw.c | 3 +-- drivers/net/netxen/netxen_nic_main.c | 2 +- drivers/net/sky2.c | 2 +- drivers/net/tg3.c | 4 ++-- drivers/s390/net/qeth_eddp.c | 8 ++++---- include/linux/tcp.h | 10 ++++++++++ net/ipv4/tcp_ipv4.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- 14 files changed, 32 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index c26f8ce320e6..8d5994751e2e 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c @@ -1307,7 +1307,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, tso->tsopl |= (iph->ihl & CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; - tso->tsopl |= ((skb->h.th->doff << 2) & + tso->tsopl |= (tcp_hdrlen(skb) & TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; tso->tsopl |= (skb_shinfo(skb)->gso_size & TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; @@ -1369,8 +1369,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, if (tcp_seg) { /* TSO/GSO */ - proto_hdr_len = (skb_transport_offset(skb) + - (skb->h.th->doff << 2)); + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); buffer_info->length = proto_hdr_len; page = virt_to_page(skb->data); offset = (unsigned long)skb->data & ~PAGE_MASK; @@ -1563,7 +1562,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (mss) { if (skb->protocol == htons(ETH_P_IP)) { proto_hdr_len = (skb_transport_offset(skb) + - (skb->h.th->doff << 2)); + tcp_hdrlen(skb)); if (unlikely(proto_hdr_len > len)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index eb0c4f1d4483..73512fb16452 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -4521,13 +4521,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - tcp_opt_len = ((skb->h.th->doff - 5) * 4); vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; tcp_opt_len = 0; - if (skb->h.th->doff > 5) { - tcp_opt_len = (skb->h.th->doff - 5) << 2; - } + if (skb->h.th->doff > 5) + tcp_opt_len = tcp_optlen(skb); + ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); iph = ip_hdr(skb); diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 78cf417cf236..4572fbba50f9 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2887,7 +2887,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, return err; } - hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2)); + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); mss = skb_shinfo(skb)->gso_size; if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); @@ -3292,7 +3292,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* TSO Workaround for 82571/2/3 Controllers -- if skb->data * points to just header, pull a few bytes of payload from * frags into skb->data */ - hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2)); + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { switch (adapter->hw.mac_type) { unsigned int pull_size; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 0dc701e611e5..63732d2305bb 100644 --- a/drivers/net/ehea/ehea_main.c +++ 
b/drivers/net/ehea/ehea_main.c @@ -1300,7 +1300,7 @@ static void write_swqe2_TSO(struct sk_buff *skb, /* copy only eth/ip/tcp headers to immediate data and * the rest of skb->data to sg1entry */ - headersize = ETH_HLEN + ip_hdrlen(skb) + (skb->h.th->doff * 4); + headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); skb_data_size = skb->len - skb->data_len; diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index ceea6e45792d..96550d681623 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1190,7 +1190,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) return err; } - hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2)); + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); mss = skb_shinfo(skb)->gso_size; iph = ip_hdr(skb); iph->tot_len = 0; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e04228c7b14f..e4b69a0485ba 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -2054,8 +2054,7 @@ again: * send loop that we are still in the * header portion of the TSO packet. * TSO header must be at most 134 bytes long */ - cum_len = -(skb_transport_offset(skb) + - (skb->h.th->doff << 2)); + cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb)); /* for TSO, pseudo_hdr_offset holds mss. * The firmware figures out where to put diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 09ca2192cbfa..0fba8f190762 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -374,8 +374,7 @@ void netxen_tso_check(struct netxen_adapter *adapter, { if (desc->mss) { desc->total_hdr_length = (sizeof(struct ethhdr) + - ip_hdrlen(skb) + - skb->h.th->doff * 4); + ip_hdrlen(skb) + tcp_hdrlen(skb)); netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (ip_hdr(skb)->protocol == IPPROTO_TCP) { diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index b548a30e5c8e..b488e94bc4c0 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -779,7 +779,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (skb_shinfo(skb)->gso_size > 0) { no_of_desc++; - if ((ip_hdrlen(skb) + skb->h.th->doff * 4 + + if ((ip_hdrlen(skb) + tcp_hdrlen(skb) + sizeof(struct ethhdr)) > (sizeof(struct cmd_desc_type0) - 2)) { no_of_desc++; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index a35f2f2784ae..fd291fc93169 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1392,7 +1392,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* Check for TCP Segmentation Offload */ mss = skb_shinfo(skb)->gso_size; if (mss != 0) { - mss += ((skb->h.th->doff - 5) * 4); /* TCP options */ + mss += tcp_optlen(skb); /* TCP options */ mss += ip_hdrlen(skb) + sizeof(struct tcphdr); mss += ETH_HLEN; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 76a31afe20de..7ca30d76bf6f 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -3911,7 +3911,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) else { struct iphdr *iph = ip_hdr(skb); - tcp_opt_len = ((skb->h.th->doff - 5) * 4); + tcp_opt_len = tcp_optlen(skb); ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); iph->check = 0; @@ -4065,7 +4065,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) goto out_unlock; } - tcp_opt_len = ((skb->h.th->doff 
- 5) * 4); + tcp_opt_len = tcp_optlen(skb); ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); hdr_len = ip_tcp_len + tcp_opt_len; diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 90da58b4e532..273f1745a009 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -477,13 +477,13 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, skb_network_header(skb), ip_hdrlen(skb), skb->h.raw, - skb->h.th->doff * 4); + tcp_hdrlen(skb)); else eddp = qeth_eddp_create_eddp_data(qhdr, skb_network_header(skb), sizeof(struct ipv6hdr), skb->h.raw, - skb->h.th->doff * 4); + tcp_hdrlen(skb)); if (eddp == NULL) { QETH_DBF_TEXT(trace, 2, "eddpfcnm"); @@ -596,11 +596,11 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, ctx = qeth_eddp_create_context_generic(card, skb, (sizeof(struct qeth_hdr) + ip_hdrlen(skb) + - skb->h.th->doff * 4)); + tcp_hdrlen(skb))); else if (skb->protocol == htons(ETH_P_IPV6)) ctx = qeth_eddp_create_context_generic(card, skb, sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) + - skb->h.th->doff*4); + tcp_hdrlen(skb)); else QETH_DBF_TEXT(trace, 2, "cetcpinv"); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 18a468dd5055..244ae0dacf4a 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -178,6 +178,16 @@ struct tcp_md5sig { #include #include +static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) +{ + return skb->h.th->doff * 4; +} + +static inline unsigned int tcp_optlen(const struct sk_buff *skb) +{ + return (skb->h.th->doff - 5) * 4; +} + /* This defines a selective acknowledgement block. */ struct tcp_sack_block_wire { __be32 start_seq; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 51424df9078e..c146a02f8495 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1564,7 +1564,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; } - if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb)) + if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 85b3e89110f9..c573353f21cd 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1609,7 +1609,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; } - if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb)) + if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { -- cgit v1.2.3-59-g8ed1b From aa8223c7bb0b05183e1737881ed21827aa5b9e73 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 10 Apr 2007 21:04:22 -0700 Subject: [SK_BUFF]: Introduce tcp_hdr(), remove skb->h.th Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/net/atl1/atl1_main.c | 7 ++++--- drivers/net/bnx2.c | 8 ++++---- drivers/net/chelsio/sge.c | 2 +- drivers/net/cxgb3/sge.c | 2 +- drivers/net/e1000/e1000_main.c | 11 ++++++----- drivers/net/ioc3-eth.c | 2 +- drivers/net/ixgb/ixgb_main.c | 7 ++++--- drivers/net/mv643xx_eth.c | 2 +- drivers/net/tg3.c | 15 +++++++-------- drivers/s390/net/qeth_eddp.c | 2 +- drivers/s390/net/qeth_tso.h | 4 ++-- include/linux/skbuff.h | 1 - include/linux/tcp.h | 9 +++++++-- include/net/tcp.h | 2 +- include/net/tcp_ecn.h | 6 +++--- net/ipv4/ip_output.c | 4 ++-- net/ipv4/syncookies.c | 36 ++++++++++++++++++------------------ net/ipv4/tcp.c | 22 +++++++++++----------- net/ipv4/tcp_input.c | 28 +++++++++++++++------------- net/ipv4/tcp_ipv4.c | 32 ++++++++++++++++---------------- net/ipv4/tcp_minisocks.c | 9 +++++---- net/ipv4/tcp_output.c | 13 ++++++++----- net/ipv6/tcp_ipv6.c | 32 ++++++++++++++++---------------- 23 files changed, 134 insertions(+), 122 deletions(-) (limited to 'include') diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index 8d5994751e2e..d60c2217332c 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c @@ -1298,9 +1298,10 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, iph->tot_len = 0; iph->check = 0; - skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, 0); + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); ipofst = skb_network_offset(skb); if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 73512fb16452..7e7b5f344030 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -4524,7 +4524,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; tcp_opt_len = 0; - if (skb->h.th->doff > 5) + if (tcp_hdr(skb)->doff > 5) tcp_opt_len = tcp_optlen(skb); ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); @@ -4532,9 +4532,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) iph = ip_hdr(skb); iph->check = 0; iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); - skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, 0); if (tcp_opt_len || (iph->ihl > 5)) { vlan_tag_flags |= ((iph->ihl - 5) + (tcp_opt_len >> 2)) << 8; diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index a4204dff3636..43e92f9f0bcd 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1872,7 +1872,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) hdr->opcode = CPL_TX_PKT_LSO; hdr->ip_csum_dis = hdr->l4_csum_dis = 0; hdr->ip_hdr_words = ip_hdr(skb)->ihl; - hdr->tcp_hdr_words = skb->h.th->doff; + hdr->tcp_hdr_words = tcp_hdr(skb)->doff; hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, skb_shinfo(skb)->gso_size)); hdr->len = htonl(skb->len - sizeof(*hdr)); diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index d38b1bcd138e..a70fe9145a2e 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -901,7 +901,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, CPL_ETH_II : CPL_ETH_II_VLAN; tso_info |= V_LSO_ETH_TYPE(eth_type) | V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | - V_LSO_TCPHDR_WORDS(skb->h.th->doff); + V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); hdr->lso_info = htonl(tso_info); flits = 3; } else 
{ diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4572fbba50f9..e86deb2ef823 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2893,14 +2893,15 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; - skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, 0); + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; } else if (skb->protocol == htons(ETH_P_IPV6)) { ipv6_hdr(skb)->payload_len = 0; - skb->h.th->check = + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); @@ -2909,7 +2910,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, ipcss = skb_network_offset(skb); ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; tucss = skb_transport_offset(skb); - tucso = (void *)&(skb->h.th->check) - (void *)skb->data; + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; tucse = 0; cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index ba012e10d79a..bc62e770a256 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -1426,7 +1426,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) } if (proto == IPPROTO_TCP) { csoff += offsetof(struct tcphdr, check); - skb->h.th->check = csum; + tcp_hdr(skb)->check = csum; } w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 96550d681623..e729ced52dc3 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1195,13 +1195,14 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; - skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, 0); ipcss = skb_network_offset(skb); ipcso = (void *)&(iph->check) - (void *)skb->data; ipcse = skb_transport_offset(skb) - 1; tucss = skb_transport_offset(skb); - tucso = (void *)&(skb->h.th->check) - (void *)skb->data; + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; tucse = 0; i = adapter->tx_ring.next_to_use; diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 43723839e934..ab15ecd4b3d6 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -1169,7 +1169,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, desc->l4i_chk = udp_hdr(skb)->check; break; case IPPROTO_TCP: - desc->l4i_chk = skb->h.th->check; + desc->l4i_chk = tcp_hdr(skb)->check; break; default: BUG(); diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 7ca30d76bf6f..414365c3198d 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -3922,7 +3922,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) base_flags |= (TXD_FLAG_CPU_PRE_DMA | TXD_FLAG_CPU_POST_DMA); - skb->h.th->check = 0; + tcp_hdr(skb)->check = 0; } else if (skb->ip_summed == CHECKSUM_PARTIAL) @@ -4080,14 +4080,13 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) iph->check = 0; iph->tot_len = htons(mss + hdr_len); if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { - skb->h.th->check = 0; + tcp_hdr(skb)->check = 0; base_flags 
&= ~TXD_FLAG_TCPUDP_CSUM; - } - else { - skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, 0); - } + } else + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 273f1745a009..b8e84674e170 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -416,7 +416,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, eddp->skb_offset += VLAN_HLEN; #endif /* CONFIG_QETH_VLAN */ } - tcph = eddp->skb->h.th; + tcph = tcp_hdr(eddp->skb); while (eddp->skb_offset < eddp->skb->len) { data_len = min((int)skb_shinfo(eddp->skb)->gso_size, (int)(eddp->skb->len - eddp->skb_offset)); diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h index 4040bdd8c327..c20e923cf9ad 100644 --- a/drivers/s390/net/qeth_tso.h +++ b/drivers/s390/net/qeth_tso.h @@ -41,7 +41,7 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb) hdr = (struct qeth_hdr_tso *) skb->data; iph = ip_hdr(skb); - tcph = skb->h.th; + tcph = tcp_hdr(skb); /*fix header to TSO values ...*/ hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; /*set values which are fix for the first approach ...*/ @@ -65,7 +65,7 @@ qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); struct ipv6hdr *ip6h = ipv6_hdr(skb); - struct tcphdr *tcph = skb->h.th; + struct tcphdr *tcph = tcp_hdr(skb); tcph->check = 0; if (skb->protocol == ETH_P_IPV6) { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index e580416de78a..8f158d66d2a8 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -237,7 +237,6 @@ struct sk_buff { /* 4 byte hole on 64 bit*/ union { - struct tcphdr *th; struct iphdr *ipiph; struct ipv6hdr *ipv6h; unsigned char *raw; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 244ae0dacf4a..911d937fb4c1 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -178,14 +178,19 @@ struct tcp_md5sig { #include #include +static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) +{ + return (struct tcphdr *)skb->h.raw; +} + static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) { - return skb->h.th->doff * 4; + return tcp_hdr(skb)->doff * 4; } static inline unsigned int tcp_optlen(const struct sk_buff *skb) { - return (skb->h.th->doff - 5) * 4; + return (tcp_hdr(skb)->doff - 5) * 4; } /* This defines a selective acknowledgement block. 
*/ diff --git a/include/net/tcp.h b/include/net/tcp.h index 6dacc352dcf1..af9273204cfd 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -984,7 +984,7 @@ static inline void tcp_openreq_init(struct request_sock *req, ireq->wscale_ok = rx_opt->wscale_ok; ireq->acked = 0; ireq->ecn_ok = 0; - ireq->rmt_port = skb->h.th->source; + ireq->rmt_port = tcp_hdr(skb)->source; } extern void tcp_enter_memory_pressure(void); diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h index 4629d77173f2..b5f7c6ac0880 100644 --- a/include/net/tcp_ecn.h +++ b/include/net/tcp_ecn.h @@ -54,7 +54,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp, INET_ECN_xmit(sk); if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) { tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; - skb->h.th->cwr = 1; + tcp_hdr(skb)->cwr = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; } } else { @@ -62,7 +62,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp, INET_ECN_dontxmit(sk); } if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) - skb->h.th->ece = 1; + tcp_hdr(skb)->ece = 1; } } @@ -70,7 +70,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp, static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb) { - if (skb->h.th->cwr) + if (tcp_hdr(skb)->cwr) tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 6d92358fc513..602268661eb3 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1352,8 +1352,8 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar .tos = RT_TOS(ip_hdr(skb)->tos) } }, /* Not quite clean, but right. */ .uli_u = { .ports = - { .sport = skb->h.th->dest, - .dport = skb->h.th->source } }, + { .sport = tcp_hdr(skb)->dest, + .dport = tcp_hdr(skb)->source } }, .proto = sk->sk_protocol }; security_skb_classify_flow(skb, &fl); if (ip_route_output_key(&rt, &fl)) diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 261607178491..2da1be0589a9 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -125,10 +125,11 @@ static __u16 const msstab[] = { __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) { struct tcp_sock *tp = tcp_sk(sk); + const struct iphdr *iph = ip_hdr(skb); + const struct tcphdr *th = tcp_hdr(skb); int mssind; const __u16 mss = *mssp; - tp->last_synq_overflow = jiffies; /* XXX sort msstab[] by probability? Binary search? */ @@ -138,9 +139,8 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT); - return secure_tcp_syn_cookie(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, - skb->h.th->source, skb->h.th->dest, - ntohl(skb->h.th->seq), + return secure_tcp_syn_cookie(iph->saddr, iph->daddr, + th->source, th->dest, ntohl(th->seq), jiffies / (HZ * 60), mssind); } @@ -157,14 +157,13 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) */ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) { - __u32 seq; - __u32 mssind; - - seq = ntohl(skb->h.th->seq)-1; - mssind = check_tcp_syn_cookie(cookie, - ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, - skb->h.th->source, skb->h.th->dest, - seq, jiffies / (HZ * 60), COUNTER_TRIES); + const struct iphdr *iph = ip_hdr(skb); + const struct tcphdr *th = tcp_hdr(skb); + __u32 seq = ntohl(th->seq) - 1; + __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr, + th->source, th->dest, seq, + jiffies / (HZ * 60), + COUNTER_TRIES); return mssind < NUM_MSS ? 
msstab[mssind] + 1 : 0; } @@ -191,14 +190,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct inet_request_sock *ireq; struct tcp_request_sock *treq; struct tcp_sock *tp = tcp_sk(sk); - __u32 cookie = ntohl(skb->h.th->ack_seq) - 1; + const struct tcphdr *th = tcp_hdr(skb); + __u32 cookie = ntohl(th->ack_seq) - 1; struct sock *ret = sk; struct request_sock *req; int mss; struct rtable *rt; __u8 rcv_wscale; - if (!sysctl_tcp_syncookies || !skb->h.th->ack) + if (!sysctl_tcp_syncookies || !th->ack) goto out; if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || @@ -220,10 +220,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, } ireq = inet_rsk(req); treq = tcp_rsk(req); - treq->rcv_isn = ntohl(skb->h.th->seq) - 1; + treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; req->mss = mss; - ireq->rmt_port = skb->h.th->source; + ireq->rmt_port = th->source; ireq->loc_addr = ip_hdr(skb)->daddr; ireq->rmt_addr = ip_hdr(skb)->saddr; ireq->opt = NULL; @@ -261,8 +261,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, .tos = RT_CONN_FLAGS(sk) } }, .proto = IPPROTO_TCP, .uli_u = { .ports = - { .sport = skb->h.th->dest, - .dport = skb->h.th->source } } }; + { .sport = th->dest, + .dport = th->source } } }; security_req_classify_flow(req, &fl); if (ip_route_output_key(&rt, &fl)) { reqsk_free(req); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 689f9330f1b9..f832f3c33ab1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -425,7 +425,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) /* Subtract 1, if FIN is in queue. */ if (answ && !skb_queue_empty(&sk->sk_receive_queue)) answ -= - ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin; + tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin; } else answ = tp->urg_seq - tp->copied_seq; release_sock(sk); @@ -1016,9 +1016,9 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) skb_queue_walk(&sk->sk_receive_queue, skb) { offset = seq - TCP_SKB_CB(skb)->seq; - if (skb->h.th->syn) + if (tcp_hdr(skb)->syn) offset--; - if (offset < skb->len || skb->h.th->fin) { + if (offset < skb->len || tcp_hdr(skb)->fin) { *off = offset; return skb; } @@ -1070,7 +1070,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, if (offset != skb->len) break; } - if (skb->h.th->fin) { + if (tcp_hdr(skb)->fin) { sk_eat_skb(sk, skb, 0); ++seq; break; @@ -1174,11 +1174,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, break; } offset = *seq - TCP_SKB_CB(skb)->seq; - if (skb->h.th->syn) + if (tcp_hdr(skb)->syn) offset--; if (offset < skb->len) goto found_ok_skb; - if (skb->h.th->fin) + if (tcp_hdr(skb)->fin) goto found_fin_ok; BUG_TRAP(flags & MSG_PEEK); skb = skb->next; @@ -1394,7 +1394,7 @@ skip_copy: if (used + offset < skb->len) continue; - if (skb->h.th->fin) + if (tcp_hdr(skb)->fin) goto found_fin_ok; if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, copied_early); @@ -1563,7 +1563,7 @@ void tcp_close(struct sock *sk, long timeout) */ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - - skb->h.th->fin; + tcp_hdr(skb)->fin; data_was_unread += len; __kfree_skb(skb); } @@ -2170,7 +2170,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) if (!pskb_may_pull(skb, sizeof(*th))) goto out; - th = skb->h.th; + th = tcp_hdr(skb); thlen = th->doff * 4; if (thlen < sizeof(*th)) goto out; @@ -2210,7 +2210,7 @@ struct sk_buff 
*tcp_tso_segment(struct sk_buff *skb, int features) delta = htonl(oldlen + (thlen + len)); skb = segs; - th = skb->h.th; + th = tcp_hdr(skb); seq = ntohl(th->seq); do { @@ -2224,7 +2224,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) seq += len; skb = skb->next; - th = skb->h.th; + th = tcp_hdr(skb); th->seq = htonl(seq); th->cwr = 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2776a8b01339..c1ce36237380 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -148,7 +148,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, * to handle super-low mtu links fairly. */ (len >= TCP_MIN_MSS + sizeof(struct tcphdr) && - !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) { + !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { /* Subtract also invariant (if peer is RFC compliant), * tcp header plus fixed timestamp option length. * Resulting "len" is MSS free of SACK jitter. @@ -2559,9 +2559,9 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb, u32 ack, u32 ack_seq) { int flag = 0; - u32 nwin = ntohs(skb->h.th->window); + u32 nwin = ntohs(tcp_hdr(skb)->window); - if (likely(!skb->h.th->syn)) + if (likely(!tcp_hdr(skb)->syn)) nwin <<= tp->rx_opt.snd_wscale; if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { @@ -2766,7 +2766,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) if (TCP_SKB_CB(skb)->sacked) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); - if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th)) + if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) flag |= FLAG_ECE; tcp_ca_event(sk, CA_EVENT_SLOW_ACK); @@ -2833,7 +2833,7 @@ uninteresting_ack: void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab) { unsigned char *ptr; - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); int length=(th->doff*4)-sizeof(struct tcphdr); ptr = (unsigned char *)(th + 1); @@ -2995,7 +2995,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); u32 seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; @@ -3357,8 +3357,8 @@ static void tcp_ofo_queue(struct sock *sk) __skb_unlink(skb, &tp->out_of_order_queue); __skb_queue_tail(&sk->sk_receive_queue, skb); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; - if (skb->h.th->fin) - tcp_fin(skb, sk, skb->h.th); + if (tcp_hdr(skb)->fin) + tcp_fin(skb, sk, tcp_hdr(skb)); } } @@ -3366,7 +3366,7 @@ static int tcp_prune_queue(struct sock *sk); static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); struct tcp_sock *tp = tcp_sk(sk); int eaten = -1; @@ -3605,7 +3605,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, * - bloated or contains data before "start" or * overlaps to the next one. 
*/ - if (!skb->h.th->syn && !skb->h.th->fin && + if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && (tcp_win_from_space(skb->truesize) > skb->len || before(TCP_SKB_CB(skb)->seq, start) || (skb->next != tail && @@ -3616,7 +3616,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, start = TCP_SKB_CB(skb)->end_seq; skb = skb->next; } - if (skb == tail || skb->h.th->syn || skb->h.th->fin) + if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) return; while (before(start, end)) { @@ -3665,7 +3665,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, __kfree_skb(skb); NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); skb = next; - if (skb == tail || skb->h.th->syn || skb->h.th->fin) + if (skb == tail || + tcp_hdr(skb)->syn || + tcp_hdr(skb)->fin) return; } } @@ -4072,7 +4074,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen tcp_rcv_space_adjust(sk); if ((tp->ucopy.len == 0) || - (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) || + (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { tp->ucopy.wakeup = 1; sk->sk_data_ready(sk, 0); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index c146a02f8495..617a5e4ca010 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -127,8 +127,8 @@ static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) { return secure_tcp_sequence_number(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, - skb->h.th->dest, - skb->h.th->source); + tcp_hdr(skb)->dest, + tcp_hdr(skb)->source); } int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) @@ -499,7 +499,7 @@ out: void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) { struct inet_sock *inet = inet_sk(sk); - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~tcp_v4_check(len, inet->saddr, @@ -522,7 +522,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) return -EINVAL; iph = ip_hdr(skb); - th = skb->h.th; + th = tcp_hdr(skb); th->check = 0; th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0); @@ -546,7 +546,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) { - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); struct { struct tcphdr th; #ifdef CONFIG_TCP_MD5SIG @@ -622,7 +622,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) { - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); struct { struct tcphdr th; __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) @@ -745,7 +745,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, skb = tcp_make_synack(sk, dst, req); if (skb) { - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); th->check = tcp_v4_check(skb->len, ireq->loc_addr, @@ -781,7 +781,7 @@ static void syn_flood_warning(struct sk_buff *skb) warntime = jiffies; printk(KERN_INFO "possible SYN flooding on port %d. 
Sending cookies.\n", - ntohs(skb->h.th->dest)); + ntohs(tcp_hdr(skb)->dest)); } } #endif @@ -1134,7 +1134,7 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) __u8 *hash_location = NULL; struct tcp_md5sig_key *hash_expected; const struct iphdr *iph = ip_hdr(skb); - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); int length = (th->doff << 2) - sizeof(struct tcphdr); int genhash; unsigned char *ptr; @@ -1327,7 +1327,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ireq->rmt_addr = saddr; ireq->opt = tcp_v4_save_options(sk, skb); if (!want_cookie) - TCP_ECN_create_request(req, skb->h.th); + TCP_ECN_create_request(req, tcp_hdr(skb)); if (want_cookie) { #ifdef CONFIG_SYN_COOKIES @@ -1375,7 +1375,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open " "request from %u.%u.%u.%u/%u\n", NIPQUAD(saddr), - ntohs(skb->h.th->source)); + ntohs(tcp_hdr(skb)->source)); dst_release(dst); goto drop_and_free; } @@ -1481,7 +1481,7 @@ exit: static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) { - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); const struct iphdr *iph = ip_hdr(skb); struct sock *nsk; struct request_sock **prev; @@ -1556,7 +1556,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ TCP_CHECK_TIMER(sk); - if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) { + if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { rsk = sk; goto reset; } @@ -1582,7 +1582,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) } TCP_CHECK_TIMER(sk); - if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) { + if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { rsk = sk; goto reset; } @@ -1625,7 +1625,7 @@ int tcp_v4_rcv(struct sk_buff *skb) if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; - th = skb->h.th; + th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr) / 4) goto bad_packet; @@ -1640,7 +1640,7 @@ int tcp_v4_rcv(struct sk_buff *skb) tcp_v4_checksum_init(skb))) goto bad_packet; - th = skb->h.th; + th = tcp_hdr(skb); iph = ip_hdr(skb); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 463d2b24d2db..a12b08fca5ad 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -453,7 +453,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0; newtp->window_clamp = min(newtp->window_clamp, 65535U); } - newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale; + newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) << + newtp->rx_opt.snd_wscale); newtp->max_window = newtp->snd_wnd; if (newtp->rx_opt.tstamp_ok) { @@ -488,7 +489,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, struct request_sock *req, struct request_sock **prev) { - struct tcphdr *th = skb->h.th; + const struct tcphdr *th = tcp_hdr(skb); __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); int paws_reject = 0; struct tcp_options_received tmp_opt; @@ -710,8 +711,8 @@ int tcp_child_process(struct sock *parent, struct sock *child, int state = child->sk_state; if (!sock_owned_by_user(child)) { - ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len); - + ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb), + skb->len); 
/* Wakeup parent, send SIGIO */ if (state == TCP_SYN_RECV && child->sk_state != state) parent->sk_data_ready(parent, 0); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f19f5fb361b5..29c53fbb2204 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -465,11 +465,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; #endif - th = (struct tcphdr *) skb_push(skb, tcp_header_size); - skb->h.th = th; + skb_push(skb, tcp_header_size); + skb_reset_transport_header(skb); skb_set_owner_w(skb, sk); /* Build TCP header and checksum it. */ + th = tcp_hdr(skb); th->source = inet->sport; th->dest = inet->dport; th->seq = htonl(tcb->seq); @@ -524,7 +525,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, tp->af_specific->calc_md5_hash(md5_hash_location, md5, sk, NULL, NULL, - skb->h.th, + tcp_hdr(skb), sk->sk_protocol, skb->len); } @@ -2128,8 +2129,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, if (md5) tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; #endif - skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); + skb_push(skb, tcp_header_size); + skb_reset_transport_header(skb); + th = tcp_hdr(skb); memset(th, 0, sizeof(struct tcphdr)); th->syn = 1; th->ack = 1; @@ -2183,7 +2186,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, tp->af_specific->calc_md5_hash(md5_hash_location, md5, NULL, dst, req, - skb->h.th, sk->sk_protocol, + tcp_hdr(skb), sk->sk_protocol, skb->len); } #endif diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c573353f21cd..4a55da079f5f 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -117,8 +117,8 @@ static __u32 tcp_v6_init_sequence(struct sk_buff *skb) { return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, ipv6_hdr(skb)->saddr.s6_addr32, - skb->h.th->dest, - skb->h.th->source); + tcp_hdr(skb)->dest, + tcp_hdr(skb)->source); } static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, @@ -509,7 +509,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, skb = tcp_make_synack(sk, dst, req); if (skb) { - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); th->check = tcp_v6_check(th, skb->len, &treq->loc_addr, &treq->rmt_addr, @@ -838,7 +838,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) __u8 *hash_location = NULL; struct tcp_md5sig_key *hash_expected; struct ipv6hdr *ip6h = ipv6_hdr(skb); - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); int length = (th->doff << 2) - sizeof (*th); int genhash; u8 *ptr; @@ -946,7 +946,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); - struct tcphdr *th = skb->h.th; + struct tcphdr *th = tcp_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); @@ -967,7 +967,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) return -EINVAL; ipv6h = ipv6_hdr(skb); - th = skb->h.th; + th = tcp_hdr(skb); th->check = 0; th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, @@ -979,7 +979,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) { - struct tcphdr *th = skb->h.th, *t1; + struct tcphdr *th = tcp_hdr(skb), *t1; struct sk_buff *buff; 
struct flowi fl; int tot_len = sizeof(*th); @@ -1079,7 +1079,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) { - struct tcphdr *th = skb->h.th, *t1; + struct tcphdr *th = tcp_hdr(skb), *t1; struct sk_buff *buff; struct flowi fl; int tot_len = sizeof(struct tcphdr); @@ -1195,7 +1195,7 @@ static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) { struct request_sock *req, **prev; - const struct tcphdr *th = skb->h.th; + const struct tcphdr *th = tcp_hdr(skb); struct sock *nsk; /* Find possible connection requests. */ @@ -1275,7 +1275,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) treq = inet6_rsk(req); ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); - TCP_ECN_create_request(req, skb->h.th); + TCP_ECN_create_request(req, tcp_hdr(skb)); treq->pktopts = NULL; if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || @@ -1528,14 +1528,14 @@ out: static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_COMPLETE) { - if (!tcp_v6_check(skb->h.th, skb->len, &ipv6_hdr(skb)->saddr, + if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; return 0; } } - skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th, skb->len, + skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0)); @@ -1601,7 +1601,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ TCP_CHECK_TIMER(sk); - if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) + if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) goto reset; TCP_CHECK_TIMER(sk); if (opt_skb) @@ -1632,7 +1632,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) } TCP_CHECK_TIMER(sk); - if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) + if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) goto reset; TCP_CHECK_TIMER(sk); if (opt_skb) @@ -1698,7 +1698,7 @@ static int tcp_v6_rcv(struct sk_buff **pskb) if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; - th = skb->h.th; + th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr)/4) goto bad_packet; @@ -1709,7 +1709,7 @@ static int tcp_v6_rcv(struct sk_buff **pskb) tcp_v6_checksum_init(skb))) goto bad_packet; - th = skb->h.th; + th = tcp_hdr(skb); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff*4); -- cgit v1.2.3-59-g8ed1b From b0061ce49c83657563b64ffcf1ec137110230d93 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 25 Apr 2007 18:02:22 -0700 Subject: [SK_BUFF]: Introduce ipip_hdr(), remove skb->h.ipiph Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/net/sk98lin/skge.c | 4 ++-- drivers/net/skge.c | 2 +- include/linux/ip.h | 5 +++++ include/linux/skbuff.h | 1 - net/ipv4/xfrm4_mode_beet.c | 6 +++--- net/ipv4/xfrm4_mode_tunnel.c | 6 +++--- net/ipv6/xfrm6_mode_tunnel.c | 2 +- 7 files changed, 15 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index e4ab7a8acc1a..b987a5c3f42a 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c @@ -1565,7 +1565,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ u16 hdrlen = skb_transport_offset(pMessage); u16 offset = hdrlen + pMessage->csum_offset; - if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && + if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) && (pAC->GIni.GIChipRev == 0) && (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { pTxd->TBControl = BMU_TCP_CHECK; @@ -1691,7 +1691,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ ** opcode for udp is not working in the hardware yet ** (Revision 2.0) */ - if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && + if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) && (pAC->GIni.GIChipRev == 0) && (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { Control |= BMU_TCP_CHECK; diff --git a/drivers/net/skge.c b/drivers/net/skge.c index ca7a0e039849..99b61cfb7ce6 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -2659,7 +2659,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* This seems backwards, but it is what the sk98lin * does. Looks like hardware is wrong? */ - if (skb->h.ipiph->protocol == IPPROTO_UDP + if (ipip_hdr(skb)->protocol == IPPROTO_UDP && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) control = BMU_TCP_CHECK; else diff --git a/include/linux/ip.h b/include/linux/ip.h index f2f26db16f57..19578440b5fc 100644 --- a/include/linux/ip.h +++ b/include/linux/ip.h @@ -111,6 +111,11 @@ static inline struct iphdr *ip_hdr(const struct sk_buff *skb) { return (struct iphdr *)skb_network_header(skb); } + +static inline struct iphdr *ipip_hdr(const struct sk_buff *skb) +{ + return (struct iphdr *)skb->h.raw; +} #endif struct ip_auth_hdr { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8f158d66d2a8..862a81cf7f74 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -237,7 +237,6 @@ struct sk_buff { /* 4 byte hole on 64 bit*/ union { - struct iphdr *ipiph; struct ipv6hdr *ipv6h; unsigned char *raw; } h; diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c index 9e5ba12c6c75..32fcfc0b5c8c 100644 --- a/net/ipv4/xfrm4_mode_beet.c +++ b/net/ipv4/xfrm4_mode_beet.c @@ -83,7 +83,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb) if (!pskb_may_pull(skb, sizeof(*ph))) goto out; - ph = (struct ip_beet_phdr *)(skb->h.ipiph + 1); + ph = (struct ip_beet_phdr *)(ipip_hdr(skb) + 1); phlen = sizeof(*ph) + ph->padlen; optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen); @@ -97,9 +97,9 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb) ph_nexthdr = ph->nexthdr; } - skb->nh.raw = skb->data + (phlen - sizeof(*iph)); + skb_set_network_header(skb, phlen - sizeof(*iph)); memmove(skb_network_header(skb), iph, sizeof(*iph)); - skb->h.raw = skb->data + (phlen + optlen); + skb_set_transport_header(skb, phlen + optlen); skb->data = skb->h.raw; iph = ip_hdr(skb); diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index edba75610a46..521e52f055c1 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c 
@@ -17,7 +17,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb) { struct iphdr *outer_iph = ip_hdr(skb); - struct iphdr *inner_iph = skb->h.ipiph; + struct iphdr *inner_iph = ipip_hdr(skb); if (INET_ECN_is_ce(outer_iph->tos)) IP_ECN_set_ce(inner_iph); @@ -47,7 +47,7 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) int flags; iph = ip_hdr(skb); - skb->h.ipiph = iph; + skb->h.raw = skb->nh.raw; skb_push(skb, x->props.header_len); skb_reset_network_header(skb); @@ -116,7 +116,7 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) iph = ip_hdr(skb); if (iph->protocol == IPPROTO_IPIP) { if (x->props.flags & XFRM_STATE_DECAP_DSCP) - ipv4_copy_dscp(iph, skb->h.ipiph); + ipv4_copy_dscp(iph, ipip_hdr(skb)); if (!(x->props.flags & XFRM_STATE_NOECN)) ipip_ecn_decapsulate(skb); } diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 28f36b363d1f..9d3bd338e5d0 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -28,7 +28,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) static inline void ip6ip_ecn_decapsulate(struct sk_buff *skb) { if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6_hdr(skb)))) - IP_ECN_set_ce(skb->h.ipiph); + IP_ECN_set_ce(ipip_hdr(skb)); } /* Add encapsulation header. -- cgit v1.2.3-59-g8ed1b From 39b89160df691045d1449cbaef43c02084c7543a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 10 Apr 2007 21:06:25 -0700 Subject: [SK_BUFF]: Introduce ipipv6_hdr(), remove skb->h.ipv6h Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/ipv6.h | 5 +++++ include/linux/skbuff.h | 1 - net/ipv6/xfrm6_mode_beet.c | 4 ++-- net/ipv6/xfrm6_mode_tunnel.c | 8 ++++---- 4 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ec79c59b2077..b768fcc0a4c4 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -228,6 +228,11 @@ static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb) return (struct ipv6hdr *)skb_network_header(skb); } +static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb) +{ + return (struct ipv6hdr *)skb->h.raw; +} + /* This structure contains results of exthdrs parsing as offsets from skb->nh. 
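As a usage sketch only (not taken from these patches; the function name below is hypothetical), the ipip_hdr()/ipipv6_hdr() accessors introduced here give tunnel code a typed pointer to the encapsulated inner header, while ip_hdr()/ipv6_hdr() keep pointing at the outer one, e.g. for ECN propagation during decapsulation:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>

/*
 * Illustration only, not part of the patches above: once a tunnel input
 * path has set skb->h.raw to the start of the encapsulated packet, the
 * outer header is still reached through ip_hdr()/ipv6_hdr(), while the
 * new ipip_hdr()/ipipv6_hdr() accessors return the inner header.
 * Assumes a same-family tunnel (IPv4-in-IPv4 or IPv6-in-IPv6).
 */
static void example_ecn_decapsulate(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		/* outer IPv4 TOS carries CE -> mark the inner IPv4 header */
		if (INET_ECN_is_ce(ip_hdr(skb)->tos))
			IP_ECN_set_ce(ipip_hdr(skb));
	} else {
		/* outer IPv6 traffic class carries CE -> mark the inner IPv6 header */
		if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6_hdr(skb))))
			IP6_ECN_set_ce(ipipv6_hdr(skb));
	}
}

This mirrors the ipip_ecn_decapsulate()/ip6ip_ecn_decapsulate() hunks converted elsewhere in these commits.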
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 862a81cf7f74..d3f186230ee2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -237,7 +237,6 @@ struct sk_buff { /* 4 byte hole on 64 bit*/ union { - struct ipv6hdr *ipv6h; unsigned char *raw; } h; diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index abac09409ded..0cc96ece003d 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ b/net/ipv6/xfrm6_mode_beet.c @@ -47,8 +47,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) skb_reset_network_header(skb); top_iph = ipv6_hdr(skb); - skb->nh.raw = &top_iph->nexthdr; - skb->h.ipv6h = top_iph + 1; + skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr); + skb->nh.raw += offsetof(struct ipv6hdr, nexthdr); ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 9d3bd338e5d0..21d65df7479e 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -19,7 +19,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) { struct ipv6hdr *outer_iph = ipv6_hdr(skb); - struct ipv6hdr *inner_iph = skb->h.ipv6h; + struct ipv6hdr *inner_iph = ipipv6_hdr(skb); if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph))) IP6_ECN_set_ce(inner_iph); @@ -55,8 +55,8 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) skb_reset_network_header(skb); top_iph = ipv6_hdr(skb); - skb->nh.raw = &top_iph->nexthdr; - skb->h.ipv6h = top_iph + 1; + skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr); + skb->nh.raw += offsetof(struct ipv6hdr, nexthdr); top_iph->version = 6; if (xdst->route->ops->family == AF_INET6) { @@ -102,7 +102,7 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) nh = skb_network_header(skb); if (nh[IP6CB(skb)->nhoff] == IPPROTO_IPV6) { if (x->props.flags & XFRM_STATE_DECAP_DSCP) - ipv6_copy_dscp(ipv6_hdr(skb), skb->h.ipv6h); + ipv6_copy_dscp(ipv6_hdr(skb), ipipv6_hdr(skb)); if (!(x->props.flags & XFRM_STATE_NOECN)) ipip6_ecn_decapsulate(skb); } else { -- cgit v1.2.3-59-g8ed1b From 9c70220b73908f64792422a2c39c593c4792f2c5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 25 Apr 2007 18:04:18 -0700 Subject: [SK_BUFF]: Introduce skb_transport_header(skb) For the places where we need a pointer to the transport header, it is still legal to touch skb->h.raw directly if just adding to, subtracting from or setting it to another layer header. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/net/appletalk/ltpc.c | 7 +++++-- drivers/net/cxgb3/sge.c | 8 +++++--- drivers/s390/net/qeth_eddp.c | 4 ++-- include/linux/atalk.h | 4 ++-- include/linux/dccp.h | 19 ++++++++++++------- include/linux/icmp.h | 2 +- include/linux/icmpv6.h | 2 +- include/linux/igmp.h | 6 +++--- include/linux/ip.h | 2 +- include/linux/ipv6.h | 2 +- include/linux/sctp.h | 2 +- include/linux/skbuff.h | 5 +++++ include/linux/tcp.h | 2 +- include/linux/udp.h | 2 +- include/net/ipx.h | 2 +- include/net/pkt_cls.h | 2 +- include/net/udp.h | 4 ++-- net/802/psnap.c | 2 +- net/ax25/af_ax25.c | 5 +++-- net/bluetooth/hci_core.c | 4 ++-- net/core/dev.c | 6 +++--- net/econet/af_econet.c | 2 +- net/ipv4/igmp.c | 2 +- net/ipv4/ip_gre.c | 2 +- net/ipv4/ip_output.c | 6 ++++-- net/ipv4/ipconfig.c | 4 ++-- net/ipv4/ipmr.c | 8 +++++--- net/ipv4/tcp.c | 12 +++++++----- net/ipv4/tcp_input.c | 13 +++++++------ net/ipv4/xfrm4_mode_beet.c | 2 +- net/ipv4/xfrm4_mode_transport.c | 5 +++-- net/ipv6/ah6.c | 2 +- net/ipv6/esp6.c | 2 +- net/ipv6/exthdrs.c | 21 ++++++++++----------- net/ipv6/ipcomp6.c | 2 +- net/ipv6/mcast.c | 16 +++++++++------- net/ipv6/mip6.c | 8 ++++---- net/ipv6/ndisc.c | 17 +++++++++-------- net/ipv6/raw.c | 2 +- net/ipv6/reassembly.c | 2 +- net/ipv6/xfrm6_mode_transport.c | 5 +++-- net/xfrm/xfrm_input.c | 6 +++--- 42 files changed, 129 insertions(+), 102 deletions(-) (limited to 'include') diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index dc3bce992dcf..43c17c85c97b 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -917,6 +917,7 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev) int i; struct lt_sendlap cbuf; + unsigned char *hdr; cbuf.command = LT_SENDLAP; cbuf.dnode = skb->data[0]; @@ -932,11 +933,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev) printk("\n"); } - do_write(dev,&cbuf,sizeof(cbuf),skb->h.raw,skb->len); + hdr = skb_transport_header(skb); + do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len); if(debug & DEBUG_UPPER) { printk("sent %d ddp bytes\n",skb->len); - for(i=0;ilen;i++) printk("%02x ",skb->h.raw[i]); + for (i = 0; i < skb->len; i++) + printk("%02x ", hdr[i]); printk("\n"); } diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index a70fe9145a2e..610e4769efa4 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -1324,12 +1324,14 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw, + sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), + skb->tail - skb_transport_header(skb), adap->pdev); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; - ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw; + ((struct unmap_info *)skb->cb)->len = (skb->tail - + skb_transport_header(skb)); } write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, @@ -1351,7 +1353,7 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) return 1; /* packet fits as immediate data */ flits = skb_transport_offset(skb) / 8; /* headers */ - if (skb->tail != skb->h.raw) + if (skb->tail != skb_transport_header(skb)) cnt++; return flits_to_desc(flits + sgl_len(cnt)); } diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index b8e84674e170..5890bb5ad23e 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -476,13 +476,13 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, eddp = qeth_eddp_create_eddp_data(qhdr, skb_network_header(skb), ip_hdrlen(skb), - skb->h.raw, + skb_transport_header(skb), tcp_hdrlen(skb)); else eddp = qeth_eddp_create_eddp_data(qhdr, skb_network_header(skb), sizeof(struct ipv6hdr), - skb->h.raw, + skb_transport_header(skb), tcp_hdrlen(skb)); if (eddp == NULL) { diff --git a/include/linux/atalk.h b/include/linux/atalk.h index d12984ddaa9f..ced8a1ed080c 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h @@ -101,7 +101,7 @@ struct ddpehdr { static __inline__ struct ddpehdr *ddp_hdr(struct sk_buff *skb) { - return (struct ddpehdr *)skb->h.raw; + return (struct ddpehdr *)skb_transport_header(skb); } /* AppleTalk AARP headers */ @@ -129,7 +129,7 @@ struct elapaarp { static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb) { - return (struct elapaarp *)skb->h.raw; + return (struct elapaarp *)skb_transport_header(skb); } /* Not specified - how long till we drop a resolved entry */ diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 1f4df61735f7..fdd4217f1047 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -260,19 +260,20 @@ enum { static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb) { - return (struct dccp_hdr *)skb->h.raw; + return (struct dccp_hdr *)skb_transport_header(skb); } static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen) { skb_push(skb, headlen); skb_reset_transport_header(skb); - return memset(skb->h.raw, 0, headlen); + return memset(skb_transport_header(skb), 0, headlen); } static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb) { - return (struct dccp_hdr_ext *)(skb->h.raw + sizeof(struct dccp_hdr)); + return (struct dccp_hdr_ext *)(skb_transport_header(skb) + + sizeof(struct dccp_hdr)); } static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh) @@ -301,12 +302,14 @@ static inline __u64 dccp_hdr_seq(const struct sk_buff *skb) static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb) { - return (struct dccp_hdr_request *)(skb->h.raw + dccp_basic_hdr_len(skb)); + return (struct dccp_hdr_request *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); } static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb) { - return (struct dccp_hdr_ack_bits *)(skb->h.raw + dccp_basic_hdr_len(skb)); + return (struct dccp_hdr_ack_bits 
*)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); } static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb) @@ -317,12 +320,14 @@ static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb) static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb) { - return (struct dccp_hdr_response *)(skb->h.raw + dccp_basic_hdr_len(skb)); + return (struct dccp_hdr_response *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); } static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb) { - return (struct dccp_hdr_reset *)(skb->h.raw + dccp_basic_hdr_len(skb)); + return (struct dccp_hdr_reset *)(skb_transport_header(skb) + + dccp_basic_hdr_len(skb)); } static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh) diff --git a/include/linux/icmp.h b/include/linux/icmp.h index cd3017a15789..474f2a51cf0a 100644 --- a/include/linux/icmp.h +++ b/include/linux/icmp.h @@ -87,7 +87,7 @@ struct icmphdr { static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) { - return (struct icmphdr *)skb->h.raw; + return (struct icmphdr *)skb_transport_header(skb); } #endif diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 0b5ba5eb7ed2..7c5e9817e998 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -80,7 +80,7 @@ struct icmp6hdr { static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) { - return (struct icmp6hdr *)skb->h.raw; + return (struct icmp6hdr *)skb_transport_header(skb); } #endif diff --git a/include/linux/igmp.h b/include/linux/igmp.h index ca285527b879..f510e7e382a8 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -85,19 +85,19 @@ struct igmpv3_query { static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb) { - return (struct igmphdr *)skb->h.raw; + return (struct igmphdr *)skb_transport_header(skb); } static inline struct igmpv3_report * igmpv3_report_hdr(const struct sk_buff *skb) { - return (struct igmpv3_report *)skb->h.raw; + return (struct igmpv3_report *)skb_transport_header(skb); } static inline struct igmpv3_query * igmpv3_query_hdr(const struct sk_buff *skb) { - return (struct igmpv3_query *)skb->h.raw; + return (struct igmpv3_query *)skb_transport_header(skb); } #endif diff --git a/include/linux/ip.h b/include/linux/ip.h index 19578440b5fc..bd0a2a8631c6 100644 --- a/include/linux/ip.h +++ b/include/linux/ip.h @@ -114,7 +114,7 @@ static inline struct iphdr *ip_hdr(const struct sk_buff *skb) static inline struct iphdr *ipip_hdr(const struct sk_buff *skb) { - return (struct iphdr *)skb->h.raw; + return (struct iphdr *)skb_transport_header(skb); } #endif diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index b768fcc0a4c4..09ea01a8a99c 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -230,7 +230,7 @@ static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb) static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb) { - return (struct ipv6hdr *)skb->h.raw; + return (struct ipv6hdr *)skb_transport_header(skb); } /* diff --git a/include/linux/sctp.h b/include/linux/sctp.h index d76767dfe59e..d70df61a029f 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -68,7 +68,7 @@ typedef struct sctphdr { static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) { - return (struct sctphdr *)skb->h.raw; + return (struct sctphdr *)skb_transport_header(skb); } #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d3f186230ee2..39a6da243b24 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h 
@@ -951,6 +951,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +static inline unsigned char *skb_transport_header(const struct sk_buff *skb) +{ + return skb->h.raw; +} + static inline void skb_reset_transport_header(struct sk_buff *skb) { skb->h.raw = skb->data; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 911d937fb4c1..c6b9f92e8289 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -180,7 +180,7 @@ struct tcp_md5sig { static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) { - return (struct tcphdr *)skb->h.raw; + return (struct tcphdr *)skb_transport_header(skb); } static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) diff --git a/include/linux/udp.h b/include/linux/udp.h index 1f58503af9a6..6de445c31a64 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -31,7 +31,7 @@ struct udphdr { static inline struct udphdr *udp_hdr(const struct sk_buff *skb) { - return (struct udphdr *)skb->h.raw; + return (struct udphdr *)skb_transport_header(skb); } #endif diff --git a/include/net/ipx.h b/include/net/ipx.h index c6b2ee610866..4cc0b4eca948 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h @@ -43,7 +43,7 @@ struct ipxhdr { static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) { - return (struct ipxhdr *)skb->h.raw; + return (struct ipxhdr *)skb_transport_header(skb); } struct ipx_interface { diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 880eb7b54164..dcb3a91f1364 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -328,7 +328,7 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer) case TCF_LAYER_NETWORK: return skb_network_header(skb); case TCF_LAYER_TRANSPORT: - return skb->h.raw; + return skb_transport_header(skb); } return NULL; diff --git a/include/net/udp.h b/include/net/udp.h index 4a9699f79281..4906ed7113e7 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -89,8 +89,8 @@ static inline int udp_lib_checksum_complete(struct sk_buff *skb) */ static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb) { - __wsum csum = csum_partial(skb->h.raw, sizeof(struct udphdr), 0); - + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), 0); skb_queue_walk(&sk->sk_write_queue, skb) { csum = csum_add(csum, skb->csum); } diff --git a/net/802/psnap.c b/net/802/psnap.c index 6e7c2120b83f..7cba1f426081 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c @@ -56,7 +56,7 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev, }; rcu_read_lock(); - proto = find_snap_client(skb->h.raw); + proto = find_snap_client(skb_transport_header(skb)); if (proto) { /* Pass the frame on. 
*/ skb->h.raw += 5; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 75d4d695edec..5f28887822e9 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1585,9 +1585,10 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock, skb_set_transport_header(skb, lv); - SOCK_DEBUG(sk, "base=%p pos=%p\n", skb->data, skb->h.raw); + SOCK_DEBUG(sk, "base=%p pos=%p\n", + skb->data, skb_transport_header(skb)); - *skb->h.raw = AX25_UI; + *skb_transport_header(skb) = AX25_UI; /* Datagram frames go straight out of the door as UI */ ax25_queue_xmit(skb, ax25->ax25_dev->dev); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index c11ceb6b3f79..c177e75d64a6 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1076,7 +1076,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) skb_push(skb, HCI_ACL_HDR_SIZE); skb_reset_transport_header(skb); - hdr = (struct hci_acl_hdr *)skb->h.raw; + hdr = (struct hci_acl_hdr *)skb_transport_header(skb); hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); hdr->dlen = cpu_to_le16(len); } @@ -1145,7 +1145,7 @@ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) skb_push(skb, HCI_SCO_HDR_SIZE); skb_reset_transport_header(skb); - memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE); + memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); skb->dev = (void *) hdev; bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; diff --git a/net/core/dev.c b/net/core/dev.c index f7f7e5687e46..30fcc7f9d4ed 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1175,12 +1175,12 @@ int skb_checksum_help(struct sk_buff *skb) BUG_ON(offset > (int)skb->len); csum = skb_checksum(skb, offset, skb->len-offset, 0); - offset = skb->tail - skb->h.raw; + offset = skb->tail - skb_transport_header(skb); BUG_ON(offset <= 0); BUG_ON(skb->csum_offset + 2 > offset); - *(__sum16*)(skb->h.raw + skb->csum_offset) = csum_fold(csum); - + *(__sum16 *)(skb_transport_header(skb) + + skb->csum_offset) = csum_fold(csum); out_set_summed: skb->ip_summed = CHECKSUM_NONE; out: diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index dcc2e4b6b2fe..78993dadb53a 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -943,7 +943,7 @@ static void aun_data_available(struct sock *sk, int slen) printk(KERN_DEBUG "AUN: recvfrom() error %d\n", -err); } - data = skb->h.raw + sizeof(struct udphdr); + data = skb_transport_header(skb) + sizeof(struct udphdr); ah = (struct aunhdr *)data; len = skb->len - sizeof(struct udphdr); ip = ip_hdr(skb); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 292516bb1eca..8f0df7b4dfe7 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -348,7 +348,7 @@ static int igmpv3_sendpack(struct sk_buff *skb) struct iphdr *pip = ip_hdr(skb); struct igmphdr *pig = igmp_hdr(skb); const int iplen = skb->tail - skb->nh.raw; - const int igmplen = skb->tail - skb->h.raw; + const int igmplen = skb->tail - skb_transport_header(skb); pip->tot_len = htons(iplen); ip_send_check(pip); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 39216e6a59ed..e6a9e452fd61 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -619,7 +619,7 @@ static int ipgre_rcv(struct sk_buff *skb) skb_reset_mac_header(skb); __pskb_pull(skb, offset); skb_reset_network_header(skb); - skb_postpull_rcsum(skb, skb->h.raw, offset); + skb_postpull_rcsum(skb, skb_transport_header(skb), offset); skb->pkt_type = PACKET_HOST; #ifdef CONFIG_NET_IPGRE_BROADCAST if (MULTICAST(iph->daddr)) { diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c 
index 602268661eb3..11029b9d4cf7 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1128,7 +1128,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, if (fraggap) { skb->csum = skb_copy_and_csum_bits(skb_prev, maxfraglen, - skb->h.raw, + skb_transport_header(skb), fraggap, 0); skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); @@ -1374,7 +1374,9 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar &ipc, rt, MSG_DONTWAIT); if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { if (arg->csumoffset >= 0) - *((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum)); + *((__sum16 *)skb_transport_header(skb) + + arg->csumoffset) = csum_fold(csum_add(skb->csum, + arg->csum)); skb->ip_summed = CHECKSUM_NONE; ip_push_pending_frames(sk); } diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 6b91c9f5d57a..4e19ee0e0102 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -432,7 +432,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt goto drop; /* Basic sanity checks can be done without the lock. */ - rarp = (struct arphdr *)skb->h.raw; + rarp = (struct arphdr *)skb_transport_header(skb); /* If this test doesn't pass, it's not IP, or we should * ignore it anyway. @@ -455,7 +455,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt goto drop; /* OK, it is all there and looks valid, process... */ - rarp = (struct arphdr *)skb->h.raw; + rarp = (struct arphdr *)skb_transport_header(skb); rarp_ptr = (unsigned char *) (rarp + 1); /* One reply at a time, please. */ diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 05bc27002def..8f45c95db451 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1437,7 +1437,8 @@ int pim_rcv_v1(struct sk_buff * skb) pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) goto drop; - encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr)); + encap = (struct iphdr *)(skb_transport_header(skb) + + sizeof(struct igmphdr)); /* Check that: a. 
packet is really destinted to a multicast group @@ -1490,7 +1491,7 @@ static int pim_rcv(struct sk_buff * skb) if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) goto drop; - pim = (struct pimreghdr*)skb->h.raw; + pim = (struct pimreghdr *)skb_transport_header(skb); if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) || (pim->flags&PIM_NULL_REGISTER) || (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && @@ -1498,7 +1499,8 @@ static int pim_rcv(struct sk_buff * skb) goto drop; /* check if the inner packet is destined to mcast group */ - encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr)); + encap = (struct iphdr *)(skb_transport_header(skb) + + sizeof(struct pimreghdr)); if (!MULTICAST(encap->daddr) || encap->tot_len == 0 || ntohs(encap->tot_len) + sizeof(*pim) > skb->len) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f832f3c33ab1..2b214cc3724c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2219,8 +2219,9 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) th->check = ~csum_fold((__force __wsum)((__force u32)th->check + (__force u32)delta)); if (skb->ip_summed != CHECKSUM_PARTIAL) - th->check = csum_fold(csum_partial(skb->h.raw, thlen, - skb->csum)); + th->check = + csum_fold(csum_partial(skb_transport_header(skb), + thlen, skb->csum)); seq += len; skb = skb->next; @@ -2230,12 +2231,13 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) th->cwr = 0; } while (skb->next); - delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len); + delta = htonl(oldlen + (skb->tail - skb_transport_header(skb)) + + skb->data_len); th->check = ~csum_fold((__force __wsum)((__force u32)th->check + (__force u32)delta)); if (skb->ip_summed != CHECKSUM_PARTIAL) - th->check = csum_fold(csum_partial(skb->h.raw, thlen, - skb->csum)); + th->check = csum_fold(csum_partial(skb_transport_header(skb), + thlen, skb->csum)); out: return segs; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c1ce36237380..9c3b4c7a50ad 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -140,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, * * "len" is invariant segment length, including TCP header. */ - len += skb->data - skb->h.raw; + len += skb->data - skb_transport_header(skb); if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) || /* If PSH is not set, packet should be * full sized, provided peer TCP is not badly broken. 
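Illustrative sketch, not part of the patch above. The conversions in this series are mechanical: every open-coded skb->h.raw access goes through the new skb_transport_header() accessor, and offsets keep being derived by pointer arithmetic against it (skb->data - skb_transport_header(skb) in tcp_measure_rcv_mss(), skb->tail - skb_transport_header(skb) in skb_checksum_help() and igmpv3_sendpack()). A minimal sketch of what a protocol helper looks like after the conversion; struct foohdr, foo_hdr() and foo_hdr_pulled() are invented names, only skb_transport_header() itself comes from this patch:

    #include <linux/types.h>
    #include <linux/skbuff.h>

    /* invented wire-format header, for illustration only */
    struct foohdr {
            __be16  source;
            __be16  dest;
            __be16  len;
            __sum16 check;
    };

    /* typed view of the transport header, mirroring tcp_hdr()/udp_hdr() above */
    static inline struct foohdr *foo_hdr(const struct sk_buff *skb)
    {
            return (struct foohdr *)skb_transport_header(skb);
    }

    /* how far skb->data has been pulled past the transport header; the same
     * arithmetic the converted tcp_measure_rcv_mss() hunk uses */
    static inline int foo_hdr_pulled(const struct sk_buff *skb)
    {
            return skb->data - skb_transport_header(skb);
    }
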
@@ -940,7 +940,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked; + unsigned char *ptr = (skb_transport_header(ack_skb) + + TCP_SKB_CB(ack_skb)->sacked); struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2); struct sk_buff *cached_skb; int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3; @@ -3634,10 +3635,10 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, return; skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); - skb_set_network_header(nskb, - skb_network_header(skb) - skb->head); - skb_set_transport_header(nskb, skb->h.raw - skb->head); - + skb_set_network_header(nskb, (skb_network_header(skb) - + skb->head)); + skb_set_transport_header(nskb, (skb_transport_header(skb) - + skb->head)); skb_reserve(nskb, header); memcpy(nskb->head, skb->head, header); memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c index 32fcfc0b5c8c..591f0f1ef87f 100644 --- a/net/ipv4/xfrm4_mode_beet.c +++ b/net/ipv4/xfrm4_mode_beet.c @@ -51,7 +51,7 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) BUG_ON(optlen < 0); - ph = (struct ip_beet_phdr *)skb->h.raw; + ph = (struct ip_beet_phdr *)skb_transport_header(skb); ph->padlen = 4 - (optlen & 4); ph->hdrlen = optlen / 8; ph->nexthdr = top_iph->protocol; diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index 2c46cbb3bbb5..dc8834ea3754 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -45,10 +45,11 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) */ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) { - int ihl = skb->data - skb->h.raw; + int ihl = skb->data - skb_transport_header(skb); if (skb->h.raw != skb->nh.raw) { - memmove(skb->h.raw, skb_network_header(skb), ihl); + memmove(skb_transport_header(skb), + skb_network_header(skb), ihl); skb->nh.raw = skb->h.raw; } ip_hdr(skb)->tot_len = htons(skb->len + ihl); diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index e5ee981d3e10..d2af4fe3725b 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -268,7 +268,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) goto error_free_iph; } - ah = (struct ip_auth_hdr *)skb->h.raw; + ah = (struct ip_auth_hdr *)skb_transport_header(skb); ah->nexthdr = nexthdr; top_iph->priority = 0; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index ad522b7b5771..436eb9e6a6cf 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -87,7 +87,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) pskb_put(skb, trailer, clen - skb->len); top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len); - esph = (struct ipv6_esp_hdr *)skb->h.raw; + esph = (struct ipv6_esp_hdr *)skb_transport_header(skb); top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph)); *(u8 *)(trailer->tail - 1) = *skb_network_header(skb); *skb_network_header(skb) = IPPROTO_ESP; diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 1bda0299890e..f25ee773f52e 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -144,7 +144,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp) struct tlvtype_proc *curr; const unsigned char *nh = skb_network_header(skb); int off = skb->h.raw - skb->nh.raw; - int len = ((skb->h.raw[1]+1)<<3); + int len = 
(skb_transport_header(skb)[1] + 1) << 3; if (skb_transport_offset(skb) + len > skb_headlen(skb)) goto bad; @@ -290,15 +290,14 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp) if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + - ((skb->h.raw[1] + 1) << 3)))) { + ((skb_transport_header(skb)[1] + 1) << 3)))) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } - opt->lastopt = skb->h.raw - skb->nh.raw; - opt->dst1 = skb->h.raw - skb->nh.raw; + opt->lastopt = opt->dst1 = skb->h.raw - skb->nh.raw; #ifdef CONFIG_IPV6_MIP6 dstbuf = opt->dst1; #endif @@ -307,7 +306,7 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp) if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) { dst_release(dst); skb = *skbp; - skb->h.raw += ((skb->h.raw[1]+1)<<3); + skb->h.raw += (skb_transport_header(skb)[1] + 1) << 3; opt = IP6CB(skb); #ifdef CONFIG_IPV6_MIP6 opt->nhoff = dstbuf; @@ -390,14 +389,14 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp) if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + - ((skb->h.raw[1] + 1) << 3)))) { + ((skb_transport_header(skb)[1] + 1) << 3)))) { IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } - hdr = (struct ipv6_rt_hdr *) skb->h.raw; + hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); switch (hdr->type) { #ifdef CONFIG_IPV6_MIP6 @@ -444,8 +443,7 @@ looped_back: break; } - opt->lastopt = skb->h.raw - skb->nh.raw; - opt->srcrt = skb->h.raw - skb->nh.raw; + opt->lastopt = opt->srcrt = skb->h.raw - skb->nh.raw; skb->h.raw += (hdr->hdrlen + 1) << 3; opt->dst0 = opt->dst1; opt->dst1 = 0; @@ -745,7 +743,8 @@ int ipv6_parse_hopopts(struct sk_buff **skbp) * hop-by-hop options. 
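Worked example, not part of the patch. The exthdrs.c and mip6.c hunks repeatedly compute (skb_transport_header(skb)[1] + 1) << 3: byte 1 of a hop-by-hop, destination-options or routing header is its Hdr Ext Len field, counted in 8-octet units and excluding the first 8 octets, so the full header occupies (hdrlen + 1) * 8 bytes. That is also why these hunks first pskb_may_pull() 8 bytes (enough to read the length field) and only then pull the full (len + 1) << 3. A self-contained userspace sketch of the arithmetic, with an invented buffer:

    #include <stdint.h>
    #include <stdio.h>

    /* byte length of an option-bearing IPv6 extension header (RFC 2460),
     * given a pointer to its first byte */
    static unsigned int ext_hdr_len(const uint8_t *exthdr)
    {
            return ((unsigned int)exthdr[1] + 1) << 3;  /* (Hdr Ext Len + 1) * 8 */
    }

    int main(void)
    {
            /* Hdr Ext Len = 2  ->  (2 + 1) * 8 = 24 bytes on the wire */
            const uint8_t hbh[24] = { 59 /* next header: none */, 2 /* Hdr Ext Len */ };

            printf("%u\n", ext_hdr_len(hbh));  /* prints 24 */
            return 0;
    }
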
*/ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) || - !pskb_may_pull(skb, sizeof(struct ipv6hdr) + ((skb->h.raw[1] + 1) << 3))) { + !pskb_may_pull(skb, (sizeof(struct ipv6hdr) + + ((skb_transport_header(skb)[1] + 1) << 3)))) { kfree_skb(skb); return -1; } @@ -753,7 +752,7 @@ int ipv6_parse_hopopts(struct sk_buff **skbp) opt->hop = sizeof(struct ipv6hdr); if (ip6_parse_tlv(tlvprochopopt_lst, skbp)) { skb = *skbp; - skb->h.raw += (skb->h.raw[1]+1)<<3; + skb->h.raw += (skb_transport_header(skb)[1] + 1) << 3; opt = IP6CB(skb); opt->nhoff = sizeof(struct ipv6hdr); return 1; diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 4a6501695e98..5555c98dea03 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -143,7 +143,7 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb) /* compression */ plen = skb->len - hdr_len; dlen = IPCOMP_SCRATCH_SIZE; - start = skb->h.raw; + start = skb_transport_header(skb); cpu = get_cpu(); scratch = *per_cpu_ptr(ipcomp6_scratches, cpu); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 1f2a3be9308a..c6436f5e3e9f 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1212,7 +1212,7 @@ int igmp6_event_query(struct sk_buff *skb) in6_dev_put(idev); return -EINVAL; } - mlh2 = (struct mld2_query *) skb->h.raw; + mlh2 = (struct mld2_query *)skb_transport_header(skb); max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000; if (!max_delay) max_delay = 1; @@ -1235,7 +1235,7 @@ int igmp6_event_query(struct sk_buff *skb) in6_dev_put(idev); return -EINVAL; } - mlh2 = (struct mld2_query *) skb->h.raw; + mlh2 = (struct mld2_query *)skb_transport_header(skb); mark = 1; } } else { @@ -1460,18 +1460,20 @@ static inline int mld_dev_queue_xmit(struct sk_buff *skb) static void mld_sendpack(struct sk_buff *skb) { struct ipv6hdr *pip6 = ipv6_hdr(skb); - struct mld2_report *pmr = (struct mld2_report *)skb->h.raw; + struct mld2_report *pmr = + (struct mld2_report *)skb_transport_header(skb); int payload_len, mldlen; struct inet6_dev *idev = in6_dev_get(skb->dev); int err; IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); payload_len = skb->tail - skb_network_header(skb) - sizeof(*pip6); - mldlen = skb->tail - skb->h.raw; + mldlen = skb->tail - skb_transport_header(skb); pip6->payload_len = htons(payload_len); pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, - IPPROTO_ICMPV6, csum_partial(skb->h.raw, mldlen, 0)); + IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), + mldlen, 0)); err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dev, mld_dev_queue_xmit); if (!err) { @@ -1505,7 +1507,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, pgr->grec_auxwords = 0; pgr->grec_nsrcs = 0; pgr->grec_mca = pmc->mca_addr; /* structure copy */ - pmr = (struct mld2_report *)skb->h.raw; + pmr = (struct mld2_report *)skb_transport_header(skb); pmr->ngrec = htons(ntohs(pmr->ngrec)+1); *ppgr = pgr; return skb; @@ -1538,7 +1540,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, if (!*psf_list) goto empty_source; - pmr = skb ? (struct mld2_report *)skb->h.raw : NULL; + pmr = skb ? 
(struct mld2_report *)skb_transport_header(skb) : NULL; /* EX and TO_EX get a fresh packet, if needed */ if (truncate) { diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 85202891644e..f0288e92fb52 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -92,10 +92,10 @@ int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + - ((skb->h.raw[1] + 1) << 3)))) + ((skb_transport_header(skb)[1] + 1) << 3)))) return -1; - mh = (struct ip6_mh *)skb->h.raw; + mh = (struct ip6_mh *)skb_transport_header(skb); if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", @@ -158,7 +158,7 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb) nexthdr = *skb_network_header(skb); *skb_network_header(skb) = IPPROTO_DSTOPTS; - dstopt = (struct ipv6_destopt_hdr *)skb->h.raw; + dstopt = (struct ipv6_destopt_hdr *)skb_transport_header(skb); dstopt->nexthdr = nexthdr; hao = mip6_padn((char *)(dstopt + 1), @@ -370,7 +370,7 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb) nexthdr = *skb_network_header(skb); *skb_network_header(skb) = IPPROTO_ROUTING; - rt2 = (struct rt2_hdr *)skb->h.raw; + rt2 = (struct rt2_hdr *)skb_transport_header(skb); rt2->rt_hdr.nexthdr = nexthdr; rt2->rt_hdr.hdrlen = (x->props.header_len >> 3) - 1; rt2->rt_hdr.type = IPV6_SRCRT_TYPE_2; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 8b946f56287a..f9a85ab594db 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -760,7 +760,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) static void ndisc_recv_ns(struct sk_buff *skb) { - struct nd_msg *msg = (struct nd_msg *)skb->h.raw; + struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; @@ -938,7 +938,7 @@ out: static void ndisc_recv_na(struct sk_buff *skb) { - struct nd_msg *msg = (struct nd_msg *)skb->h.raw; + struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; @@ -1040,7 +1040,7 @@ out: static void ndisc_recv_rs(struct sk_buff *skb) { - struct rs_msg *rs_msg = (struct rs_msg *) skb->h.raw; + struct rs_msg *rs_msg = (struct rs_msg *)skb_transport_header(skb); unsigned long ndoptlen = skb->len - sizeof(*rs_msg); struct neighbour *neigh; struct inet6_dev *idev; @@ -1097,7 +1097,7 @@ out: static void ndisc_router_discovery(struct sk_buff *skb) { - struct ra_msg *ra_msg = (struct ra_msg *) skb->h.raw; + struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb); struct neighbour *neigh = NULL; struct inet6_dev *in6_dev; struct rt6_info *rt = NULL; @@ -1108,7 +1108,8 @@ static void ndisc_router_discovery(struct sk_buff *skb) __u8 * opt = (__u8 *)(ra_msg + 1); - optlen = (skb->tail - skb->h.raw) - sizeof(struct ra_msg); + optlen = (skb->tail - skb_transport_header(skb)) - + sizeof(struct ra_msg); if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING @@ -1357,7 +1358,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) return; } - optlen = skb->tail - skb->h.raw; + optlen = skb->tail - skb_transport_header(skb); optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); if (optlen < 0) { @@ -1584,9 +1585,9 @@ int ndisc_rcv(struct sk_buff *skb) 
if (!pskb_may_pull(skb, skb->len)) return 0; - msg = (struct nd_msg *) skb->h.raw; + msg = (struct nd_msg *)skb_transport_header(skb); - __skb_push(skb, skb->data-skb->h.raw); + __skb_push(skb, skb->data - skb_transport_header(skb)); if (ipv6_hdr(skb)->hop_limit != 255) { ND_PRINTK2(KERN_WARNING diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index bb049f1c2679..116257d59a36 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1077,7 +1077,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) - amount = skb->tail - skb->h.raw; + amount = skb->tail - skb_transport_header(skb); spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index ef29a7bb97ce..31d4271ea540 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -726,7 +726,7 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) } hdr = ipv6_hdr(skb); - fhdr = (struct frag_hdr *)skb->h.raw; + fhdr = (struct frag_hdr *)skb_transport_header(skb); if (!(fhdr->frag_off & htons(0xFFF9))) { /* It is not a fragmented frame */ diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index 0134d74ef087..d526f4e9c65e 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c @@ -51,10 +51,11 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) */ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) { - int ihl = skb->data - skb->h.raw; + int ihl = skb->data - skb_transport_header(skb); if (skb->h.raw != skb->nh.raw) { - memmove(skb->h.raw, skb_network_header(skb), ihl); + memmove(skb_transport_header(skb), + skb_network_header(skb), ihl); skb->nh.raw = skb->h.raw; } ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index ee15bdae1419..5c4695840c58 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -62,7 +62,7 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) case IPPROTO_COMP: if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr))) return -EINVAL; - *spi = htonl(ntohs(*(__be16*)(skb->h.raw + 2))); + *spi = htonl(ntohs(*(__be16*)(skb_transport_header(skb) + 2))); *seq = 0; return 0; default: @@ -72,8 +72,8 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) if (!pskb_may_pull(skb, 16)) return -EINVAL; - *spi = *(__be32*)(skb->h.raw + offset); - *seq = *(__be32*)(skb->h.raw + offset_seq); + *spi = *(__be32*)(skb_transport_header(skb) + offset); + *seq = *(__be32*)(skb_transport_header(skb) + offset_seq); return 0; } EXPORT_SYMBOL(xfrm_parse_spi); -- cgit v1.2.3-59-g8ed1b From 587aa64163bb14f70098f450abab9410787fce9d Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 14 Mar 2007 16:37:25 -0700 Subject: [NETFILTER]: Remove IPv4 only connection tracking/NAT Remove the obsolete IPv4 only connection tracking/NAT as scheduled in feature-removal-schedule. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- Documentation/feature-removal-schedule.txt | 9 - include/linux/netfilter_ipv4/Kbuild | 14 - include/linux/netfilter_ipv4/ip_conntrack.h | 402 ----- include/linux/netfilter_ipv4/ip_conntrack_amanda.h | 11 - include/linux/netfilter_ipv4/ip_conntrack_core.h | 61 - include/linux/netfilter_ipv4/ip_conntrack_ftp.h | 44 - include/linux/netfilter_ipv4/ip_conntrack_h323.h | 89 - include/linux/netfilter_ipv4/ip_conntrack_helper.h | 46 - include/linux/netfilter_ipv4/ip_conntrack_icmp.h | 6 - include/linux/netfilter_ipv4/ip_conntrack_irc.h | 32 - include/linux/netfilter_ipv4/ip_conntrack_pptp.h | 326 ---- .../linux/netfilter_ipv4/ip_conntrack_proto_gre.h | 114 -- .../linux/netfilter_ipv4/ip_conntrack_protocol.h | 98 -- include/linux/netfilter_ipv4/ip_conntrack_sctp.h | 6 - include/linux/netfilter_ipv4/ip_conntrack_sip.h | 40 - include/linux/netfilter_ipv4/ip_conntrack_tcp.h | 6 - include/linux/netfilter_ipv4/ip_conntrack_tftp.h | 20 - include/linux/netfilter_ipv4/ip_conntrack_tuple.h | 146 -- include/linux/netfilter_ipv4/ip_nat.h | 79 - include/linux/netfilter_ipv4/ip_nat_core.h | 18 - include/linux/netfilter_ipv4/ip_nat_helper.h | 33 - include/linux/netfilter_ipv4/ip_nat_pptp.h | 11 - include/linux/netfilter_ipv4/ip_nat_protocol.h | 74 - include/linux/netfilter_ipv4/ip_nat_rule.h | 28 - include/linux/netfilter_ipv4/ipt_SAME.h | 2 +- include/net/netfilter/nf_conntrack.h | 5 + include/net/netfilter/nf_conntrack_compat.h | 145 -- include/net/netfilter/nf_nat_rule.h | 10 - net/ipv4/netfilter/Kconfig | 267 +-- net/ipv4/netfilter/Makefile | 45 +- net/ipv4/netfilter/ip_conntrack_amanda.c | 229 --- net/ipv4/netfilter/ip_conntrack_core.c | 1549 ---------------- net/ipv4/netfilter/ip_conntrack_ftp.c | 520 ------ net/ipv4/netfilter/ip_conntrack_helper_h323.c | 1840 -------------------- net/ipv4/netfilter/ip_conntrack_helper_pptp.c | 684 -------- net/ipv4/netfilter/ip_conntrack_irc.c | 314 ---- net/ipv4/netfilter/ip_conntrack_netbios_ns.c | 143 -- net/ipv4/netfilter/ip_conntrack_netlink.c | 1577 ----------------- net/ipv4/netfilter/ip_conntrack_proto_generic.c | 74 - net/ipv4/netfilter/ip_conntrack_proto_gre.c | 328 ---- net/ipv4/netfilter/ip_conntrack_proto_icmp.c | 315 ---- net/ipv4/netfilter/ip_conntrack_proto_sctp.c | 659 ------- net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 1163 ------------- net/ipv4/netfilter/ip_conntrack_proto_udp.c | 148 -- net/ipv4/netfilter/ip_conntrack_sip.c | 520 ------ net/ipv4/netfilter/ip_conntrack_standalone.c | 962 ---------- net/ipv4/netfilter/ip_conntrack_tftp.c | 161 -- net/ipv4/netfilter/ip_nat_amanda.c | 85 - net/ipv4/netfilter/ip_nat_core.c | 633 ------- net/ipv4/netfilter/ip_nat_ftp.c | 180 -- net/ipv4/netfilter/ip_nat_helper.c | 436 ----- net/ipv4/netfilter/ip_nat_helper_h323.c | 611 ------- net/ipv4/netfilter/ip_nat_helper_pptp.c | 350 ---- net/ipv4/netfilter/ip_nat_irc.c | 122 -- net/ipv4/netfilter/ip_nat_proto_gre.c | 174 -- net/ipv4/netfilter/ip_nat_proto_icmp.c | 87 - net/ipv4/netfilter/ip_nat_proto_tcp.c | 154 -- net/ipv4/netfilter/ip_nat_proto_udp.c | 144 -- net/ipv4/netfilter/ip_nat_proto_unknown.c | 55 - net/ipv4/netfilter/ip_nat_rule.c | 314 ---- net/ipv4/netfilter/ip_nat_sip.c | 282 --- net/ipv4/netfilter/ip_nat_snmp_basic.c | 1333 -------------- net/ipv4/netfilter/ip_nat_standalone.c | 387 ---- net/ipv4/netfilter/ip_nat_tftp.c | 70 - net/ipv4/netfilter/ipt_CLUSTERIP.c | 18 +- net/ipv4/netfilter/ipt_MASQUERADE.c | 57 +- net/ipv4/netfilter/ipt_NETMAP.c | 22 +- net/ipv4/netfilter/ipt_REDIRECT.c | 24 +- net/ipv4/netfilter/ipt_SAME.c | 25 +- 
net/ipv4/netfilter/nf_nat_h323.c | 4 +- net/ipv4/netfilter/nf_nat_pptp.c | 2 +- net/netfilter/Kconfig | 63 +- net/netfilter/xt_CONNMARK.c | 32 +- net/netfilter/xt_CONNSECMARK.c | 18 +- net/netfilter/xt_NOTRACK.c | 4 +- net/netfilter/xt_connbytes.c | 10 +- net/netfilter/xt_connmark.c | 17 +- net/netfilter/xt_conntrack.c | 110 +- net/netfilter/xt_helper.c | 57 - net/netfilter/xt_state.c | 4 +- 80 files changed, 122 insertions(+), 19135 deletions(-) delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_amanda.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_core.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_ftp.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_h323.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_helper.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_icmp.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_irc.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_pptp.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_protocol.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_sctp.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_sip.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_tcp.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_tftp.h delete mode 100644 include/linux/netfilter_ipv4/ip_conntrack_tuple.h delete mode 100644 include/linux/netfilter_ipv4/ip_nat.h delete mode 100644 include/linux/netfilter_ipv4/ip_nat_core.h delete mode 100644 include/linux/netfilter_ipv4/ip_nat_helper.h delete mode 100644 include/linux/netfilter_ipv4/ip_nat_pptp.h delete mode 100644 include/linux/netfilter_ipv4/ip_nat_protocol.h delete mode 100644 include/linux/netfilter_ipv4/ip_nat_rule.h delete mode 100644 include/net/netfilter/nf_conntrack_compat.h delete mode 100644 net/ipv4/netfilter/ip_conntrack_amanda.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_core.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_ftp.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_helper_h323.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_helper_pptp.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_irc.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_netbios_ns.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_netlink.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_proto_generic.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_proto_gre.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_proto_icmp.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_proto_sctp.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_proto_tcp.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_proto_udp.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_sip.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_standalone.c delete mode 100644 net/ipv4/netfilter/ip_conntrack_tftp.c delete mode 100644 net/ipv4/netfilter/ip_nat_amanda.c delete mode 100644 net/ipv4/netfilter/ip_nat_core.c delete mode 100644 net/ipv4/netfilter/ip_nat_ftp.c delete mode 100644 net/ipv4/netfilter/ip_nat_helper.c delete mode 100644 net/ipv4/netfilter/ip_nat_helper_h323.c delete mode 100644 net/ipv4/netfilter/ip_nat_helper_pptp.c delete mode 100644 net/ipv4/netfilter/ip_nat_irc.c delete mode 100644 net/ipv4/netfilter/ip_nat_proto_gre.c delete mode 100644 net/ipv4/netfilter/ip_nat_proto_icmp.c delete mode 100644 
net/ipv4/netfilter/ip_nat_proto_tcp.c delete mode 100644 net/ipv4/netfilter/ip_nat_proto_udp.c delete mode 100644 net/ipv4/netfilter/ip_nat_proto_unknown.c delete mode 100644 net/ipv4/netfilter/ip_nat_rule.c delete mode 100644 net/ipv4/netfilter/ip_nat_sip.c delete mode 100644 net/ipv4/netfilter/ip_nat_snmp_basic.c delete mode 100644 net/ipv4/netfilter/ip_nat_standalone.c delete mode 100644 net/ipv4/netfilter/ip_nat_tftp.c (limited to 'include') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 19b4c96b2a49..9817b60e70a3 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -211,15 +211,6 @@ Who: Adrian Bunk --------------------------- -What: IPv4 only connection tracking/NAT/helpers -When: 2.6.22 -Why: The new layer 3 independant connection tracking replaces the old - IPv4 only version. After some stabilization of the new code the - old one will be removed. -Who: Patrick McHardy - ---------------------------- - What: ACPI hooks (X86_SPEEDSTEP_CENTRINO_ACPI) in speedstep-centrino driver When: December 2006 Why: Speedstep-centrino driver with ACPI hooks and acpi-cpufreq driver are diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild index 180337801a86..7185792b900f 100644 --- a/include/linux/netfilter_ipv4/Kbuild +++ b/include/linux/netfilter_ipv4/Kbuild @@ -1,9 +1,3 @@ -header-y += ip_conntrack_helper.h -header-y += ip_conntrack_protocol.h -header-y += ip_conntrack_sctp.h -header-y += ip_conntrack_tcp.h -header-y += ip_conntrack_tftp.h -header-y += ip_nat_pptp.h header-y += ipt_addrtype.h header-y += ipt_ah.h header-y += ipt_CLASSIFY.h @@ -49,13 +43,5 @@ header-y += ipt_ttl.h header-y += ipt_TTL.h header-y += ipt_ULOG.h -unifdef-y += ip_conntrack.h -unifdef-y += ip_conntrack_h323.h -unifdef-y += ip_conntrack_irc.h -unifdef-y += ip_conntrack_pptp.h -unifdef-y += ip_conntrack_proto_gre.h -unifdef-y += ip_conntrack_tuple.h -unifdef-y += ip_nat.h -unifdef-y += ip_nat_rule.h unifdef-y += ip_queue.h unifdef-y += ip_tables.h diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h deleted file mode 100644 index da9274e6bf12..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack.h +++ /dev/null @@ -1,402 +0,0 @@ -#ifndef _IP_CONNTRACK_H -#define _IP_CONNTRACK_H - -#include - -#ifdef __KERNEL__ -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/* per conntrack: protocol private data */ -union ip_conntrack_proto { - /* insert conntrack proto private data here */ - struct ip_ct_gre gre; - struct ip_ct_sctp sctp; - struct ip_ct_tcp tcp; - struct ip_ct_icmp icmp; -}; - -union ip_conntrack_expect_proto { - /* insert expect proto private data here */ -}; - -/* Add protocol helper include file here */ -#include -#include -#include -#include -#include - -/* per conntrack: application helper private data */ -union ip_conntrack_help { - /* insert conntrack helper private data (master) here */ - struct ip_ct_h323_master ct_h323_info; - struct ip_ct_pptp_master ct_pptp_info; - struct ip_ct_ftp_master ct_ftp_info; - struct ip_ct_irc_master ct_irc_info; -}; - -#ifdef CONFIG_IP_NF_NAT_NEEDED -#include -#include - -/* per conntrack: nat application helper private data */ -union ip_conntrack_nat_help { - /* insert nat helper private data here */ - struct ip_nat_pptp nat_pptp_info; -}; -#endif - -#include -#include - -#ifdef CONFIG_NETFILTER_DEBUG -#define IP_NF_ASSERT(x) \ -do 
{ \ - if (!(x)) \ - /* Wooah! I'm tripping my conntrack in a frenzy of \ - netplay... */ \ - printk("NF_IP_ASSERT: %s:%i(%s)\n", \ - __FILE__, __LINE__, __FUNCTION__); \ -} while(0) -#else -#define IP_NF_ASSERT(x) -#endif - -struct ip_conntrack_helper; - -struct ip_conntrack -{ - /* Usage count in here is 1 for hash table/destruct timer, 1 per skb, - plus 1 for any connection(s) we are `master' for */ - struct nf_conntrack ct_general; - - /* Have we seen traffic both ways yet? (bitset) */ - unsigned long status; - - /* Timer function; drops refcnt when it goes off. */ - struct timer_list timeout; - -#ifdef CONFIG_IP_NF_CT_ACCT - /* Accounting Information (same cache line as other written members) */ - struct ip_conntrack_counter counters[IP_CT_DIR_MAX]; -#endif - /* If we were expected by an expectation, this will be it */ - struct ip_conntrack *master; - - /* Current number of expected connections */ - unsigned int expecting; - - /* Unique ID that identifies this conntrack*/ - unsigned int id; - - /* Helper, if any. */ - struct ip_conntrack_helper *helper; - - /* Storage reserved for other modules: */ - union ip_conntrack_proto proto; - - union ip_conntrack_help help; - -#ifdef CONFIG_IP_NF_NAT_NEEDED - struct { - struct ip_nat_info info; - union ip_conntrack_nat_help help; -#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \ - defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) - int masq_index; -#endif - } nat; -#endif /* CONFIG_IP_NF_NAT_NEEDED */ - -#if defined(CONFIG_IP_NF_CONNTRACK_MARK) - u_int32_t mark; -#endif - -#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK - u_int32_t secmark; -#endif - - /* Traversed often, so hopefully in different cacheline to top */ - /* These are my tuples; original and reply */ - struct ip_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX]; -}; - -struct ip_conntrack_expect -{ - /* Internal linked list (global expectation list) */ - struct list_head list; - - /* We expect this tuple, with the following mask */ - struct ip_conntrack_tuple tuple, mask; - - /* Function to call after setup and insertion */ - void (*expectfn)(struct ip_conntrack *new, - struct ip_conntrack_expect *this); - - /* The conntrack of the master connection */ - struct ip_conntrack *master; - - /* Timer function; deletes the expectation. */ - struct timer_list timeout; - - /* Usage count. */ - atomic_t use; - - /* Unique ID */ - unsigned int id; - - /* Flags */ - unsigned int flags; - -#ifdef CONFIG_IP_NF_NAT_NEEDED - __be32 saved_ip; - /* This is the original per-proto part, used to map the - * expected connection the way the recipient expects. */ - union ip_conntrack_manip_proto saved_proto; - /* Direction relative to the master connection. */ - enum ip_conntrack_dir dir; -#endif -}; - -#define IP_CT_EXPECT_PERMANENT 0x1 - -static inline struct ip_conntrack * -tuplehash_to_ctrack(const struct ip_conntrack_tuple_hash *hash) -{ - return container_of(hash, struct ip_conntrack, - tuplehash[hash->tuple.dst.dir]); -} - -/* get master conntrack via master expectation */ -#define master_ct(conntr) (conntr->master) - -/* Alter reply tuple (maybe alter helper). */ -extern void -ip_conntrack_alter_reply(struct ip_conntrack *conntrack, - const struct ip_conntrack_tuple *newreply); - -/* Is this tuple taken? (ignoring any belonging to the given - conntrack). */ -extern int -ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack); - -/* Return conntrack_info and tuple hash for given skb. 
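Illustrative sketch, not part of the patch. The tuplehash_to_ctrack() helper deleted above recovers the owning ip_conntrack from either of the two tuplehash entries embedded in it, using container_of() with the entry's own direction index to pick the right array slot; the generic nf_conntrack code that replaces this file keeps the same idiom. A stripped-down, userspace-buildable sketch with invented structure names (struct conn, struct tuple_hash, hash_to_conn()):

    #include <stddef.h>
    #include <stdio.h>

    /* simplified form of the kernel's container_of() from linux/kernel.h */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct tuple_hash {             /* stand-in for ip_conntrack_tuple_hash */
            int dir;                /* 0 = original, 1 = reply */
    };

    struct conn {                   /* stand-in for struct ip_conntrack */
            unsigned long status;
            struct tuple_hash tuplehash[2];
    };

    /* recover the owning connection from either embedded hash entry */
    static struct conn *hash_to_conn(struct tuple_hash *h)
    {
            return container_of(h, struct conn, tuplehash[h->dir]);
    }

    int main(void)
    {
            struct conn c = { .tuplehash = { { .dir = 0 }, { .dir = 1 } } };

            printf("%d\n", hash_to_conn(&c.tuplehash[1]) == &c);  /* prints 1 */
            return 0;
    }

The variable array index inside offsetof() relies on the GCC __builtin_offsetof extension, exactly as the deleted kernel helper does.
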
*/ -static inline struct ip_conntrack * -ip_conntrack_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo) -{ - *ctinfo = skb->nfctinfo; - return (struct ip_conntrack *)skb->nfct; -} - -/* decrement reference count on a conntrack */ -static inline void -ip_conntrack_put(struct ip_conntrack *ct) -{ - IP_NF_ASSERT(ct); - nf_conntrack_put(&ct->ct_general); -} - -extern int invert_tuplepr(struct ip_conntrack_tuple *inverse, - const struct ip_conntrack_tuple *orig); - -extern void __ip_ct_refresh_acct(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - const struct sk_buff *skb, - unsigned long extra_jiffies, - int do_acct); - -/* Refresh conntrack for this many jiffies and do accounting */ -static inline void ip_ct_refresh_acct(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - const struct sk_buff *skb, - unsigned long extra_jiffies) -{ - __ip_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1); -} - -/* Refresh conntrack for this many jiffies */ -static inline void ip_ct_refresh(struct ip_conntrack *ct, - const struct sk_buff *skb, - unsigned long extra_jiffies) -{ - __ip_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0); -} - -/* These are for NAT. Icky. */ -/* Update TCP window tracking data when NAT mangles the packet */ -extern void ip_conntrack_tcp_update(struct sk_buff *skb, - struct ip_conntrack *conntrack, - enum ip_conntrack_dir dir); - -/* Call me when a conntrack is destroyed. */ -extern void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack); - -/* Fake conntrack entry for untracked connections */ -extern struct ip_conntrack ip_conntrack_untracked; - -/* Returns new sk_buff, or NULL */ -struct sk_buff * -ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user); - -/* Iterate over all conntracks: if iter returns true, it's deleted. */ -extern void -ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *data), - void *data); - -extern struct ip_conntrack_helper * -__ip_conntrack_helper_find_byname(const char *); -extern struct ip_conntrack_helper * -ip_conntrack_helper_find_get(const struct ip_conntrack_tuple *tuple); -extern void ip_conntrack_helper_put(struct ip_conntrack_helper *helper); - -extern struct ip_conntrack_protocol * -__ip_conntrack_proto_find(u_int8_t protocol); -extern struct ip_conntrack_protocol * -ip_conntrack_proto_find_get(u_int8_t protocol); -extern void ip_conntrack_proto_put(struct ip_conntrack_protocol *proto); - -extern void ip_ct_remove_expectations(struct ip_conntrack *ct); - -extern struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *, - struct ip_conntrack_tuple *); - -extern void ip_conntrack_free(struct ip_conntrack *ct); - -extern void ip_conntrack_hash_insert(struct ip_conntrack *ct); - -extern struct ip_conntrack_expect * -__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple); - -extern struct ip_conntrack_expect * -ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple); - -extern struct ip_conntrack_tuple_hash * -__ip_conntrack_find(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack); - -extern void ip_conntrack_flush(void); - -/* It's confirmed if it is, or has been in the hash table. 
*/ -static inline int is_confirmed(struct ip_conntrack *ct) -{ - return test_bit(IPS_CONFIRMED_BIT, &ct->status); -} - -static inline int is_dying(struct ip_conntrack *ct) -{ - return test_bit(IPS_DYING_BIT, &ct->status); -} - -extern unsigned int ip_conntrack_htable_size; -extern int ip_conntrack_checksum; - -#define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++) -#define CONNTRACK_STAT_INC_ATOMIC(count) \ -do { \ - local_bh_disable(); \ - __get_cpu_var(ip_conntrack_stat).count++; \ - local_bh_enable(); \ -} while (0) - -#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS -#include -#include - -struct ip_conntrack_ecache { - struct ip_conntrack *ct; - unsigned int events; -}; -DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache); - -#define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x) - -extern struct atomic_notifier_head ip_conntrack_chain; -extern struct atomic_notifier_head ip_conntrack_expect_chain; - -static inline int ip_conntrack_register_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&ip_conntrack_chain, nb); -} - -static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&ip_conntrack_chain, nb); -} - -static inline int -ip_conntrack_expect_register_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&ip_conntrack_expect_chain, nb); -} - -static inline int -ip_conntrack_expect_unregister_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&ip_conntrack_expect_chain, - nb); -} - -extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct); -extern void __ip_ct_event_cache_init(struct ip_conntrack *ct); - -static inline void -ip_conntrack_event_cache(enum ip_conntrack_events event, - const struct sk_buff *skb) -{ - struct ip_conntrack *ct = (struct ip_conntrack *)skb->nfct; - struct ip_conntrack_ecache *ecache; - - local_bh_disable(); - ecache = &__get_cpu_var(ip_conntrack_ecache); - if (ct != ecache->ct) - __ip_ct_event_cache_init(ct); - ecache->events |= event; - local_bh_enable(); -} - -static inline void ip_conntrack_event(enum ip_conntrack_events event, - struct ip_conntrack *ct) -{ - if (is_confirmed(ct) && !is_dying(ct)) - atomic_notifier_call_chain(&ip_conntrack_chain, event, ct); -} - -static inline void -ip_conntrack_expect_event(enum ip_conntrack_expect_events event, - struct ip_conntrack_expect *exp) -{ - atomic_notifier_call_chain(&ip_conntrack_expect_chain, event, exp); -} -#else /* CONFIG_IP_NF_CONNTRACK_EVENTS */ -static inline void ip_conntrack_event_cache(enum ip_conntrack_events event, - const struct sk_buff *skb) {} -static inline void ip_conntrack_event(enum ip_conntrack_events event, - struct ip_conntrack *ct) {} -static inline void ip_ct_deliver_cached_events(const struct ip_conntrack *ct) {} -static inline void -ip_conntrack_expect_event(enum ip_conntrack_expect_events event, - struct ip_conntrack_expect *exp) {} -#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */ - -#ifdef CONFIG_IP_NF_NAT_NEEDED -static inline int ip_nat_initialized(struct ip_conntrack *conntrack, - enum ip_nat_manip_type manip) -{ - if (manip == IP_NAT_MANIP_SRC) - return test_bit(IPS_SRC_NAT_DONE_BIT, &conntrack->status); - return test_bit(IPS_DST_NAT_DONE_BIT, &conntrack->status); -} -#endif /* CONFIG_IP_NF_NAT_NEEDED */ - -#endif /* __KERNEL__ */ -#endif /* _IP_CONNTRACK_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_amanda.h b/include/linux/netfilter_ipv4/ip_conntrack_amanda.h deleted 
file mode 100644 index de3e41f51aec..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_amanda.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _IP_CONNTRACK_AMANDA_H -#define _IP_CONNTRACK_AMANDA_H -/* AMANDA tracking. */ - -struct ip_conntrack_expect; -extern unsigned int (*ip_nat_amanda_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack_expect *exp); -#endif /* _IP_CONNTRACK_AMANDA_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_core.h b/include/linux/netfilter_ipv4/ip_conntrack_core.h deleted file mode 100644 index e3a6df07aa4b..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_core.h +++ /dev/null @@ -1,61 +0,0 @@ -#ifndef _IP_CONNTRACK_CORE_H -#define _IP_CONNTRACK_CORE_H -#include - -#define MAX_IP_CT_PROTO 256 -extern struct ip_conntrack_protocol *ip_ct_protos[MAX_IP_CT_PROTO]; - -/* This header is used to share core functionality between the - standalone connection tracking module, and the compatibility layer's use - of connection tracking. */ -extern unsigned int ip_conntrack_in(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)); - -extern int ip_conntrack_init(void); -extern void ip_conntrack_cleanup(void); - -struct ip_conntrack_protocol; - -extern int -ip_ct_get_tuple(const struct iphdr *iph, - const struct sk_buff *skb, - unsigned int dataoff, - struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_protocol *protocol); - -extern int -ip_ct_invert_tuple(struct ip_conntrack_tuple *inverse, - const struct ip_conntrack_tuple *orig, - const struct ip_conntrack_protocol *protocol); - -/* Find a connection corresponding to a tuple. */ -struct ip_conntrack_tuple_hash * -ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack); - -extern int __ip_conntrack_confirm(struct sk_buff **pskb); - -/* Confirm a connection: returns NF_DROP if packet must be dropped. */ -static inline int ip_conntrack_confirm(struct sk_buff **pskb) -{ - struct ip_conntrack *ct = (struct ip_conntrack *)(*pskb)->nfct; - int ret = NF_ACCEPT; - - if (ct) { - if (!is_confirmed(ct) && !is_dying(ct)) - ret = __ip_conntrack_confirm(pskb); - ip_ct_deliver_cached_events(ct); - } - return ret; -} - -extern void ip_ct_unlink_expect(struct ip_conntrack_expect *exp); - -extern struct list_head *ip_conntrack_hash; -extern struct list_head ip_conntrack_expect_list; -extern rwlock_t ip_conntrack_lock; -#endif /* _IP_CONNTRACK_CORE_H */ - diff --git a/include/linux/netfilter_ipv4/ip_conntrack_ftp.h b/include/linux/netfilter_ipv4/ip_conntrack_ftp.h deleted file mode 100644 index 2129fc3972ac..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_ftp.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _IP_CONNTRACK_FTP_H -#define _IP_CONNTRACK_FTP_H -/* FTP tracking. 
*/ - -/* This enum is exposed to userspace */ -enum ip_ct_ftp_type -{ - /* PORT command from client */ - IP_CT_FTP_PORT, - /* PASV response from server */ - IP_CT_FTP_PASV, - /* EPRT command from client */ - IP_CT_FTP_EPRT, - /* EPSV response from server */ - IP_CT_FTP_EPSV, -}; - -#ifdef __KERNEL__ - -#define FTP_PORT 21 - -#define NUM_SEQ_TO_REMEMBER 2 -/* This structure exists only once per master */ -struct ip_ct_ftp_master { - /* Valid seq positions for cmd matching after newline */ - u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER]; - /* 0 means seq_match_aft_nl not set */ - int seq_aft_nl_num[IP_CT_DIR_MAX]; -}; - -struct ip_conntrack_expect; - -/* For NAT to hook in when we find a packet which describes what other - * connection we should expect. */ -extern unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - enum ip_ct_ftp_type type, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack_expect *exp, - u32 *seq); -#endif /* __KERNEL__ */ - -#endif /* _IP_CONNTRACK_FTP_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_h323.h b/include/linux/netfilter_ipv4/ip_conntrack_h323.h deleted file mode 100644 index 18f769818f4e..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_h323.h +++ /dev/null @@ -1,89 +0,0 @@ -#ifndef _IP_CONNTRACK_H323_H -#define _IP_CONNTRACK_H323_H - -#ifdef __KERNEL__ - -#include - -#define RAS_PORT 1719 -#define Q931_PORT 1720 -#define H323_RTP_CHANNEL_MAX 4 /* Audio, video, FAX and other */ - -/* This structure exists only once per master */ -struct ip_ct_h323_master { - - /* Original and NATed Q.931 or H.245 signal ports */ - u_int16_t sig_port[IP_CT_DIR_MAX]; - - /* Original and NATed RTP ports */ - u_int16_t rtp_port[H323_RTP_CHANNEL_MAX][IP_CT_DIR_MAX]; - - union { - /* RAS connection timeout */ - u_int32_t timeout; - - /* Next TPKT length (for separate TPKT header and data) */ - u_int16_t tpkt_len[IP_CT_DIR_MAX]; - }; -}; - -struct ip_conntrack_expect; - -extern int get_h225_addr(unsigned char *data, TransportAddress * addr, - __be32 * ip, u_int16_t * port); -extern void ip_conntrack_h245_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this); -extern void ip_conntrack_q931_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this); -extern int (*set_h245_addr_hook) (struct sk_buff ** pskb, - unsigned char **data, int dataoff, - H245_TransportAddress * addr, - __be32 ip, u_int16_t port); -extern int (*set_h225_addr_hook) (struct sk_buff ** pskb, - unsigned char **data, int dataoff, - TransportAddress * addr, - __be32 ip, u_int16_t port); -extern int (*set_sig_addr_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, - TransportAddress * addr, int count); -extern int (*set_ras_addr_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, - TransportAddress * addr, int count); -extern int (*nat_rtp_rtcp_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - H245_TransportAddress * addr, - u_int16_t port, u_int16_t rtp_port, - struct ip_conntrack_expect * rtp_exp, - struct ip_conntrack_expect * rtcp_exp); -extern int (*nat_t120_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - H245_TransportAddress * addr, u_int16_t port, - struct ip_conntrack_expect * exp); -extern int 
(*nat_h245_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - TransportAddress * addr, u_int16_t port, - struct ip_conntrack_expect * exp); -extern int (*nat_callforwarding_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - TransportAddress * addr, - u_int16_t port, - struct ip_conntrack_expect * exp); -extern int (*nat_q931_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, TransportAddress * addr, - int idx, u_int16_t port, - struct ip_conntrack_expect * exp); - -#endif - -#endif diff --git a/include/linux/netfilter_ipv4/ip_conntrack_helper.h b/include/linux/netfilter_ipv4/ip_conntrack_helper.h deleted file mode 100644 index 77fe868d36ff..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_helper.h +++ /dev/null @@ -1,46 +0,0 @@ -/* IP connection tracking helpers. */ -#ifndef _IP_CONNTRACK_HELPER_H -#define _IP_CONNTRACK_HELPER_H -#include - -struct module; - -struct ip_conntrack_helper -{ - struct list_head list; /* Internal use. */ - - const char *name; /* name of the module */ - struct module *me; /* pointer to self */ - unsigned int max_expected; /* Maximum number of concurrent - * expected connections */ - unsigned int timeout; /* timeout for expecteds */ - - /* Mask of things we will help (compared against server response) */ - struct ip_conntrack_tuple tuple; - struct ip_conntrack_tuple mask; - - /* Function to call when data passes; return verdict, or -1 to - invalidate. */ - int (*help)(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info conntrackinfo); - - void (*destroy)(struct ip_conntrack *ct); - - int (*to_nfattr)(struct sk_buff *skb, const struct ip_conntrack *ct); -}; - -extern int ip_conntrack_helper_register(struct ip_conntrack_helper *); -extern void ip_conntrack_helper_unregister(struct ip_conntrack_helper *); - -/* Allocate space for an expectation: this is mandatory before calling - ip_conntrack_expect_related. You will have to call put afterwards. */ -extern struct ip_conntrack_expect * -ip_conntrack_expect_alloc(struct ip_conntrack *master); -extern void ip_conntrack_expect_put(struct ip_conntrack_expect *exp); - -/* Add an expected connection: can have more than one per connection */ -extern int ip_conntrack_expect_related(struct ip_conntrack_expect *exp); -extern void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp); - -#endif /*_IP_CONNTRACK_HELPER_H*/ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_icmp.h b/include/linux/netfilter_ipv4/ip_conntrack_icmp.h deleted file mode 100644 index eed5ee3e4744..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_icmp.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _IP_CONNTRACK_ICMP_H -#define _IP_CONNTRACK_ICMP_H - -#include - -#endif /* _IP_CONNTRACK_ICMP_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_irc.h b/include/linux/netfilter_ipv4/ip_conntrack_irc.h deleted file mode 100644 index 16601e0d5626..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_irc.h +++ /dev/null @@ -1,32 +0,0 @@ -/* IRC extension for IP connection tracking. 
- * (C) 2000 by Harald Welte - * based on RR's ip_conntrack_ftp.h - * - * ip_conntrack_irc.h,v 1.6 2000/11/07 18:26:42 laforge Exp - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * - */ -#ifndef _IP_CONNTRACK_IRC_H -#define _IP_CONNTRACK_IRC_H - -/* This structure exists only once per master */ -struct ip_ct_irc_master { -}; - -#ifdef __KERNEL__ -extern unsigned int (*ip_nat_irc_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack_expect *exp); - -#define IRC_PORT 6667 - -#endif /* __KERNEL__ */ - -#endif /* _IP_CONNTRACK_IRC_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h deleted file mode 100644 index 2644b1faddd6..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h +++ /dev/null @@ -1,326 +0,0 @@ -/* PPTP constants and structs */ -#ifndef _CONNTRACK_PPTP_H -#define _CONNTRACK_PPTP_H - -/* state of the control session */ -enum pptp_ctrlsess_state { - PPTP_SESSION_NONE, /* no session present */ - PPTP_SESSION_ERROR, /* some session error */ - PPTP_SESSION_STOPREQ, /* stop_sess request seen */ - PPTP_SESSION_REQUESTED, /* start_sess request seen */ - PPTP_SESSION_CONFIRMED, /* session established */ -}; - -/* state of the call inside the control session */ -enum pptp_ctrlcall_state { - PPTP_CALL_NONE, - PPTP_CALL_ERROR, - PPTP_CALL_OUT_REQ, - PPTP_CALL_OUT_CONF, - PPTP_CALL_IN_REQ, - PPTP_CALL_IN_REP, - PPTP_CALL_IN_CONF, - PPTP_CALL_CLEAR_REQ, -}; - - -/* conntrack private data */ -struct ip_ct_pptp_master { - enum pptp_ctrlsess_state sstate; /* session state */ - - /* everything below is going to be per-expectation in newnat, - * since there could be more than one call within one session */ - enum pptp_ctrlcall_state cstate; /* call state */ - __be16 pac_call_id; /* call id of PAC, host byte order */ - __be16 pns_call_id; /* call id of PNS, host byte order */ - - /* in pre-2.6.11 this used to be per-expect. 
Now it is per-conntrack - * and therefore imposes a fixed limit on the number of maps */ - struct ip_ct_gre_keymap *keymap_orig, *keymap_reply; -}; - -/* conntrack_expect private member */ -struct ip_ct_pptp_expect { - enum pptp_ctrlcall_state cstate; /* call state */ - __be16 pac_call_id; /* call id of PAC */ - __be16 pns_call_id; /* call id of PNS */ -}; - - -#ifdef __KERNEL__ - -#define IP_CONNTR_PPTP PPTP_CONTROL_PORT - -#define PPTP_CONTROL_PORT 1723 - -#define PPTP_PACKET_CONTROL 1 -#define PPTP_PACKET_MGMT 2 - -#define PPTP_MAGIC_COOKIE 0x1a2b3c4d - -struct pptp_pkt_hdr { - __u16 packetLength; - __be16 packetType; - __be32 magicCookie; -}; - -/* PptpControlMessageType values */ -#define PPTP_START_SESSION_REQUEST 1 -#define PPTP_START_SESSION_REPLY 2 -#define PPTP_STOP_SESSION_REQUEST 3 -#define PPTP_STOP_SESSION_REPLY 4 -#define PPTP_ECHO_REQUEST 5 -#define PPTP_ECHO_REPLY 6 -#define PPTP_OUT_CALL_REQUEST 7 -#define PPTP_OUT_CALL_REPLY 8 -#define PPTP_IN_CALL_REQUEST 9 -#define PPTP_IN_CALL_REPLY 10 -#define PPTP_IN_CALL_CONNECT 11 -#define PPTP_CALL_CLEAR_REQUEST 12 -#define PPTP_CALL_DISCONNECT_NOTIFY 13 -#define PPTP_WAN_ERROR_NOTIFY 14 -#define PPTP_SET_LINK_INFO 15 - -#define PPTP_MSG_MAX 15 - -/* PptpGeneralError values */ -#define PPTP_ERROR_CODE_NONE 0 -#define PPTP_NOT_CONNECTED 1 -#define PPTP_BAD_FORMAT 2 -#define PPTP_BAD_VALUE 3 -#define PPTP_NO_RESOURCE 4 -#define PPTP_BAD_CALLID 5 -#define PPTP_REMOVE_DEVICE_ERROR 6 - -struct PptpControlHeader { - __be16 messageType; - __u16 reserved; -}; - -/* FramingCapability Bitmap Values */ -#define PPTP_FRAME_CAP_ASYNC 0x1 -#define PPTP_FRAME_CAP_SYNC 0x2 - -/* BearerCapability Bitmap Values */ -#define PPTP_BEARER_CAP_ANALOG 0x1 -#define PPTP_BEARER_CAP_DIGITAL 0x2 - -struct PptpStartSessionRequest { - __be16 protocolVersion; - __u16 reserved1; - __be32 framingCapability; - __be32 bearerCapability; - __be16 maxChannels; - __be16 firmwareRevision; - __u8 hostName[64]; - __u8 vendorString[64]; -}; - -/* PptpStartSessionResultCode Values */ -#define PPTP_START_OK 1 -#define PPTP_START_GENERAL_ERROR 2 -#define PPTP_START_ALREADY_CONNECTED 3 -#define PPTP_START_NOT_AUTHORIZED 4 -#define PPTP_START_UNKNOWN_PROTOCOL 5 - -struct PptpStartSessionReply { - __be16 protocolVersion; - __u8 resultCode; - __u8 generalErrorCode; - __be32 framingCapability; - __be32 bearerCapability; - __be16 maxChannels; - __be16 firmwareRevision; - __u8 hostName[64]; - __u8 vendorString[64]; -}; - -/* PptpStopReasons */ -#define PPTP_STOP_NONE 1 -#define PPTP_STOP_PROTOCOL 2 -#define PPTP_STOP_LOCAL_SHUTDOWN 3 - -struct PptpStopSessionRequest { - __u8 reason; - __u8 reserved1; - __u16 reserved2; -}; - -/* PptpStopSessionResultCode */ -#define PPTP_STOP_OK 1 -#define PPTP_STOP_GENERAL_ERROR 2 - -struct PptpStopSessionReply { - __u8 resultCode; - __u8 generalErrorCode; - __u16 reserved1; -}; - -struct PptpEchoRequest { - __be32 identNumber; -}; - -/* PptpEchoReplyResultCode */ -#define PPTP_ECHO_OK 1 -#define PPTP_ECHO_GENERAL_ERROR 2 - -struct PptpEchoReply { - __be32 identNumber; - __u8 resultCode; - __u8 generalErrorCode; - __u16 reserved; -}; - -/* PptpFramingType */ -#define PPTP_ASYNC_FRAMING 1 -#define PPTP_SYNC_FRAMING 2 -#define PPTP_DONT_CARE_FRAMING 3 - -/* PptpCallBearerType */ -#define PPTP_ANALOG_TYPE 1 -#define PPTP_DIGITAL_TYPE 2 -#define PPTP_DONT_CARE_BEARER_TYPE 3 - -struct PptpOutCallRequest { - __be16 callID; - __be16 callSerialNumber; - __be32 minBPS; - __be32 maxBPS; - __be32 bearerType; - __be32 framingType; - __be16 packetWindow; - 
__be16 packetProcDelay; - __be16 phoneNumberLength; - __u16 reserved1; - __u8 phoneNumber[64]; - __u8 subAddress[64]; -}; - -/* PptpCallResultCode */ -#define PPTP_OUTCALL_CONNECT 1 -#define PPTP_OUTCALL_GENERAL_ERROR 2 -#define PPTP_OUTCALL_NO_CARRIER 3 -#define PPTP_OUTCALL_BUSY 4 -#define PPTP_OUTCALL_NO_DIAL_TONE 5 -#define PPTP_OUTCALL_TIMEOUT 6 -#define PPTP_OUTCALL_DONT_ACCEPT 7 - -struct PptpOutCallReply { - __be16 callID; - __be16 peersCallID; - __u8 resultCode; - __u8 generalErrorCode; - __be16 causeCode; - __be32 connectSpeed; - __be16 packetWindow; - __be16 packetProcDelay; - __be32 physChannelID; -}; - -struct PptpInCallRequest { - __be16 callID; - __be16 callSerialNumber; - __be32 callBearerType; - __be32 physChannelID; - __be16 dialedNumberLength; - __be16 dialingNumberLength; - __u8 dialedNumber[64]; - __u8 dialingNumber[64]; - __u8 subAddress[64]; -}; - -/* PptpInCallResultCode */ -#define PPTP_INCALL_ACCEPT 1 -#define PPTP_INCALL_GENERAL_ERROR 2 -#define PPTP_INCALL_DONT_ACCEPT 3 - -struct PptpInCallReply { - __be16 callID; - __be16 peersCallID; - __u8 resultCode; - __u8 generalErrorCode; - __be16 packetWindow; - __be16 packetProcDelay; - __u16 reserved; -}; - -struct PptpInCallConnected { - __be16 peersCallID; - __u16 reserved; - __be32 connectSpeed; - __be16 packetWindow; - __be16 packetProcDelay; - __be32 callFramingType; -}; - -struct PptpClearCallRequest { - __be16 callID; - __u16 reserved; -}; - -struct PptpCallDisconnectNotify { - __be16 callID; - __u8 resultCode; - __u8 generalErrorCode; - __be16 causeCode; - __u16 reserved; - __u8 callStatistics[128]; -}; - -struct PptpWanErrorNotify { - __be16 peersCallID; - __u16 reserved; - __be32 crcErrors; - __be32 framingErrors; - __be32 hardwareOverRuns; - __be32 bufferOverRuns; - __be32 timeoutErrors; - __be32 alignmentErrors; -}; - -struct PptpSetLinkInfo { - __be16 peersCallID; - __u16 reserved; - __be32 sendAccm; - __be32 recvAccm; -}; - -union pptp_ctrl_union { - struct PptpStartSessionRequest sreq; - struct PptpStartSessionReply srep; - struct PptpStopSessionRequest streq; - struct PptpStopSessionReply strep; - struct PptpOutCallRequest ocreq; - struct PptpOutCallReply ocack; - struct PptpInCallRequest icreq; - struct PptpInCallReply icack; - struct PptpInCallConnected iccon; - struct PptpClearCallRequest clrreq; - struct PptpCallDisconnectNotify disc; - struct PptpWanErrorNotify wanerr; - struct PptpSetLinkInfo setlink; -}; - -extern int -(*ip_nat_pptp_hook_outbound)(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq); - -extern int -(*ip_nat_pptp_hook_inbound)(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq); - -extern void -(*ip_nat_pptp_hook_exp_gre)(struct ip_conntrack_expect *exp_orig, - struct ip_conntrack_expect *exp_reply); - -extern void -(*ip_nat_pptp_hook_expectfn)(struct ip_conntrack *ct, - struct ip_conntrack_expect *exp); -#endif /* __KERNEL__ */ -#endif /* _CONNTRACK_PPTP_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h deleted file mode 100644 index e371e0fc1672..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h +++ /dev/null @@ -1,114 +0,0 @@ -#ifndef _CONNTRACK_PROTO_GRE_H -#define _CONNTRACK_PROTO_GRE_H -#include - -/* GRE PROTOCOL HEADER */ - -/* GRE Version field */ -#define GRE_VERSION_1701 0x0 
-#define GRE_VERSION_PPTP 0x1 - -/* GRE Protocol field */ -#define GRE_PROTOCOL_PPTP 0x880B - -/* GRE Flags */ -#define GRE_FLAG_C 0x80 -#define GRE_FLAG_R 0x40 -#define GRE_FLAG_K 0x20 -#define GRE_FLAG_S 0x10 -#define GRE_FLAG_A 0x80 - -#define GRE_IS_C(f) ((f)&GRE_FLAG_C) -#define GRE_IS_R(f) ((f)&GRE_FLAG_R) -#define GRE_IS_K(f) ((f)&GRE_FLAG_K) -#define GRE_IS_S(f) ((f)&GRE_FLAG_S) -#define GRE_IS_A(f) ((f)&GRE_FLAG_A) - -/* GRE is a mess: Four different standards */ -struct gre_hdr { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u16 rec:3, - srr:1, - seq:1, - key:1, - routing:1, - csum:1, - version:3, - reserved:4, - ack:1; -#elif defined(__BIG_ENDIAN_BITFIELD) - __u16 csum:1, - routing:1, - key:1, - seq:1, - srr:1, - rec:3, - ack:1, - reserved:4, - version:3; -#else -#error "Adjust your defines" -#endif - __be16 protocol; -}; - -/* modified GRE header for PPTP */ -struct gre_hdr_pptp { - __u8 flags; /* bitfield */ - __u8 version; /* should be GRE_VERSION_PPTP */ - __be16 protocol; /* should be GRE_PROTOCOL_PPTP */ - __be16 payload_len; /* size of ppp payload, not inc. gre header */ - __be16 call_id; /* peer's call_id for this session */ - __be32 seq; /* sequence number. Present if S==1 */ - __be32 ack; /* seq number of highest packet recieved by */ - /* sender in this session */ -}; - - -/* this is part of ip_conntrack */ -struct ip_ct_gre { - unsigned int stream_timeout; - unsigned int timeout; -}; - -#ifdef __KERNEL__ -struct ip_conntrack_expect; -struct ip_conntrack; - -/* structure for original <-> reply keymap */ -struct ip_ct_gre_keymap { - struct list_head list; - - struct ip_conntrack_tuple tuple; -}; - -/* add new tuple->key_reply pair to keymap */ -int ip_ct_gre_keymap_add(struct ip_conntrack *ct, - struct ip_conntrack_tuple *t, - int reply); - -/* delete keymap entries */ -void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct); - - -/* get pointer to gre key, if present */ -static inline __be32 *gre_key(struct gre_hdr *greh) -{ - if (!greh->key) - return NULL; - if (greh->csum || greh->routing) - return (__be32 *) (greh+sizeof(*greh)+4); - return (__be32 *) (greh+sizeof(*greh)); -} - -/* get pointer ot gre csum, if present */ -static inline __sum16 *gre_csum(struct gre_hdr *greh) -{ - if (!greh->csum) - return NULL; - return (__sum16 *) (greh+sizeof(*greh)); -} - -#endif /* __KERNEL__ */ - -#endif /* _CONNTRACK_PROTO_GRE_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_protocol.h b/include/linux/netfilter_ipv4/ip_conntrack_protocol.h deleted file mode 100644 index 2c76b879e3dc..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_protocol.h +++ /dev/null @@ -1,98 +0,0 @@ -/* Header for use in defining a given protocol for connection tracking. */ -#ifndef _IP_CONNTRACK_PROTOCOL_H -#define _IP_CONNTRACK_PROTOCOL_H -#include -#include - -struct seq_file; - -struct ip_conntrack_protocol -{ - /* Protocol number. */ - u_int8_t proto; - - /* Protocol name */ - const char *name; - - /* Try to fill in the third arg: dataoff is offset past IP - hdr. Return true if possible. */ - int (*pkt_to_tuple)(const struct sk_buff *skb, - unsigned int dataoff, - struct ip_conntrack_tuple *tuple); - - /* Invert the per-proto part of the tuple: ie. turn xmit into reply. - * Some packets can't be inverted: return 0 in that case. - */ - int (*invert_tuple)(struct ip_conntrack_tuple *inverse, - const struct ip_conntrack_tuple *orig); - - /* Print out the per-protocol part of the tuple. 
Return like seq_* */ - int (*print_tuple)(struct seq_file *, - const struct ip_conntrack_tuple *); - - /* Print out the private part of the conntrack. */ - int (*print_conntrack)(struct seq_file *, const struct ip_conntrack *); - - /* Returns verdict for packet, or -1 for invalid. */ - int (*packet)(struct ip_conntrack *conntrack, - const struct sk_buff *skb, - enum ip_conntrack_info ctinfo); - - /* Called when a new connection for this protocol found; - * returns TRUE if it's OK. If so, packet() called next. */ - int (*new)(struct ip_conntrack *conntrack, const struct sk_buff *skb); - - /* Called when a conntrack entry is destroyed */ - void (*destroy)(struct ip_conntrack *conntrack); - - int (*error)(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, - unsigned int hooknum); - - /* convert protoinfo to nfnetink attributes */ - int (*to_nfattr)(struct sk_buff *skb, struct nfattr *nfa, - const struct ip_conntrack *ct); - - /* convert nfnetlink attributes to protoinfo */ - int (*from_nfattr)(struct nfattr *tb[], struct ip_conntrack *ct); - - int (*tuple_to_nfattr)(struct sk_buff *skb, - const struct ip_conntrack_tuple *t); - int (*nfattr_to_tuple)(struct nfattr *tb[], - struct ip_conntrack_tuple *t); - - /* Module (if any) which this is connected to. */ - struct module *me; -}; - -/* Protocol registration. */ -extern int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto); -extern void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto); -/* Existing built-in protocols */ -extern struct ip_conntrack_protocol ip_conntrack_protocol_tcp; -extern struct ip_conntrack_protocol ip_conntrack_protocol_udp; -extern struct ip_conntrack_protocol ip_conntrack_protocol_icmp; -extern struct ip_conntrack_protocol ip_conntrack_generic_protocol; -extern int ip_conntrack_protocol_tcp_init(void); - -/* Log invalid packets */ -extern unsigned int ip_ct_log_invalid; - -extern int ip_ct_port_tuple_to_nfattr(struct sk_buff *, - const struct ip_conntrack_tuple *); -extern int ip_ct_port_nfattr_to_tuple(struct nfattr *tb[], - struct ip_conntrack_tuple *); - -#ifdef CONFIG_SYSCTL -#ifdef DEBUG_INVALID_PACKETS -#define LOG_INVALID(proto) \ - (ip_ct_log_invalid == (proto) || ip_ct_log_invalid == IPPROTO_RAW) -#else -#define LOG_INVALID(proto) \ - ((ip_ct_log_invalid == (proto) || ip_ct_log_invalid == IPPROTO_RAW) \ - && net_ratelimit()) -#endif -#else -#define LOG_INVALID(proto) 0 -#endif /* CONFIG_SYSCTL */ - -#endif /*_IP_CONNTRACK_PROTOCOL_H*/ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_sctp.h b/include/linux/netfilter_ipv4/ip_conntrack_sctp.h deleted file mode 100644 index 4099a041a32a..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_sctp.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _IP_CONNTRACK_SCTP_H -#define _IP_CONNTRACK_SCTP_H - -#include - -#endif /* _IP_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_sip.h b/include/linux/netfilter_ipv4/ip_conntrack_sip.h deleted file mode 100644 index bef6c646defa..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_sip.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __IP_CONNTRACK_SIP_H__ -#define __IP_CONNTRACK_SIP_H__ -#ifdef __KERNEL__ - -#define SIP_PORT 5060 -#define SIP_TIMEOUT 3600 - -enum sip_header_pos { - POS_REG_REQ_URI, - POS_REQ_URI, - POS_FROM, - POS_TO, - POS_VIA, - POS_CONTACT, - POS_CONTENT, - POS_MEDIA, - POS_OWNER, - POS_CONNECTION, - POS_SDP_HEADER, -}; - -extern unsigned int (*ip_nat_sip_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct 
ip_conntrack *ct, - const char **dptr); -extern unsigned int (*ip_nat_sdp_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack_expect *exp, - const char *dptr); - -extern int ct_sip_get_info(const char *dptr, size_t dlen, - unsigned int *matchoff, - unsigned int *matchlen, - enum sip_header_pos pos); -extern int ct_sip_lnlen(const char *line, const char *limit); -extern const char *ct_sip_search(const char *needle, const char *haystack, - size_t needle_len, size_t haystack_len, - int case_sensitive); -#endif /* __KERNEL__ */ -#endif /* __IP_CONNTRACK_SIP_H__ */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tcp.h b/include/linux/netfilter_ipv4/ip_conntrack_tcp.h deleted file mode 100644 index 876b8fb17e68..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_tcp.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _IP_CONNTRACK_TCP_H -#define _IP_CONNTRACK_TCP_H - -#include - -#endif /* _IP_CONNTRACK_TCP_H */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tftp.h b/include/linux/netfilter_ipv4/ip_conntrack_tftp.h deleted file mode 100644 index a404fc0abf0e..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_tftp.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _IP_CT_TFTP -#define _IP_CT_TFTP - -#define TFTP_PORT 69 - -struct tftphdr { - __be16 opcode; -}; - -#define TFTP_OPCODE_READ 1 -#define TFTP_OPCODE_WRITE 2 -#define TFTP_OPCODE_DATA 3 -#define TFTP_OPCODE_ACK 4 -#define TFTP_OPCODE_ERROR 5 - -extern unsigned int (*ip_nat_tftp_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack_expect *exp); - -#endif /* _IP_CT_TFTP */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h deleted file mode 100644 index c228bde74c33..000000000000 --- a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h +++ /dev/null @@ -1,146 +0,0 @@ -#ifndef _IP_CONNTRACK_TUPLE_H -#define _IP_CONNTRACK_TUPLE_H - -#include -#include - -/* A `tuple' is a structure containing the information to uniquely - identify a connection. ie. if two packets have the same tuple, they - are in the same connection; if not, they are not. - - We divide the structure along "manipulatable" and - "non-manipulatable" lines, for the benefit of the NAT code. -*/ - -/* The protocol-specific manipulable parts of the tuple: always in - network order! */ -union ip_conntrack_manip_proto -{ - /* Add other protocols here. */ - u_int16_t all; - - struct { - __be16 port; - } tcp; - struct { - __be16 port; - } udp; - struct { - __be16 id; - } icmp; - struct { - __be16 port; - } sctp; - struct { - __be16 key; /* key is 32bit, pptp only uses 16 */ - } gre; -}; - -/* The manipulable part of the tuple. */ -struct ip_conntrack_manip -{ - __be32 ip; - union ip_conntrack_manip_proto u; -}; - -/* This contains the information to distinguish a connection. */ -struct ip_conntrack_tuple -{ - struct ip_conntrack_manip src; - - /* These are the parts of the tuple which are fixed. */ - struct { - __be32 ip; - union { - /* Add other protocols here. */ - u_int16_t all; - - struct { - __be16 port; - } tcp; - struct { - __be16 port; - } udp; - struct { - u_int8_t type, code; - } icmp; - struct { - __be16 port; - } sctp; - struct { - __be16 key; /* key is 32bit, - * pptp only uses 16 */ - } gre; - } u; - - /* The protocol. */ - u_int8_t protonum; - - /* The direction (for tuplehash) */ - u_int8_t dir; - } dst; -}; - -/* This is optimized opposed to a memset of the whole structure. 
Everything we - * really care about is the source/destination unions */ -#define IP_CT_TUPLE_U_BLANK(tuple) \ - do { \ - (tuple)->src.u.all = 0; \ - (tuple)->dst.u.all = 0; \ - } while (0) - -#ifdef __KERNEL__ - -#define DUMP_TUPLE(tp) \ -DEBUGP("tuple %p: %u %u.%u.%u.%u:%hu -> %u.%u.%u.%u:%hu\n", \ - (tp), (tp)->dst.protonum, \ - NIPQUAD((tp)->src.ip), ntohs((tp)->src.u.all), \ - NIPQUAD((tp)->dst.ip), ntohs((tp)->dst.u.all)) - -/* If we're the first tuple, it's the original dir. */ -#define DIRECTION(h) ((enum ip_conntrack_dir)(h)->tuple.dst.dir) - -/* Connections have two entries in the hash table: one for each way */ -struct ip_conntrack_tuple_hash -{ - struct list_head list; - - struct ip_conntrack_tuple tuple; -}; - -#endif /* __KERNEL__ */ - -static inline int ip_ct_tuple_src_equal(const struct ip_conntrack_tuple *t1, - const struct ip_conntrack_tuple *t2) -{ - return t1->src.ip == t2->src.ip - && t1->src.u.all == t2->src.u.all; -} - -static inline int ip_ct_tuple_dst_equal(const struct ip_conntrack_tuple *t1, - const struct ip_conntrack_tuple *t2) -{ - return t1->dst.ip == t2->dst.ip - && t1->dst.u.all == t2->dst.u.all - && t1->dst.protonum == t2->dst.protonum; -} - -static inline int ip_ct_tuple_equal(const struct ip_conntrack_tuple *t1, - const struct ip_conntrack_tuple *t2) -{ - return ip_ct_tuple_src_equal(t1, t2) && ip_ct_tuple_dst_equal(t1, t2); -} - -static inline int ip_ct_tuple_mask_cmp(const struct ip_conntrack_tuple *t, - const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_tuple *mask) -{ - return !(((t->src.ip ^ tuple->src.ip) & mask->src.ip) - || ((t->dst.ip ^ tuple->dst.ip) & mask->dst.ip) - || ((t->src.u.all ^ tuple->src.u.all) & mask->src.u.all) - || ((t->dst.u.all ^ tuple->dst.u.all) & mask->dst.u.all) - || ((t->dst.protonum ^ tuple->dst.protonum) - & mask->dst.protonum)); -} - -#endif /* _IP_CONNTRACK_TUPLE_H */ diff --git a/include/linux/netfilter_ipv4/ip_nat.h b/include/linux/netfilter_ipv4/ip_nat.h deleted file mode 100644 index bbca89aab813..000000000000 --- a/include/linux/netfilter_ipv4/ip_nat.h +++ /dev/null @@ -1,79 +0,0 @@ -#ifndef _IP_NAT_H -#define _IP_NAT_H -#include -#include - -#define IP_NAT_MAPPING_TYPE_MAX_NAMELEN 16 - -enum ip_nat_manip_type -{ - IP_NAT_MANIP_SRC, - IP_NAT_MANIP_DST -}; - -/* SRC manip occurs POST_ROUTING or LOCAL_IN */ -#define HOOK2MANIP(hooknum) ((hooknum) != NF_IP_POST_ROUTING && (hooknum) != NF_IP_LOCAL_IN) - -#define IP_NAT_RANGE_MAP_IPS 1 -#define IP_NAT_RANGE_PROTO_SPECIFIED 2 -#define IP_NAT_RANGE_PROTO_RANDOM 4 /* add randomness to "port" selection */ - -/* NAT sequence number modifications */ -struct ip_nat_seq { - /* position of the last TCP sequence number - * modification (if any) */ - u_int32_t correction_pos; - /* sequence number offset before and after last modification */ - int16_t offset_before, offset_after; -}; - -/* Single range specification. */ -struct ip_nat_range -{ - /* Set to OR of flags above. */ - unsigned int flags; - - /* Inclusive: network order. */ - __be32 min_ip, max_ip; - - /* Inclusive: network order */ - union ip_conntrack_manip_proto min, max; -}; - -/* For backwards compat: don't use in modern code. */ -struct ip_nat_multi_range_compat -{ - unsigned int rangesize; /* Must be 1. */ - - /* hangs off end. */ - struct ip_nat_range range[1]; -}; - -#ifdef __KERNEL__ -#include - -/* Protects NAT hash tables, and NAT-private part of conntracks. */ -extern rwlock_t ip_nat_lock; - -/* The structure embedded in the conntrack structure. 
*/ -struct ip_nat_info -{ - struct list_head bysource; - struct ip_nat_seq seq[IP_CT_DIR_MAX]; -}; - -struct ip_conntrack; - -/* Set up the info structure to map into this range. */ -extern unsigned int ip_nat_setup_info(struct ip_conntrack *conntrack, - const struct ip_nat_range *range, - unsigned int hooknum); - -/* Is this tuple already taken? (not by us)*/ -extern int ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack); - -#else /* !__KERNEL__: iptables wants this to compile. */ -#define ip_nat_multi_range ip_nat_multi_range_compat -#endif /*__KERNEL__*/ -#endif diff --git a/include/linux/netfilter_ipv4/ip_nat_core.h b/include/linux/netfilter_ipv4/ip_nat_core.h deleted file mode 100644 index 60566f9fd7b3..000000000000 --- a/include/linux/netfilter_ipv4/ip_nat_core.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _IP_NAT_CORE_H -#define _IP_NAT_CORE_H -#include -#include - -/* This header used to share core functionality between the standalone - NAT module, and the compatibility layer's use of NAT for masquerading. */ - -extern unsigned int ip_nat_packet(struct ip_conntrack *ct, - enum ip_conntrack_info conntrackinfo, - unsigned int hooknum, - struct sk_buff **pskb); - -extern int ip_nat_icmp_reply_translation(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned int hooknum, - struct sk_buff **pskb); -#endif /* _IP_NAT_CORE_H */ diff --git a/include/linux/netfilter_ipv4/ip_nat_helper.h b/include/linux/netfilter_ipv4/ip_nat_helper.h deleted file mode 100644 index bf9cb105c885..000000000000 --- a/include/linux/netfilter_ipv4/ip_nat_helper.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _IP_NAT_HELPER_H -#define _IP_NAT_HELPER_H -/* NAT protocol helper routines. */ - -#include -#include - -struct sk_buff; - -/* These return true or false. */ -extern int ip_nat_mangle_tcp_packet(struct sk_buff **skb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len); -extern int ip_nat_mangle_udp_packet(struct sk_buff **skb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len); -extern int ip_nat_seq_adjust(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo); - -/* Setup NAT on this expected conntrack so it follows master, but goes - * to port ct->master->saved_proto. */ -extern void ip_nat_follow_master(struct ip_conntrack *ct, - struct ip_conntrack_expect *this); -#endif diff --git a/include/linux/netfilter_ipv4/ip_nat_pptp.h b/include/linux/netfilter_ipv4/ip_nat_pptp.h deleted file mode 100644 index 36668bf0f373..000000000000 --- a/include/linux/netfilter_ipv4/ip_nat_pptp.h +++ /dev/null @@ -1,11 +0,0 @@ -/* PPTP constants and structs */ -#ifndef _NAT_PPTP_H -#define _NAT_PPTP_H - -/* conntrack private data */ -struct ip_nat_pptp { - __be16 pns_call_id; /* NAT'ed PNS call id */ - __be16 pac_call_id; /* NAT'ed PAC call id */ -}; - -#endif /* _NAT_PPTP_H */ diff --git a/include/linux/netfilter_ipv4/ip_nat_protocol.h b/include/linux/netfilter_ipv4/ip_nat_protocol.h deleted file mode 100644 index 612a43614e7b..000000000000 --- a/include/linux/netfilter_ipv4/ip_nat_protocol.h +++ /dev/null @@ -1,74 +0,0 @@ -/* Header for use in defining a given protocol. 
*/ -#ifndef _IP_NAT_PROTOCOL_H -#define _IP_NAT_PROTOCOL_H -#include -#include - -#include -#include - -struct iphdr; -struct ip_nat_range; - -struct ip_nat_protocol -{ - /* Protocol name */ - const char *name; - - /* Protocol number. */ - unsigned int protonum; - - struct module *me; - - /* Translate a packet to the target according to manip type. - Return true if succeeded. */ - int (*manip_pkt)(struct sk_buff **pskb, - unsigned int iphdroff, - const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype); - - /* Is the manipable part of the tuple between min and max incl? */ - int (*in_range)(const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype, - const union ip_conntrack_manip_proto *min, - const union ip_conntrack_manip_proto *max); - - /* Alter the per-proto part of the tuple (depending on - maniptype), to give a unique tuple in the given range if - possible; return false if not. Per-protocol part of tuple - is initialized to the incoming packet. */ - int (*unique_tuple)(struct ip_conntrack_tuple *tuple, - const struct ip_nat_range *range, - enum ip_nat_manip_type maniptype, - const struct ip_conntrack *conntrack); - - int (*range_to_nfattr)(struct sk_buff *skb, - const struct ip_nat_range *range); - - int (*nfattr_to_range)(struct nfattr *tb[], - struct ip_nat_range *range); -}; - -/* Protocol registration. */ -extern int ip_nat_protocol_register(struct ip_nat_protocol *proto); -extern void ip_nat_protocol_unregister(struct ip_nat_protocol *proto); - -extern struct ip_nat_protocol *ip_nat_proto_find_get(u_int8_t protocol); -extern void ip_nat_proto_put(struct ip_nat_protocol *proto); - -/* Built-in protocols. */ -extern struct ip_nat_protocol ip_nat_protocol_tcp; -extern struct ip_nat_protocol ip_nat_protocol_udp; -extern struct ip_nat_protocol ip_nat_protocol_icmp; -extern struct ip_nat_protocol ip_nat_unknown_protocol; - -extern int init_protocols(void) __init; -extern void cleanup_protocols(void); -extern struct ip_nat_protocol *find_nat_proto(u_int16_t protonum); - -extern int ip_nat_port_range_to_nfattr(struct sk_buff *skb, - const struct ip_nat_range *range); -extern int ip_nat_port_nfattr_to_range(struct nfattr *tb[], - struct ip_nat_range *range); - -#endif /*_IP_NAT_PROTO_H*/ diff --git a/include/linux/netfilter_ipv4/ip_nat_rule.h b/include/linux/netfilter_ipv4/ip_nat_rule.h deleted file mode 100644 index 73b9552e6a89..000000000000 --- a/include/linux/netfilter_ipv4/ip_nat_rule.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _IP_NAT_RULE_H -#define _IP_NAT_RULE_H -#include -#include -#include - -#ifdef __KERNEL__ - -extern int ip_nat_rule_init(void) __init; -extern void ip_nat_rule_cleanup(void); -extern int ip_nat_rule_find(struct sk_buff **pskb, - unsigned int hooknum, - const struct net_device *in, - const struct net_device *out, - struct ip_conntrack *ct, - struct ip_nat_info *info); - -extern unsigned int -alloc_null_binding(struct ip_conntrack *conntrack, - struct ip_nat_info *info, - unsigned int hooknum); - -extern unsigned int -alloc_null_binding_confirmed(struct ip_conntrack *conntrack, - struct ip_nat_info *info, - unsigned int hooknum); -#endif -#endif /* _IP_NAT_RULE_H */ diff --git a/include/linux/netfilter_ipv4/ipt_SAME.h b/include/linux/netfilter_ipv4/ipt_SAME.h index cc4c0b2269af..be6e682a85ec 100644 --- a/include/linux/netfilter_ipv4/ipt_SAME.h +++ b/include/linux/netfilter_ipv4/ipt_SAME.h @@ -13,7 +13,7 @@ struct ipt_same_info u_int32_t *iparray; /* hangs off end. 
*/ - struct ip_nat_range range[IPT_SAME_MAX_RANGE]; + struct nf_nat_range range[IPT_SAME_MAX_RANGE]; }; #endif /*_IPT_SAME_H*/ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 0e690e34c00b..1c6b8bd09b9a 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -250,6 +250,11 @@ static inline int nf_ct_is_dying(struct nf_conn *ct) return test_bit(IPS_DYING_BIT, &ct->status); } +static inline int nf_ct_is_untracked(const struct sk_buff *skb) +{ + return (skb->nfct == &nf_conntrack_untracked.ct_general); +} + extern unsigned int nf_conntrack_htable_size; extern int nf_conntrack_checksum; extern atomic_t nf_conntrack_count; diff --git a/include/net/netfilter/nf_conntrack_compat.h b/include/net/netfilter/nf_conntrack_compat.h deleted file mode 100644 index 6f84c1f7fcd4..000000000000 --- a/include/net/netfilter/nf_conntrack_compat.h +++ /dev/null @@ -1,145 +0,0 @@ -#ifndef _NF_CONNTRACK_COMPAT_H -#define _NF_CONNTRACK_COMPAT_H - -#ifdef __KERNEL__ - -#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE) - -#include -#include - -#ifdef CONFIG_IP_NF_CONNTRACK_MARK -static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb, - u_int32_t *ctinfo) -{ - struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo); - - if (ct) - return &ct->mark; - else - return NULL; -} -#endif /* CONFIG_IP_NF_CONNTRACK_MARK */ - -#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK -static inline u_int32_t *nf_ct_get_secmark(const struct sk_buff *skb, - u_int32_t *ctinfo) -{ - struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo); - - if (ct) - return &ct->secmark; - else - return NULL; -} -#endif /* CONFIG_IP_NF_CONNTRACK_SECMARK */ - -#ifdef CONFIG_IP_NF_CT_ACCT -static inline struct ip_conntrack_counter * -nf_ct_get_counters(const struct sk_buff *skb) -{ - enum ip_conntrack_info ctinfo; - struct ip_conntrack *ct = ip_conntrack_get(skb, &ctinfo); - - if (ct) - return ct->counters; - else - return NULL; -} -#endif /* CONFIG_IP_NF_CT_ACCT */ - -static inline int nf_ct_is_untracked(const struct sk_buff *skb) -{ - return (skb->nfct == &ip_conntrack_untracked.ct_general); -} - -static inline void nf_ct_untrack(struct sk_buff *skb) -{ - skb->nfct = &ip_conntrack_untracked.ct_general; -} - -static inline int nf_ct_get_ctinfo(const struct sk_buff *skb, - enum ip_conntrack_info *ctinfo) -{ - struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo); - return (ct != NULL); -} - -static inline int nf_ct_l3proto_try_module_get(unsigned short l3proto) -{ - need_conntrack(); - return l3proto == PF_INET ? 
0 : -1; -} - -static inline void nf_ct_l3proto_module_put(unsigned short l3proto) -{ -} - -#else /* CONFIG_IP_NF_CONNTRACK */ - -#include -#include - -#ifdef CONFIG_NF_CONNTRACK_MARK - -static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb, - u_int32_t *ctinfo) -{ - struct nf_conn *ct = nf_ct_get(skb, ctinfo); - - if (ct) - return &ct->mark; - else - return NULL; -} -#endif /* CONFIG_NF_CONNTRACK_MARK */ - -#ifdef CONFIG_NF_CONNTRACK_SECMARK -static inline u_int32_t *nf_ct_get_secmark(const struct sk_buff *skb, - u_int32_t *ctinfo) -{ - struct nf_conn *ct = nf_ct_get(skb, ctinfo); - - if (ct) - return &ct->secmark; - else - return NULL; -} -#endif /* CONFIG_NF_CONNTRACK_MARK */ - -#ifdef CONFIG_NF_CT_ACCT -static inline struct ip_conntrack_counter * -nf_ct_get_counters(const struct sk_buff *skb) -{ - enum ip_conntrack_info ctinfo; - struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - - if (ct) - return ct->counters; - else - return NULL; -} -#endif /* CONFIG_NF_CT_ACCT */ - -static inline int nf_ct_is_untracked(const struct sk_buff *skb) -{ - return (skb->nfct == &nf_conntrack_untracked.ct_general); -} - -static inline void nf_ct_untrack(struct sk_buff *skb) -{ - skb->nfct = &nf_conntrack_untracked.ct_general; -} - -static inline int nf_ct_get_ctinfo(const struct sk_buff *skb, - enum ip_conntrack_info *ctinfo) -{ - struct nf_conn *ct = nf_ct_get(skb, ctinfo); - return (ct != NULL); -} - -#endif /* CONFIG_IP_NF_CONNTRACK */ - -#endif /* __KERNEL__ */ - -#endif /* _NF_CONNTRACK_COMPAT_H */ diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h index f191c672bcc6..e76565459ad9 100644 --- a/include/net/netfilter/nf_nat_rule.h +++ b/include/net/netfilter/nf_nat_rule.h @@ -4,16 +4,6 @@ #include #include -/* Compatibility definitions for ipt_FOO modules */ -#define ip_nat_range nf_nat_range -#define ip_conntrack_tuple nf_conntrack_tuple -#define ip_conntrack_get nf_ct_get -#define ip_conntrack nf_conn -#define ip_nat_setup_info nf_nat_setup_info -#define ip_nat_multi_range_compat nf_nat_multi_range_compat -#define ip_ct_iterate_cleanup nf_ct_iterate_cleanup -#define IP_NF_ASSERT NF_CT_ASSERT - extern int nf_nat_rule_init(void) __init; extern void nf_nat_rule_cleanup(void); extern int nf_nat_rule_find(struct sk_buff **pskb, diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 601808c796ec..46509fae9fd8 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -30,188 +30,6 @@ config NF_CONNTRACK_PROC_COMPAT If unsure, say Y. -# connection tracking, helpers and protocols -config IP_NF_CT_ACCT - bool "Connection tracking flow accounting" - depends on IP_NF_CONNTRACK - help - If this option is enabled, the connection tracking code will - keep per-flow packet and byte counters. - - Those counters can be used for flow-based accounting or the - `connbytes' match. - - If unsure, say `N'. - -config IP_NF_CONNTRACK_MARK - bool 'Connection mark tracking support' - depends on IP_NF_CONNTRACK - help - This option enables support for connection marks, used by the - `CONNMARK' target and `connmark' match. Similar to the mark value - of packets, but this mark value is kept in the conntrack session - instead of the individual packets. - -config IP_NF_CONNTRACK_SECMARK - bool 'Connection tracking security mark support' - depends on IP_NF_CONNTRACK && NETWORK_SECMARK - help - This option enables security markings to be applied to - connections. 
Typically they are copied to connections from - packets using the CONNSECMARK target and copied back from - connections to packets with the same target, with the packets - being originally labeled via SECMARK. - - If unsure, say 'N'. - -config IP_NF_CONNTRACK_EVENTS - bool "Connection tracking events (EXPERIMENTAL)" - depends on EXPERIMENTAL && IP_NF_CONNTRACK - help - If this option is enabled, the connection tracking code will - provide a notifier chain that can be used by other kernel code - to get notified about changes in the connection tracking state. - - IF unsure, say `N'. - -config IP_NF_CONNTRACK_NETLINK - tristate 'Connection tracking netlink interface (EXPERIMENTAL)' - depends on EXPERIMENTAL && IP_NF_CONNTRACK && NETFILTER_NETLINK - depends on IP_NF_CONNTRACK!=y || NETFILTER_NETLINK!=m - depends on IP_NF_NAT=n || IP_NF_NAT - help - This option enables support for a netlink-based userspace interface - - -config IP_NF_CT_PROTO_SCTP - tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)' - depends on IP_NF_CONNTRACK && EXPERIMENTAL - help - With this option enabled, the connection tracking code will - be able to do state tracking on SCTP connections. - - If you want to compile it as a module, say M here and read - . If unsure, say `N'. - -config IP_NF_FTP - tristate "FTP protocol support" - depends on IP_NF_CONNTRACK - help - Tracking FTP connections is problematic: special helpers are - required for tracking them, and doing masquerading and other forms - of Network Address Translation on them. - - To compile it as a module, choose M here. If unsure, say Y. - -config IP_NF_IRC - tristate "IRC protocol support" - depends on IP_NF_CONNTRACK - ---help--- - There is a commonly-used extension to IRC called - Direct Client-to-Client Protocol (DCC). This enables users to send - files to each other, and also chat to each other without the need - of a server. DCC Sending is used anywhere you send files over IRC, - and DCC Chat is most commonly used by Eggdrop bots. If you are - using NAT, this extension will enable you to send files and initiate - chats. Note that you do NOT need this extension to get files or - have others initiate chats, or everything else in IRC. - - To compile it as a module, choose M here. If unsure, say Y. - -config IP_NF_NETBIOS_NS - tristate "NetBIOS name service protocol support (EXPERIMENTAL)" - depends on IP_NF_CONNTRACK && EXPERIMENTAL - help - NetBIOS name service requests are sent as broadcast messages from an - unprivileged port and responded to with unicast messages to the - same port. This make them hard to firewall properly because connection - tracking doesn't deal with broadcasts. This helper tracks locally - originating NetBIOS name service requests and the corresponding - responses. It relies on correct IP address configuration, specifically - netmask and broadcast address. When properly configured, the output - of "ip address show" should look similar to this: - - $ ip -4 address show eth0 - 4: eth0: mtu 1500 qdisc pfifo_fast qlen 1000 - inet 172.16.2.252/24 brd 172.16.2.255 scope global eth0 - - To compile it as a module, choose M here. If unsure, say N. - -config IP_NF_TFTP - tristate "TFTP protocol support" - depends on IP_NF_CONNTRACK - help - TFTP connection tracking helper, this is required depending - on how restrictive your ruleset is. - If you are using a tftp client behind -j SNAT or -j MASQUERADING - you will need this. - - To compile it as a module, choose M here. If unsure, say Y. 
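In practice the helpers above are plain loadable modules; as the Amanda helper further down notes ("insmod ip_conntrack_amanda.o [master_timeout=n]"), they are pulled in by hand or via modprobe. A minimal sketch for the TFTP case on the ip_conntrack stack being removed here, with module names taken from the Makefile hunk below:

    modprobe ip_conntrack_tftp   # track TFTP control traffic (port 69)
    modprobe ip_nat_tftp         # only needed behind -j SNAT / -j MASQUERADE
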
- -config IP_NF_AMANDA - tristate "Amanda backup protocol support" - depends on IP_NF_CONNTRACK - select TEXTSEARCH - select TEXTSEARCH_KMP - help - If you are running the Amanda backup package - on this machine or machines that will be MASQUERADED through this - machine, then you may want to enable this feature. This allows the - connection tracking and natting code to allow the sub-channels that - Amanda requires for communication of the backup data, messages and - index. - - To compile it as a module, choose M here. If unsure, say Y. - -config IP_NF_PPTP - tristate 'PPTP protocol support' - depends on IP_NF_CONNTRACK - help - This module adds support for PPTP (Point to Point Tunnelling - Protocol, RFC2637) connection tracking and NAT. - - If you are running PPTP sessions over a stateful firewall or NAT - box, you may want to enable this feature. - - Please note that not all PPTP modes of operation are supported yet. - For more info, read top of the file - net/ipv4/netfilter/ip_conntrack_pptp.c - - If you want to compile it as a module, say M here and read - Documentation/modules.txt. If unsure, say `N'. - -config IP_NF_H323 - tristate 'H.323 protocol support (EXPERIMENTAL)' - depends on IP_NF_CONNTRACK && EXPERIMENTAL - help - H.323 is a VoIP signalling protocol from ITU-T. As one of the most - important VoIP protocols, it is widely used by voice hardware and - software including voice gateways, IP phones, Netmeeting, OpenPhone, - Gnomemeeting, etc. - - With this module you can support H.323 on a connection tracking/NAT - firewall. - - This module supports RAS, Fast Start, H.245 Tunnelling, Call - Forwarding, RTP/RTCP and T.120 based audio, video, fax, chat, - whiteboard, file transfer, etc. For more information, please - visit http://nath323.sourceforge.net/. - - If you want to compile it as a module, say 'M' here and read - Documentation/modules.txt. If unsure, say 'N'. - -config IP_NF_SIP - tristate "SIP protocol support (EXPERIMENTAL)" - depends on IP_NF_CONNTRACK && EXPERIMENTAL - help - SIP is an application-layer control protocol that can establish, - modify, and terminate multimedia sessions (conferences) such as - Internet telephony calls. With the ip_conntrack_sip and - the ip_nat_sip modules you can support the protocol on a connection - tracking/NATing firewall. - - To compile it as a module, choose M here. If unsure, say Y. - config IP_NF_QUEUE tristate "IP Userspace queueing via NETLINK (OBSOLETE)" help @@ -361,17 +179,6 @@ config IP_NF_TARGET_ULOG To compile it as a module, choose M here. If unsure, say N. -# NAT + specific targets: ip_conntrack -config IP_NF_NAT - tristate "Full NAT" - depends on IP_NF_IPTABLES && IP_NF_CONNTRACK - help - The Full NAT option allows masquerading, port forwarding and other - forms of full Network Address Port Translation. It is controlled by - the `nat' table in iptables: see the man page for iptables(8). - - To compile it as a module, choose M here. If unsure, say N. - # NAT + specific targets: nf_conntrack config NF_NAT tristate "Full NAT" @@ -383,11 +190,6 @@ config NF_NAT To compile it as a module, choose M here. If unsure, say N. 
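The `nat' table that the NF_NAT entry above refers to is driven entirely from userspace via iptables(8). As a rough illustration only (eth0 and 192.168.1.10 are placeholders, not anything defined by this patch), masquerading plus a simple port forward would look like:

    iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
    iptables -t nat -A PREROUTING -i eth0 -p tcp --dport 8080 \
            -j DNAT --to-destination 192.168.1.10:80
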
-config IP_NF_NAT_NEEDED - bool - depends on IP_NF_NAT - default y - config NF_NAT_NEEDED bool depends on NF_NAT @@ -395,7 +197,7 @@ config NF_NAT_NEEDED config IP_NF_TARGET_MASQUERADE tristate "MASQUERADE target support" - depends on (NF_NAT || IP_NF_NAT) + depends on NF_NAT help Masquerading is a special case of NAT: all outgoing connections are changed to seem to come from a particular interface's address, and @@ -407,7 +209,7 @@ config IP_NF_TARGET_MASQUERADE config IP_NF_TARGET_REDIRECT tristate "REDIRECT target support" - depends on (NF_NAT || IP_NF_NAT) + depends on NF_NAT help REDIRECT is a special case of NAT: all incoming connections are mapped onto the incoming interface's address, causing the packets to @@ -418,7 +220,7 @@ config IP_NF_TARGET_REDIRECT config IP_NF_TARGET_NETMAP tristate "NETMAP target support" - depends on (NF_NAT || IP_NF_NAT) + depends on NF_NAT help NETMAP is an implementation of static 1:1 NAT mapping of network addresses. It maps the network address part, while keeping the host @@ -429,28 +231,13 @@ config IP_NF_TARGET_NETMAP config IP_NF_TARGET_SAME tristate "SAME target support" - depends on (NF_NAT || IP_NF_NAT) + depends on NF_NAT help This option adds a `SAME' target, which works like the standard SNAT target, but attempts to give clients the same IP for all connections. To compile it as a module, choose M here. If unsure, say N. -config IP_NF_NAT_SNMP_BASIC - tristate "Basic SNMP-ALG support (EXPERIMENTAL)" - depends on EXPERIMENTAL && IP_NF_NAT - ---help--- - - This module implements an Application Layer Gateway (ALG) for - SNMP payloads. In conjunction with NAT, it allows a network - management system to access multiple private networks with - conflicting addresses. It works by modifying IP addresses - inside SNMP payloads to match IP-layer NAT mapping. - - This is the "basic" form of SNMP-ALG, as described in RFC 2962 - - To compile it as a module, choose M here. If unsure, say N. 
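Taken together, these Kconfig hunks replace each ip_conntrack-era option with an nf_conntrack counterpart of the same shape. A hedged sketch of the mapping for an FTP-capable NAT box (the exact option set depends on which helpers a given ruleset needs): where a .config previously carried IP_NF_CONNTRACK, IP_NF_NAT, IP_NF_FTP and IP_NF_NAT_FTP, it would now carry roughly

    CONFIG_NF_CONNTRACK=m
    CONFIG_NF_CONNTRACK_IPV4=m
    CONFIG_NF_CONNTRACK_FTP=m
    CONFIG_NF_NAT=m
    CONFIG_NF_NAT_FTP=m    # defaulted on via "default NF_NAT && NF_CONNTRACK_FTP"
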
- config NF_NAT_SNMP_BASIC tristate "Basic SNMP-ALG support (EXPERIMENTAL)" depends on EXPERIMENTAL && NF_NAT @@ -477,78 +264,37 @@ config NF_NAT_PROTO_GRE tristate depends on NF_NAT && NF_CT_PROTO_GRE -config IP_NF_NAT_FTP - tristate - depends on IP_NF_IPTABLES && IP_NF_CONNTRACK && IP_NF_NAT - default IP_NF_NAT && IP_NF_FTP - config NF_NAT_FTP tristate depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_FTP -config IP_NF_NAT_IRC - tristate - depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n - default IP_NF_NAT if IP_NF_IRC=y - default m if IP_NF_IRC=m - config NF_NAT_IRC tristate depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_IRC -config IP_NF_NAT_TFTP - tristate - depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n - default IP_NF_NAT if IP_NF_TFTP=y - default m if IP_NF_TFTP=m - config NF_NAT_TFTP tristate depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_TFTP -config IP_NF_NAT_AMANDA - tristate - depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n - default IP_NF_NAT if IP_NF_AMANDA=y - default m if IP_NF_AMANDA=m - config NF_NAT_AMANDA tristate depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_AMANDA -config IP_NF_NAT_PPTP - tristate - depends on IP_NF_NAT!=n && IP_NF_PPTP!=n - default IP_NF_NAT if IP_NF_PPTP=y - default m if IP_NF_PPTP=m - config NF_NAT_PPTP tristate depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_PPTP select NF_NAT_PROTO_GRE -config IP_NF_NAT_H323 - tristate - depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n - default IP_NF_NAT if IP_NF_H323=y - default m if IP_NF_H323=m - config NF_NAT_H323 tristate depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_H323 -config IP_NF_NAT_SIP - tristate - depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n - default IP_NF_NAT if IP_NF_SIP=y - default m if IP_NF_SIP=m - config NF_NAT_SIP tristate depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT @@ -606,9 +352,8 @@ config IP_NF_TARGET_TTL config IP_NF_TARGET_CLUSTERIP tristate "CLUSTERIP target support (EXPERIMENTAL)" depends on IP_NF_MANGLE && EXPERIMENTAL - depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4 - select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK - select NF_CONNTRACK_MARK if NF_CONNTRACK_IPV4 + depends on NF_CONNTRACK_IPV4 + select NF_CONNTRACK_MARK help The CLUSTERIP target allows you to build load-balancing clusters of network servers without having a dedicated load-balancing diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 6625ec68180c..409d273f6f82 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -2,8 +2,6 @@ # Makefile for the netfilter modules on top of IPv4. 
# -# objects for the standalone - connection tracking / NAT -ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o # objects for l3 independent conntrack nf_conntrack_ipv4-objs := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y) @@ -12,53 +10,14 @@ nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o endif endif -ip_nat-objs := ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o -nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o -ifneq ($(CONFIG_NF_NAT),) +nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o iptable_nat-objs := nf_nat_rule.o nf_nat_standalone.o -else -iptable_nat-objs := ip_nat_rule.o ip_nat_standalone.o -endif - -ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o -ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o - -ip_conntrack_h323-objs := ip_conntrack_helper_h323.o ../../netfilter/nf_conntrack_h323_asn1.o -ip_nat_h323-objs := ip_nat_helper_h323.o # connection tracking -obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o -obj-$(CONFIG_IP_NF_NAT) += ip_nat.o obj-$(CONFIG_NF_NAT) += nf_nat.o -# conntrack netlink interface -obj-$(CONFIG_IP_NF_CONNTRACK_NETLINK) += ip_conntrack_netlink.o - - -# SCTP protocol connection tracking -obj-$(CONFIG_IP_NF_CT_PROTO_SCTP) += ip_conntrack_proto_sctp.o - -# connection tracking helpers -obj-$(CONFIG_IP_NF_H323) += ip_conntrack_h323.o -obj-$(CONFIG_IP_NF_PPTP) += ip_conntrack_pptp.o -obj-$(CONFIG_IP_NF_AMANDA) += ip_conntrack_amanda.o -obj-$(CONFIG_IP_NF_TFTP) += ip_conntrack_tftp.o -obj-$(CONFIG_IP_NF_FTP) += ip_conntrack_ftp.o -obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o -obj-$(CONFIG_IP_NF_SIP) += ip_conntrack_sip.o -obj-$(CONFIG_IP_NF_NETBIOS_NS) += ip_conntrack_netbios_ns.o - -# NAT helpers (ip_conntrack) -obj-$(CONFIG_IP_NF_NAT_H323) += ip_nat_h323.o -obj-$(CONFIG_IP_NF_NAT_PPTP) += ip_nat_pptp.o -obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o -obj-$(CONFIG_IP_NF_NAT_TFTP) += ip_nat_tftp.o -obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o -obj-$(CONFIG_IP_NF_NAT_IRC) += ip_nat_irc.o -obj-$(CONFIG_IP_NF_NAT_SIP) += ip_nat_sip.o - # NAT helpers (nf_conntrack) obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o @@ -78,7 +37,6 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o # the three instances of ip_tables obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o -obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o obj-$(CONFIG_NF_NAT) += iptable_nat.o obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o @@ -100,7 +58,6 @@ obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o -obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c deleted file mode 100644 index c40762c67d0e..000000000000 --- 
a/net/ipv4/netfilter/ip_conntrack_amanda.c +++ /dev/null @@ -1,229 +0,0 @@ -/* Amanda extension for IP connection tracking, Version 0.2 - * (C) 2002 by Brian J. Murrell - * based on HW's ip_conntrack_irc.c as well as other modules - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * Module load syntax: - * insmod ip_conntrack_amanda.o [master_timeout=n] - * - * Where master_timeout is the timeout (in seconds) of the master - * connection (port 10080). This defaults to 5 minutes but if - * your clients take longer than 5 minutes to do their work - * before getting back to the Amanda server, you can increase - * this value. - * - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -static unsigned int master_timeout = 300; -static char *ts_algo = "kmp"; - -MODULE_AUTHOR("Brian J. Murrell "); -MODULE_DESCRIPTION("Amanda connection tracking module"); -MODULE_LICENSE("GPL"); -module_param(master_timeout, uint, 0600); -MODULE_PARM_DESC(master_timeout, "timeout for the master connection"); -module_param(ts_algo, charp, 0400); -MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)"); - -unsigned int (*ip_nat_amanda_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack_expect *exp); -EXPORT_SYMBOL_GPL(ip_nat_amanda_hook); - -enum amanda_strings { - SEARCH_CONNECT, - SEARCH_NEWLINE, - SEARCH_DATA, - SEARCH_MESG, - SEARCH_INDEX, -}; - -static struct { - char *string; - size_t len; - struct ts_config *ts; -} search[] = { - [SEARCH_CONNECT] = { - .string = "CONNECT ", - .len = 8, - }, - [SEARCH_NEWLINE] = { - .string = "\n", - .len = 1, - }, - [SEARCH_DATA] = { - .string = "DATA ", - .len = 5, - }, - [SEARCH_MESG] = { - .string = "MESG ", - .len = 5, - }, - [SEARCH_INDEX] = { - .string = "INDEX ", - .len = 6, - }, -}; - -static int help(struct sk_buff **pskb, - struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) -{ - struct ts_state ts; - struct ip_conntrack_expect *exp; - unsigned int dataoff, start, stop, off, i; - char pbuf[sizeof("65535")], *tmp; - u_int16_t port, len; - int ret = NF_ACCEPT; - typeof(ip_nat_amanda_hook) ip_nat_amanda; - - /* Only look at packets from the Amanda server */ - if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) - return NF_ACCEPT; - - /* increase the UDP timeout of the master connection as replies from - * Amanda clients to the server can be quite delayed */ - ip_ct_refresh(ct, *pskb, master_timeout * HZ); - - /* No data? 
*/ - dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); - if (dataoff >= (*pskb)->len) { - if (net_ratelimit()) - printk("amanda_help: skblen = %u\n", (*pskb)->len); - return NF_ACCEPT; - } - - memset(&ts, 0, sizeof(ts)); - start = skb_find_text(*pskb, dataoff, (*pskb)->len, - search[SEARCH_CONNECT].ts, &ts); - if (start == UINT_MAX) - goto out; - start += dataoff + search[SEARCH_CONNECT].len; - - memset(&ts, 0, sizeof(ts)); - stop = skb_find_text(*pskb, start, (*pskb)->len, - search[SEARCH_NEWLINE].ts, &ts); - if (stop == UINT_MAX) - goto out; - stop += start; - - for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) { - memset(&ts, 0, sizeof(ts)); - off = skb_find_text(*pskb, start, stop, search[i].ts, &ts); - if (off == UINT_MAX) - continue; - off += start + search[i].len; - - len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off); - if (skb_copy_bits(*pskb, off, pbuf, len)) - break; - pbuf[len] = '\0'; - - port = simple_strtoul(pbuf, &tmp, 10); - len = tmp - pbuf; - if (port == 0 || len > 5) - break; - - exp = ip_conntrack_expect_alloc(ct); - if (exp == NULL) { - ret = NF_DROP; - goto out; - } - - exp->expectfn = NULL; - exp->flags = 0; - - exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip; - exp->tuple.src.u.tcp.port = 0; - exp->tuple.dst.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip; - exp->tuple.dst.protonum = IPPROTO_TCP; - exp->tuple.dst.u.tcp.port = htons(port); - - exp->mask.src.ip = htonl(0xFFFFFFFF); - exp->mask.src.u.tcp.port = 0; - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.protonum = 0xFF; - exp->mask.dst.u.tcp.port = htons(0xFFFF); - - /* RCU read locked by nf_hook_slow */ - ip_nat_amanda = rcu_dereference(ip_nat_amanda_hook); - if (ip_nat_amanda) - ret = ip_nat_amanda(pskb, ctinfo, off - dataoff, - len, exp); - else if (ip_conntrack_expect_related(exp) != 0) - ret = NF_DROP; - ip_conntrack_expect_put(exp); - } - -out: - return ret; -} - -static struct ip_conntrack_helper amanda_helper = { - .max_expected = 3, - .timeout = 180, - .me = THIS_MODULE, - .help = help, - .name = "amanda", - - .tuple = { .src = { .u = { .udp = {.port = __constant_htons(10080) } } }, - .dst = { .protonum = IPPROTO_UDP }, - }, - .mask = { .src = { .u = { 0xFFFF } }, - .dst = { .protonum = 0xFF }, - }, -}; - -static void __exit ip_conntrack_amanda_fini(void) -{ - int i; - - ip_conntrack_helper_unregister(&amanda_helper); - for (i = 0; i < ARRAY_SIZE(search); i++) - textsearch_destroy(search[i].ts); -} - -static int __init ip_conntrack_amanda_init(void) -{ - int ret, i; - - ret = -ENOMEM; - for (i = 0; i < ARRAY_SIZE(search); i++) { - search[i].ts = textsearch_prepare(ts_algo, search[i].string, - search[i].len, - GFP_KERNEL, TS_AUTOLOAD); - if (search[i].ts == NULL) - goto err; - } - ret = ip_conntrack_helper_register(&amanda_helper); - if (ret < 0) - goto err; - return 0; - -err: - for (; i >= 0; i--) { - if (search[i].ts) - textsearch_destroy(search[i].ts); - } - return ret; -} - -module_init(ip_conntrack_amanda_init); -module_exit(ip_conntrack_amanda_fini); diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c deleted file mode 100644 index 986c0c81294f..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ /dev/null @@ -1,1549 +0,0 @@ -/* Connection state tracking for netfilter. This is separated from, - but required by, the NAT layer; it can also be used by an iptables - extension. 
*/ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * 23 Apr 2001: Harald Welte - * - new API and handling of conntrack/nat helpers - * - now capable of multiple expectations for one master - * 16 Jul 2002: Harald Welte - * - add usage/reference counts to ip_conntrack_expect - * - export ip_conntrack[_expect]_{find_get,put} functions - * */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* ip_conntrack_lock protects the main hash table, protocol/helper/expected - registrations, conntrack timers*/ -#include -#include -#include -#include - -#define IP_CONNTRACK_VERSION "2.4" - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -DEFINE_RWLOCK(ip_conntrack_lock); - -/* ip_conntrack_standalone needs this */ -atomic_t ip_conntrack_count = ATOMIC_INIT(0); - -void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL; -LIST_HEAD(ip_conntrack_expect_list); -struct ip_conntrack_protocol *ip_ct_protos[MAX_IP_CT_PROTO] __read_mostly; -static LIST_HEAD(helpers); -unsigned int ip_conntrack_htable_size __read_mostly = 0; -int ip_conntrack_max __read_mostly; -struct list_head *ip_conntrack_hash __read_mostly; -static struct kmem_cache *ip_conntrack_cachep __read_mostly; -static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly; -struct ip_conntrack ip_conntrack_untracked; -unsigned int ip_ct_log_invalid __read_mostly; -static LIST_HEAD(unconfirmed); -static int ip_conntrack_vmalloc __read_mostly; - -static unsigned int ip_conntrack_next_id; -static unsigned int ip_conntrack_expect_next_id; -#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS -ATOMIC_NOTIFIER_HEAD(ip_conntrack_chain); -ATOMIC_NOTIFIER_HEAD(ip_conntrack_expect_chain); - -DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache); - -/* deliver cached events and clear cache entry - must be called with locally - * disabled softirqs */ -static inline void -__ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache) -{ - DEBUGP("ecache: delivering events for %p\n", ecache->ct); - if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events) - atomic_notifier_call_chain(&ip_conntrack_chain, ecache->events, - ecache->ct); - ecache->events = 0; - ip_conntrack_put(ecache->ct); - ecache->ct = NULL; -} - -/* Deliver all cached events for a particular conntrack. 
This is called - * by code prior to async packet handling or freeing the skb */ -void ip_ct_deliver_cached_events(const struct ip_conntrack *ct) -{ - struct ip_conntrack_ecache *ecache; - - local_bh_disable(); - ecache = &__get_cpu_var(ip_conntrack_ecache); - if (ecache->ct == ct) - __ip_ct_deliver_cached_events(ecache); - local_bh_enable(); -} - -void __ip_ct_event_cache_init(struct ip_conntrack *ct) -{ - struct ip_conntrack_ecache *ecache; - - /* take care of delivering potentially old events */ - ecache = &__get_cpu_var(ip_conntrack_ecache); - BUG_ON(ecache->ct == ct); - if (ecache->ct) - __ip_ct_deliver_cached_events(ecache); - /* initialize for this conntrack/packet */ - ecache->ct = ct; - nf_conntrack_get(&ct->ct_general); -} - -/* flush the event cache - touches other CPU's data and must not be called while - * packets are still passing through the code */ -static void ip_ct_event_cache_flush(void) -{ - struct ip_conntrack_ecache *ecache; - int cpu; - - for_each_possible_cpu(cpu) { - ecache = &per_cpu(ip_conntrack_ecache, cpu); - if (ecache->ct) - ip_conntrack_put(ecache->ct); - } -} -#else -static inline void ip_ct_event_cache_flush(void) {} -#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */ - -DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat); - -static int ip_conntrack_hash_rnd_initted; -static unsigned int ip_conntrack_hash_rnd; - -static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple, - unsigned int size, unsigned int rnd) -{ - return (jhash_3words((__force u32)tuple->src.ip, - ((__force u32)tuple->dst.ip ^ tuple->dst.protonum), - (tuple->src.u.all | (tuple->dst.u.all << 16)), - rnd) % size); -} - -static u_int32_t -hash_conntrack(const struct ip_conntrack_tuple *tuple) -{ - return __hash_conntrack(tuple, ip_conntrack_htable_size, - ip_conntrack_hash_rnd); -} - -int -ip_ct_get_tuple(const struct iphdr *iph, - const struct sk_buff *skb, - unsigned int dataoff, - struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_protocol *protocol) -{ - /* Never happen */ - if (iph->frag_off & htons(IP_OFFSET)) { - printk("ip_conntrack_core: Frag of proto %u.\n", - iph->protocol); - return 0; - } - - tuple->src.ip = iph->saddr; - tuple->dst.ip = iph->daddr; - tuple->dst.protonum = iph->protocol; - tuple->dst.dir = IP_CT_DIR_ORIGINAL; - - return protocol->pkt_to_tuple(skb, dataoff, tuple); -} - -int -ip_ct_invert_tuple(struct ip_conntrack_tuple *inverse, - const struct ip_conntrack_tuple *orig, - const struct ip_conntrack_protocol *protocol) -{ - inverse->src.ip = orig->dst.ip; - inverse->dst.ip = orig->src.ip; - inverse->dst.protonum = orig->dst.protonum; - inverse->dst.dir = !orig->dst.dir; - - return protocol->invert_tuple(inverse, orig); -} - - -/* ip_conntrack_expect helper functions */ -void ip_ct_unlink_expect(struct ip_conntrack_expect *exp) -{ - IP_NF_ASSERT(!timer_pending(&exp->timeout)); - list_del(&exp->list); - CONNTRACK_STAT_INC(expect_delete); - exp->master->expecting--; - ip_conntrack_expect_put(exp); -} - -static void expectation_timed_out(unsigned long ul_expect) -{ - struct ip_conntrack_expect *exp = (void *)ul_expect; - - write_lock_bh(&ip_conntrack_lock); - ip_ct_unlink_expect(exp); - write_unlock_bh(&ip_conntrack_lock); - ip_conntrack_expect_put(exp); -} - -struct ip_conntrack_expect * -__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple) -{ - struct ip_conntrack_expect *i; - - list_for_each_entry(i, &ip_conntrack_expect_list, list) { - if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) - return i; - } - return NULL; -} - -/* 
Just find a expectation corresponding to a tuple. */ -struct ip_conntrack_expect * -ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple) -{ - struct ip_conntrack_expect *i; - - read_lock_bh(&ip_conntrack_lock); - i = __ip_conntrack_expect_find(tuple); - if (i) - atomic_inc(&i->use); - read_unlock_bh(&ip_conntrack_lock); - - return i; -} - -/* If an expectation for this connection is found, it gets delete from - * global list then returned. */ -static struct ip_conntrack_expect * -find_expectation(const struct ip_conntrack_tuple *tuple) -{ - struct ip_conntrack_expect *i; - - list_for_each_entry(i, &ip_conntrack_expect_list, list) { - /* If master is not in hash table yet (ie. packet hasn't left - this machine yet), how can other end know about expected? - Hence these are not the droids you are looking for (if - master ct never got confirmed, we'd hold a reference to it - and weird things would happen to future packets). */ - if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) - && is_confirmed(i->master)) { - if (i->flags & IP_CT_EXPECT_PERMANENT) { - atomic_inc(&i->use); - return i; - } else if (del_timer(&i->timeout)) { - ip_ct_unlink_expect(i); - return i; - } - } - } - return NULL; -} - -/* delete all expectations for this conntrack */ -void ip_ct_remove_expectations(struct ip_conntrack *ct) -{ - struct ip_conntrack_expect *i, *tmp; - - /* Optimization: most connection never expect any others. */ - if (ct->expecting == 0) - return; - - list_for_each_entry_safe(i, tmp, &ip_conntrack_expect_list, list) { - if (i->master == ct && del_timer(&i->timeout)) { - ip_ct_unlink_expect(i); - ip_conntrack_expect_put(i); - } - } -} - -static void -clean_from_lists(struct ip_conntrack *ct) -{ - DEBUGP("clean_from_lists(%p)\n", ct); - list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); - list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list); - - /* Destroy all pending expectations */ - ip_ct_remove_expectations(ct); -} - -static void -destroy_conntrack(struct nf_conntrack *nfct) -{ - struct ip_conntrack *ct = (struct ip_conntrack *)nfct; - struct ip_conntrack_protocol *proto; - struct ip_conntrack_helper *helper; - typeof(ip_conntrack_destroyed) destroyed; - - DEBUGP("destroy_conntrack(%p)\n", ct); - IP_NF_ASSERT(atomic_read(&nfct->use) == 0); - IP_NF_ASSERT(!timer_pending(&ct->timeout)); - - ip_conntrack_event(IPCT_DESTROY, ct); - set_bit(IPS_DYING_BIT, &ct->status); - - helper = ct->helper; - if (helper && helper->destroy) - helper->destroy(ct); - - /* To make sure we don't get any weird locking issues here: - * destroy_conntrack() MUST NOT be called with a write lock - * to ip_conntrack_lock!!! -HW */ - rcu_read_lock(); - proto = __ip_conntrack_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum); - if (proto && proto->destroy) - proto->destroy(ct); - - destroyed = rcu_dereference(ip_conntrack_destroyed); - if (destroyed) - destroyed(ct); - - rcu_read_unlock(); - - write_lock_bh(&ip_conntrack_lock); - /* Expectations will have been removed in clean_from_lists, - * except TFTP can create an expectation on the first packet, - * before connection is in the list, so we need to clean here, - * too. */ - ip_ct_remove_expectations(ct); - - /* We overload first tuple to link into unconfirmed list. 
*/ - if (!is_confirmed(ct)) { - BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list)); - list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); - } - - CONNTRACK_STAT_INC(delete); - write_unlock_bh(&ip_conntrack_lock); - - if (ct->master) - ip_conntrack_put(ct->master); - - DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct); - ip_conntrack_free(ct); -} - -static void death_by_timeout(unsigned long ul_conntrack) -{ - struct ip_conntrack *ct = (void *)ul_conntrack; - - write_lock_bh(&ip_conntrack_lock); - /* Inside lock so preempt is disabled on module removal path. - * Otherwise we can get spurious warnings. */ - CONNTRACK_STAT_INC(delete_list); - clean_from_lists(ct); - write_unlock_bh(&ip_conntrack_lock); - ip_conntrack_put(ct); -} - -struct ip_conntrack_tuple_hash * -__ip_conntrack_find(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack) -{ - struct ip_conntrack_tuple_hash *h; - unsigned int hash = hash_conntrack(tuple); - - list_for_each_entry(h, &ip_conntrack_hash[hash], list) { - if (tuplehash_to_ctrack(h) != ignored_conntrack && - ip_ct_tuple_equal(tuple, &h->tuple)) { - CONNTRACK_STAT_INC(found); - return h; - } - CONNTRACK_STAT_INC(searched); - } - - return NULL; -} - -/* Find a connection corresponding to a tuple. */ -struct ip_conntrack_tuple_hash * -ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack) -{ - struct ip_conntrack_tuple_hash *h; - - read_lock_bh(&ip_conntrack_lock); - h = __ip_conntrack_find(tuple, ignored_conntrack); - if (h) - atomic_inc(&tuplehash_to_ctrack(h)->ct_general.use); - read_unlock_bh(&ip_conntrack_lock); - - return h; -} - -static void __ip_conntrack_hash_insert(struct ip_conntrack *ct, - unsigned int hash, - unsigned int repl_hash) -{ - ct->id = ++ip_conntrack_next_id; - list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list, - &ip_conntrack_hash[hash]); - list_add(&ct->tuplehash[IP_CT_DIR_REPLY].list, - &ip_conntrack_hash[repl_hash]); -} - -void ip_conntrack_hash_insert(struct ip_conntrack *ct) -{ - unsigned int hash, repl_hash; - - hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); - - write_lock_bh(&ip_conntrack_lock); - __ip_conntrack_hash_insert(ct, hash, repl_hash); - write_unlock_bh(&ip_conntrack_lock); -} - -/* Confirm a connection given skb; places it in hash table */ -int -__ip_conntrack_confirm(struct sk_buff **pskb) -{ - unsigned int hash, repl_hash; - struct ip_conntrack_tuple_hash *h; - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - - ct = ip_conntrack_get(*pskb, &ctinfo); - - /* ipt_REJECT uses ip_conntrack_attach to attach related - ICMP/TCP RST packets in other direction. Actual packet - which created connection will be IP_CT_NEW or for an - expected connection, IP_CT_RELATED. */ - if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) - return NF_ACCEPT; - - hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); - - /* We're not in hash table, and we refuse to set up related - connections for unconfirmed conns. But packet copies and - REJECT will give spurious warnings here. */ - /* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ - - /* No external references means noone else could have - confirmed us. 
*/ - IP_NF_ASSERT(!is_confirmed(ct)); - DEBUGP("Confirming conntrack %p\n", ct); - - write_lock_bh(&ip_conntrack_lock); - - /* See if there's one in the list already, including reverse: - NAT could have grabbed it without realizing, since we're - not in the hash. If there is, we lost race. */ - list_for_each_entry(h, &ip_conntrack_hash[hash], list) - if (ip_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - &h->tuple)) - goto out; - list_for_each_entry(h, &ip_conntrack_hash[repl_hash], list) - if (ip_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, - &h->tuple)) - goto out; - - /* Remove from unconfirmed list */ - list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); - - __ip_conntrack_hash_insert(ct, hash, repl_hash); - /* Timer relative to confirmation time, not original - setting time, otherwise we'd get timer wrap in - weird delay cases. */ - ct->timeout.expires += jiffies; - add_timer(&ct->timeout); - atomic_inc(&ct->ct_general.use); - set_bit(IPS_CONFIRMED_BIT, &ct->status); - CONNTRACK_STAT_INC(insert); - write_unlock_bh(&ip_conntrack_lock); - if (ct->helper) - ip_conntrack_event_cache(IPCT_HELPER, *pskb); -#ifdef CONFIG_IP_NF_NAT_NEEDED - if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) || - test_bit(IPS_DST_NAT_DONE_BIT, &ct->status)) - ip_conntrack_event_cache(IPCT_NATINFO, *pskb); -#endif - ip_conntrack_event_cache(master_ct(ct) ? - IPCT_RELATED : IPCT_NEW, *pskb); - - return NF_ACCEPT; - -out: - CONNTRACK_STAT_INC(insert_failed); - write_unlock_bh(&ip_conntrack_lock); - return NF_DROP; -} - -/* Returns true if a connection correspondings to the tuple (required - for NAT). */ -int -ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack) -{ - struct ip_conntrack_tuple_hash *h; - - read_lock_bh(&ip_conntrack_lock); - h = __ip_conntrack_find(tuple, ignored_conntrack); - read_unlock_bh(&ip_conntrack_lock); - - return h != NULL; -} - -/* There's a small race here where we may free a just-assured - connection. Too bad: we're in trouble anyway. */ -static int early_drop(struct list_head *chain) -{ - /* Traverse backwards: gives us oldest, which is roughly LRU */ - struct ip_conntrack_tuple_hash *h; - struct ip_conntrack *ct = NULL, *tmp; - int dropped = 0; - - read_lock_bh(&ip_conntrack_lock); - list_for_each_entry_reverse(h, chain, list) { - tmp = tuplehash_to_ctrack(h); - if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) { - ct = tmp; - atomic_inc(&ct->ct_general.use); - break; - } - } - read_unlock_bh(&ip_conntrack_lock); - - if (!ct) - return dropped; - - if (del_timer(&ct->timeout)) { - death_by_timeout((unsigned long)ct); - dropped = 1; - CONNTRACK_STAT_INC_ATOMIC(early_drop); - } - ip_conntrack_put(ct); - return dropped; -} - -static struct ip_conntrack_helper * -__ip_conntrack_helper_find( const struct ip_conntrack_tuple *tuple) -{ - struct ip_conntrack_helper *h; - - list_for_each_entry(h, &helpers, list) { - if (ip_ct_tuple_mask_cmp(tuple, &h->tuple, &h->mask)) - return h; - } - return NULL; -} - -struct ip_conntrack_helper * -ip_conntrack_helper_find_get( const struct ip_conntrack_tuple *tuple) -{ - struct ip_conntrack_helper *helper; - - /* need ip_conntrack_lock to assure that helper exists until - * try_module_get() is called */ - read_lock_bh(&ip_conntrack_lock); - - helper = __ip_conntrack_helper_find(tuple); - if (helper) { - /* need to increase module usage count to assure helper will - * not go away while the caller is e.g. 
busy putting a - * conntrack in the hash that uses the helper */ - if (!try_module_get(helper->me)) - helper = NULL; - } - - read_unlock_bh(&ip_conntrack_lock); - - return helper; -} - -void ip_conntrack_helper_put(struct ip_conntrack_helper *helper) -{ - module_put(helper->me); -} - -struct ip_conntrack_protocol * -__ip_conntrack_proto_find(u_int8_t protocol) -{ - return ip_ct_protos[protocol]; -} - -/* this is guaranteed to always return a valid protocol helper, since - * it falls back to generic_protocol */ -struct ip_conntrack_protocol * -ip_conntrack_proto_find_get(u_int8_t protocol) -{ - struct ip_conntrack_protocol *p; - - rcu_read_lock(); - p = __ip_conntrack_proto_find(protocol); - if (p) { - if (!try_module_get(p->me)) - p = &ip_conntrack_generic_protocol; - } - rcu_read_unlock(); - - return p; -} - -void ip_conntrack_proto_put(struct ip_conntrack_protocol *p) -{ - module_put(p->me); -} - -struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig, - struct ip_conntrack_tuple *repl) -{ - struct ip_conntrack *conntrack; - - if (!ip_conntrack_hash_rnd_initted) { - get_random_bytes(&ip_conntrack_hash_rnd, 4); - ip_conntrack_hash_rnd_initted = 1; - } - - /* We don't want any race condition at early drop stage */ - atomic_inc(&ip_conntrack_count); - - if (ip_conntrack_max - && atomic_read(&ip_conntrack_count) > ip_conntrack_max) { - unsigned int hash = hash_conntrack(orig); - /* Try dropping from this hash chain. */ - if (!early_drop(&ip_conntrack_hash[hash])) { - atomic_dec(&ip_conntrack_count); - if (net_ratelimit()) - printk(KERN_WARNING - "ip_conntrack: table full, dropping" - " packet.\n"); - return ERR_PTR(-ENOMEM); - } - } - - conntrack = kmem_cache_zalloc(ip_conntrack_cachep, GFP_ATOMIC); - if (!conntrack) { - DEBUGP("Can't allocate conntrack.\n"); - atomic_dec(&ip_conntrack_count); - return ERR_PTR(-ENOMEM); - } - - atomic_set(&conntrack->ct_general.use, 1); - conntrack->ct_general.destroy = destroy_conntrack; - conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; - conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; - /* Don't set timer yet: wait for confirmation */ - init_timer(&conntrack->timeout); - conntrack->timeout.data = (unsigned long)conntrack; - conntrack->timeout.function = death_by_timeout; - - return conntrack; -} - -void -ip_conntrack_free(struct ip_conntrack *conntrack) -{ - atomic_dec(&ip_conntrack_count); - kmem_cache_free(ip_conntrack_cachep, conntrack); -} - -/* Allocate a new conntrack: we return -ENOMEM if classification - * failed due to stress. Otherwise it really is unclassifiable */ -static struct ip_conntrack_tuple_hash * -init_conntrack(struct ip_conntrack_tuple *tuple, - struct ip_conntrack_protocol *protocol, - struct sk_buff *skb) -{ - struct ip_conntrack *conntrack; - struct ip_conntrack_tuple repl_tuple; - struct ip_conntrack_expect *exp; - - if (!ip_ct_invert_tuple(&repl_tuple, tuple, protocol)) { - DEBUGP("Can't invert tuple.\n"); - return NULL; - } - - conntrack = ip_conntrack_alloc(tuple, &repl_tuple); - if (conntrack == NULL || IS_ERR(conntrack)) - return (struct ip_conntrack_tuple_hash *)conntrack; - - if (!protocol->new(conntrack, skb)) { - ip_conntrack_free(conntrack); - return NULL; - } - - write_lock_bh(&ip_conntrack_lock); - exp = find_expectation(tuple); - - if (exp) { - DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n", - conntrack, exp); - /* Welcome, Mr. Bond. We've been expecting you... 
*/ - __set_bit(IPS_EXPECTED_BIT, &conntrack->status); - conntrack->master = exp->master; -#ifdef CONFIG_IP_NF_CONNTRACK_MARK - conntrack->mark = exp->master->mark; -#endif -#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \ - defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) - /* this is ugly, but there is no other place where to put it */ - conntrack->nat.masq_index = exp->master->nat.masq_index; -#endif -#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK - conntrack->secmark = exp->master->secmark; -#endif - nf_conntrack_get(&conntrack->master->ct_general); - CONNTRACK_STAT_INC(expect_new); - } else { - conntrack->helper = __ip_conntrack_helper_find(&repl_tuple); - - CONNTRACK_STAT_INC(new); - } - - /* Overload tuple linked list to put us in unconfirmed list. */ - list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed); - - write_unlock_bh(&ip_conntrack_lock); - - if (exp) { - if (exp->expectfn) - exp->expectfn(conntrack, exp); - ip_conntrack_expect_put(exp); - } - - return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL]; -} - -/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ -static inline struct ip_conntrack * -resolve_normal_ct(struct sk_buff *skb, - struct ip_conntrack_protocol *proto, - int *set_reply, - unsigned int hooknum, - enum ip_conntrack_info *ctinfo) -{ - struct ip_conntrack_tuple tuple; - struct ip_conntrack_tuple_hash *h; - struct ip_conntrack *ct; - - IP_NF_ASSERT((ip_hdr(skb)->frag_off & htons(IP_OFFSET)) == 0); - - if (!ip_ct_get_tuple(ip_hdr(skb), skb, ip_hdrlen(skb), &tuple,proto)) - return NULL; - - /* look for tuple match */ - h = ip_conntrack_find_get(&tuple, NULL); - if (!h) { - h = init_conntrack(&tuple, proto, skb); - if (!h) - return NULL; - if (IS_ERR(h)) - return (void *)h; - } - ct = tuplehash_to_ctrack(h); - - /* It exists; we have (non-exclusive) reference. */ - if (DIRECTION(h) == IP_CT_DIR_REPLY) { - *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY; - /* Please set reply bit if this packet OK */ - *set_reply = 1; - } else { - /* Once we've had two way comms, always ESTABLISHED. */ - if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { - DEBUGP("ip_conntrack_in: normal packet for %p\n", - ct); - *ctinfo = IP_CT_ESTABLISHED; - } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { - DEBUGP("ip_conntrack_in: related packet for %p\n", - ct); - *ctinfo = IP_CT_RELATED; - } else { - DEBUGP("ip_conntrack_in: new packet for %p\n", - ct); - *ctinfo = IP_CT_NEW; - } - *set_reply = 0; - } - skb->nfct = &ct->ct_general; - skb->nfctinfo = *ctinfo; - return ct; -} - -/* Netfilter hook itself. */ -unsigned int ip_conntrack_in(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - struct ip_conntrack_protocol *proto; - int set_reply = 0; - int ret; - - /* Previously seen (loopback or untracked)? Ignore. */ - if ((*pskb)->nfct) { - CONNTRACK_STAT_INC_ATOMIC(ignore); - return NF_ACCEPT; - } - - /* Never happen */ - if (ip_hdr(*pskb)->frag_off & htons(IP_OFFSET)) { - if (net_ratelimit()) { - printk(KERN_ERR "ip_conntrack_in: Frag of proto %u (hook=%u)\n", - ip_hdr(*pskb)->protocol, hooknum); - } - return NF_DROP; - } - -/* Doesn't cover locally-generated broadcast, so not worth it. */ -#if 0 - /* Ignore broadcast: no `connection'. 
*/ - if ((*pskb)->pkt_type == PACKET_BROADCAST) { - printk("Broadcast packet!\n"); - return NF_ACCEPT; - } else if ((ip_hdr(*pskb)->daddr & htonl(0x000000FF)) - == htonl(0x000000FF)) { - printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n", - NIPQUAD(ip_hdr(*pskb)->saddr), - NIPQUAD(ip_hdr(*pskb)->daddr), - (*pskb)->sk, (*pskb)->pkt_type); - } -#endif - - /* rcu_read_lock()ed by nf_hook_slow */ - proto = __ip_conntrack_proto_find(ip_hdr(*pskb)->protocol); - - /* It may be an special packet, error, unclean... - * inverse of the return code tells to the netfilter - * core what to do with the packet. */ - if (proto->error != NULL - && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) { - CONNTRACK_STAT_INC_ATOMIC(error); - CONNTRACK_STAT_INC_ATOMIC(invalid); - return -ret; - } - - if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo))) { - /* Not valid part of a connection */ - CONNTRACK_STAT_INC_ATOMIC(invalid); - return NF_ACCEPT; - } - - if (IS_ERR(ct)) { - /* Too stressed to deal. */ - CONNTRACK_STAT_INC_ATOMIC(drop); - return NF_DROP; - } - - IP_NF_ASSERT((*pskb)->nfct); - - ret = proto->packet(ct, *pskb, ctinfo); - if (ret < 0) { - /* Invalid: inverse of the return code tells - * the netfilter core what to do*/ - nf_conntrack_put((*pskb)->nfct); - (*pskb)->nfct = NULL; - CONNTRACK_STAT_INC_ATOMIC(invalid); - return -ret; - } - - if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) - ip_conntrack_event_cache(IPCT_STATUS, *pskb); - - return ret; -} - -int invert_tuplepr(struct ip_conntrack_tuple *inverse, - const struct ip_conntrack_tuple *orig) -{ - struct ip_conntrack_protocol *proto; - int ret; - - rcu_read_lock(); - proto = __ip_conntrack_proto_find(orig->dst.protonum); - ret = ip_ct_invert_tuple(inverse, orig, proto); - rcu_read_unlock(); - - return ret; -} - -/* Would two expected things clash? */ -static inline int expect_clash(const struct ip_conntrack_expect *a, - const struct ip_conntrack_expect *b) -{ - /* Part covered by intersection of masks must be unequal, - otherwise they clash */ - struct ip_conntrack_tuple intersect_mask - = { { a->mask.src.ip & b->mask.src.ip, - { a->mask.src.u.all & b->mask.src.u.all } }, - { a->mask.dst.ip & b->mask.dst.ip, - { a->mask.dst.u.all & b->mask.dst.u.all }, - a->mask.dst.protonum & b->mask.dst.protonum } }; - - return ip_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); -} - -static inline int expect_matches(const struct ip_conntrack_expect *a, - const struct ip_conntrack_expect *b) -{ - return a->master == b->master - && ip_ct_tuple_equal(&a->tuple, &b->tuple) - && ip_ct_tuple_equal(&a->mask, &b->mask); -} - -/* Generally a bad idea to call this: could have matched already. */ -void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp) -{ - struct ip_conntrack_expect *i; - - write_lock_bh(&ip_conntrack_lock); - /* choose the the oldest expectation to evict */ - list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) { - if (expect_matches(i, exp) && del_timer(&i->timeout)) { - ip_ct_unlink_expect(i); - write_unlock_bh(&ip_conntrack_lock); - ip_conntrack_expect_put(i); - return; - } - } - write_unlock_bh(&ip_conntrack_lock); -} - -/* We don't increase the master conntrack refcount for non-fulfilled - * conntracks. 
During the conntrack destruction, the expectations are - * always killed before the conntrack itself */ -struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me) -{ - struct ip_conntrack_expect *new; - - new = kmem_cache_alloc(ip_conntrack_expect_cachep, GFP_ATOMIC); - if (!new) { - DEBUGP("expect_related: OOM allocating expect\n"); - return NULL; - } - new->master = me; - atomic_set(&new->use, 1); - return new; -} - -void ip_conntrack_expect_put(struct ip_conntrack_expect *exp) -{ - if (atomic_dec_and_test(&exp->use)) - kmem_cache_free(ip_conntrack_expect_cachep, exp); -} - -static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp) -{ - atomic_inc(&exp->use); - exp->master->expecting++; - list_add(&exp->list, &ip_conntrack_expect_list); - - init_timer(&exp->timeout); - exp->timeout.data = (unsigned long)exp; - exp->timeout.function = expectation_timed_out; - exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ; - add_timer(&exp->timeout); - - exp->id = ++ip_conntrack_expect_next_id; - atomic_inc(&exp->use); - CONNTRACK_STAT_INC(expect_create); -} - -/* Race with expectations being used means we could have none to find; OK. */ -static void evict_oldest_expect(struct ip_conntrack *master) -{ - struct ip_conntrack_expect *i; - - list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) { - if (i->master == master) { - if (del_timer(&i->timeout)) { - ip_ct_unlink_expect(i); - ip_conntrack_expect_put(i); - } - break; - } - } -} - -static inline int refresh_timer(struct ip_conntrack_expect *i) -{ - if (!del_timer(&i->timeout)) - return 0; - - i->timeout.expires = jiffies + i->master->helper->timeout*HZ; - add_timer(&i->timeout); - return 1; -} - -int ip_conntrack_expect_related(struct ip_conntrack_expect *expect) -{ - struct ip_conntrack_expect *i; - int ret; - - DEBUGP("ip_conntrack_expect_related %p\n", related_to); - DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple); - DEBUGP("mask: "); DUMP_TUPLE(&expect->mask); - - write_lock_bh(&ip_conntrack_lock); - list_for_each_entry(i, &ip_conntrack_expect_list, list) { - if (expect_matches(i, expect)) { - /* Refresh timer: if it's dying, ignore.. */ - if (refresh_timer(i)) { - ret = 0; - goto out; - } - } else if (expect_clash(i, expect)) { - ret = -EBUSY; - goto out; - } - } - - /* Will be over limit? */ - if (expect->master->helper->max_expected && - expect->master->expecting >= expect->master->helper->max_expected) - evict_oldest_expect(expect->master); - - ip_conntrack_expect_insert(expect); - ip_conntrack_expect_event(IPEXP_NEW, expect); - ret = 0; -out: - write_unlock_bh(&ip_conntrack_lock); - return ret; -} - -/* Alter reply tuple (maybe alter helper). 
This is for NAT, and is - implicitly racy: see __ip_conntrack_confirm */ -void ip_conntrack_alter_reply(struct ip_conntrack *conntrack, - const struct ip_conntrack_tuple *newreply) -{ - write_lock_bh(&ip_conntrack_lock); - /* Should be unconfirmed, so not in hash table yet */ - IP_NF_ASSERT(!is_confirmed(conntrack)); - - DEBUGP("Altering reply tuple of %p to ", conntrack); - DUMP_TUPLE(newreply); - - conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; - if (!conntrack->master && conntrack->expecting == 0) - conntrack->helper = __ip_conntrack_helper_find(newreply); - write_unlock_bh(&ip_conntrack_lock); -} - -int ip_conntrack_helper_register(struct ip_conntrack_helper *me) -{ - BUG_ON(me->timeout == 0); - write_lock_bh(&ip_conntrack_lock); - list_add(&me->list, &helpers); - write_unlock_bh(&ip_conntrack_lock); - - return 0; -} - -struct ip_conntrack_helper * -__ip_conntrack_helper_find_byname(const char *name) -{ - struct ip_conntrack_helper *h; - - list_for_each_entry(h, &helpers, list) { - if (!strcmp(h->name, name)) - return h; - } - - return NULL; -} - -static inline void unhelp(struct ip_conntrack_tuple_hash *i, - const struct ip_conntrack_helper *me) -{ - if (tuplehash_to_ctrack(i)->helper == me) { - ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i)); - tuplehash_to_ctrack(i)->helper = NULL; - } -} - -void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me) -{ - unsigned int i; - struct ip_conntrack_tuple_hash *h; - struct ip_conntrack_expect *exp, *tmp; - - /* Need write lock here, to delete helper. */ - write_lock_bh(&ip_conntrack_lock); - list_del(&me->list); - - /* Get rid of expectations */ - list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, list) { - if (exp->master->helper == me && del_timer(&exp->timeout)) { - ip_ct_unlink_expect(exp); - ip_conntrack_expect_put(exp); - } - } - /* Get rid of expecteds, set helpers to NULL. */ - list_for_each_entry(h, &unconfirmed, list) - unhelp(h, me); - for (i = 0; i < ip_conntrack_htable_size; i++) { - list_for_each_entry(h, &ip_conntrack_hash[i], list) - unhelp(h, me); - } - write_unlock_bh(&ip_conntrack_lock); - - /* Someone could be still looking at the helper in a bh. */ - synchronize_net(); -} - -/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ -void __ip_ct_refresh_acct(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - const struct sk_buff *skb, - unsigned long extra_jiffies, - int do_acct) -{ - int event = 0; - - IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct); - IP_NF_ASSERT(skb); - - write_lock_bh(&ip_conntrack_lock); - - /* Only update if this is not a fixed timeout */ - if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) { - write_unlock_bh(&ip_conntrack_lock); - return; - } - - /* If not in hash table, timer will not be active yet */ - if (!is_confirmed(ct)) { - ct->timeout.expires = extra_jiffies; - event = IPCT_REFRESH; - } else { - /* Need del_timer for race avoidance (may already be dying). 
*/ - if (del_timer(&ct->timeout)) { - ct->timeout.expires = jiffies + extra_jiffies; - add_timer(&ct->timeout); - event = IPCT_REFRESH; - } - } - -#ifdef CONFIG_IP_NF_CT_ACCT - if (do_acct) { - ct->counters[CTINFO2DIR(ctinfo)].packets++; - ct->counters[CTINFO2DIR(ctinfo)].bytes += - ntohs(ip_hdr(skb)->tot_len); - if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) - || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) - event |= IPCT_COUNTER_FILLING; - } -#endif - - write_unlock_bh(&ip_conntrack_lock); - - /* must be unlocked when calling event cache */ - if (event) - ip_conntrack_event_cache(event, skb); -} - -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) -/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be - * in ip_conntrack_core, since we don't want the protocols to autoload - * or depend on ctnetlink */ -int ip_ct_port_tuple_to_nfattr(struct sk_buff *skb, - const struct ip_conntrack_tuple *tuple) -{ - NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(__be16), - &tuple->src.u.tcp.port); - NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(__be16), - &tuple->dst.u.tcp.port); - return 0; - -nfattr_failure: - return -1; -} - -int ip_ct_port_nfattr_to_tuple(struct nfattr *tb[], - struct ip_conntrack_tuple *t) -{ - if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1]) - return -EINVAL; - - t->src.u.tcp.port = - *(__be16 *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]); - t->dst.u.tcp.port = - *(__be16 *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]); - - return 0; -} -#endif - -/* Returns new sk_buff, or NULL */ -struct sk_buff * -ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user) -{ - skb_orphan(skb); - - local_bh_disable(); - skb = ip_defrag(skb, user); - local_bh_enable(); - - if (skb) - ip_send_check(ip_hdr(skb)); - return skb; -} - -/* Used by ipt_REJECT. */ -static void ip_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) -{ - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - - /* This ICMP is in reverse direction to the packet which caused it */ - ct = ip_conntrack_get(skb, &ctinfo); - - if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) - ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; - else - ctinfo = IP_CT_RELATED; - - /* Attach to new skbuff, and increment count */ - nskb->nfct = &ct->ct_general; - nskb->nfctinfo = ctinfo; - nf_conntrack_get(nskb->nfct); -} - -/* Bring out ya dead! */ -static struct ip_conntrack * -get_next_corpse(int (*iter)(struct ip_conntrack *i, void *data), - void *data, unsigned int *bucket) -{ - struct ip_conntrack_tuple_hash *h; - struct ip_conntrack *ct; - - write_lock_bh(&ip_conntrack_lock); - for (; *bucket < ip_conntrack_htable_size; (*bucket)++) { - list_for_each_entry(h, &ip_conntrack_hash[*bucket], list) { - ct = tuplehash_to_ctrack(h); - if (iter(ct, data)) - goto found; - } - } - list_for_each_entry(h, &unconfirmed, list) { - ct = tuplehash_to_ctrack(h); - if (iter(ct, data)) - set_bit(IPS_DYING_BIT, &ct->status); - } - write_unlock_bh(&ip_conntrack_lock); - return NULL; - -found: - atomic_inc(&ct->ct_general.use); - write_unlock_bh(&ip_conntrack_lock); - return ct; -} - -void -ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *), void *data) -{ - struct ip_conntrack *ct; - unsigned int bucket = 0; - - while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) { - /* Time to push up daises... */ - if (del_timer(&ct->timeout)) - death_by_timeout((unsigned long)ct); - /* ... else the timer will get him soon. 
*/ - - ip_conntrack_put(ct); - } -} - -/* Fast function for those who don't want to parse /proc (and I don't - blame them). */ -/* Reversing the socket's dst/src point of view gives us the reply - mapping. */ -static int -getorigdst(struct sock *sk, int optval, void __user *user, int *len) -{ - struct inet_sock *inet = inet_sk(sk); - struct ip_conntrack_tuple_hash *h; - struct ip_conntrack_tuple tuple; - - IP_CT_TUPLE_U_BLANK(&tuple); - tuple.src.ip = inet->rcv_saddr; - tuple.src.u.tcp.port = inet->sport; - tuple.dst.ip = inet->daddr; - tuple.dst.u.tcp.port = inet->dport; - tuple.dst.protonum = IPPROTO_TCP; - - /* We only do TCP at the moment: is there a better way? */ - if (strcmp(sk->sk_prot->name, "TCP")) { - DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n"); - return -ENOPROTOOPT; - } - - if ((unsigned int) *len < sizeof(struct sockaddr_in)) { - DEBUGP("SO_ORIGINAL_DST: len %u not %u\n", - *len, sizeof(struct sockaddr_in)); - return -EINVAL; - } - - h = ip_conntrack_find_get(&tuple, NULL); - if (h) { - struct sockaddr_in sin; - struct ip_conntrack *ct = tuplehash_to_ctrack(h); - - sin.sin_family = AF_INET; - sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL] - .tuple.dst.u.tcp.port; - sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] - .tuple.dst.ip; - memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); - - DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", - NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); - ip_conntrack_put(ct); - if (copy_to_user(user, &sin, sizeof(sin)) != 0) - return -EFAULT; - else - return 0; - } - DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n", - NIPQUAD(tuple.src.ip), ntohs(tuple.src.u.tcp.port), - NIPQUAD(tuple.dst.ip), ntohs(tuple.dst.u.tcp.port)); - return -ENOENT; -} - -static struct nf_sockopt_ops so_getorigdst = { - .pf = PF_INET, - .get_optmin = SO_ORIGINAL_DST, - .get_optmax = SO_ORIGINAL_DST+1, - .get = &getorigdst, -}; - -static int kill_all(struct ip_conntrack *i, void *data) -{ - return 1; -} - -void ip_conntrack_flush(void) -{ - ip_ct_iterate_cleanup(kill_all, NULL); -} - -static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size) -{ - if (vmalloced) - vfree(hash); - else - free_pages((unsigned long)hash, - get_order(sizeof(struct list_head) * size)); -} - -/* Mishearing the voices in his head, our hero wonders how he's - supposed to kill the mall. */ -void ip_conntrack_cleanup(void) -{ - rcu_assign_pointer(ip_ct_attach, NULL); - - /* This makes sure all current packets have passed through - netfilter framework. Roll on, two-stage module - delete... 
*/ - synchronize_net(); - - ip_ct_event_cache_flush(); - i_see_dead_people: - ip_conntrack_flush(); - if (atomic_read(&ip_conntrack_count) != 0) { - schedule(); - goto i_see_dead_people; - } - /* wait until all references to ip_conntrack_untracked are dropped */ - while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1) - schedule(); - - kmem_cache_destroy(ip_conntrack_cachep); - kmem_cache_destroy(ip_conntrack_expect_cachep); - free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc, - ip_conntrack_htable_size); - nf_unregister_sockopt(&so_getorigdst); -} - -static struct list_head *alloc_hashtable(int size, int *vmalloced) -{ - struct list_head *hash; - unsigned int i; - - *vmalloced = 0; - hash = (void*)__get_free_pages(GFP_KERNEL, - get_order(sizeof(struct list_head) - * size)); - if (!hash) { - *vmalloced = 1; - printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n"); - hash = vmalloc(sizeof(struct list_head) * size); - } - - if (hash) - for (i = 0; i < size; i++) - INIT_LIST_HEAD(&hash[i]); - - return hash; -} - -static int set_hashsize(const char *val, struct kernel_param *kp) -{ - int i, bucket, hashsize, vmalloced; - int old_vmalloced, old_size; - int rnd; - struct list_head *hash, *old_hash; - struct ip_conntrack_tuple_hash *h; - - /* On boot, we can set this without any fancy locking. */ - if (!ip_conntrack_htable_size) - return param_set_int(val, kp); - - hashsize = simple_strtol(val, NULL, 0); - if (!hashsize) - return -EINVAL; - - hash = alloc_hashtable(hashsize, &vmalloced); - if (!hash) - return -ENOMEM; - - /* We have to rehash for the new table anyway, so we also can - * use a new random seed */ - get_random_bytes(&rnd, 4); - - write_lock_bh(&ip_conntrack_lock); - for (i = 0; i < ip_conntrack_htable_size; i++) { - while (!list_empty(&ip_conntrack_hash[i])) { - h = list_entry(ip_conntrack_hash[i].next, - struct ip_conntrack_tuple_hash, list); - list_del(&h->list); - bucket = __hash_conntrack(&h->tuple, hashsize, rnd); - list_add_tail(&h->list, &hash[bucket]); - } - } - old_size = ip_conntrack_htable_size; - old_vmalloced = ip_conntrack_vmalloc; - old_hash = ip_conntrack_hash; - - ip_conntrack_htable_size = hashsize; - ip_conntrack_vmalloc = vmalloced; - ip_conntrack_hash = hash; - ip_conntrack_hash_rnd = rnd; - write_unlock_bh(&ip_conntrack_lock); - - free_conntrack_hash(old_hash, old_vmalloced, old_size); - return 0; -} - -module_param_call(hashsize, set_hashsize, param_get_uint, - &ip_conntrack_htable_size, 0600); - -int __init ip_conntrack_init(void) -{ - unsigned int i; - int ret; - - /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB - * machine has 256 buckets. >= 1GB machines have 8192 buckets. 
*/ - if (!ip_conntrack_htable_size) { - ip_conntrack_htable_size - = (((num_physpages << PAGE_SHIFT) / 16384) - / sizeof(struct list_head)); - if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE)) - ip_conntrack_htable_size = 8192; - if (ip_conntrack_htable_size < 16) - ip_conntrack_htable_size = 16; - } - ip_conntrack_max = 8 * ip_conntrack_htable_size; - - printk("ip_conntrack version %s (%u buckets, %d max)" - " - %Zd bytes per conntrack\n", IP_CONNTRACK_VERSION, - ip_conntrack_htable_size, ip_conntrack_max, - sizeof(struct ip_conntrack)); - - ret = nf_register_sockopt(&so_getorigdst); - if (ret != 0) { - printk(KERN_ERR "Unable to register netfilter socket option\n"); - return ret; - } - - ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size, - &ip_conntrack_vmalloc); - if (!ip_conntrack_hash) { - printk(KERN_ERR "Unable to create ip_conntrack_hash\n"); - goto err_unreg_sockopt; - } - - ip_conntrack_cachep = kmem_cache_create("ip_conntrack", - sizeof(struct ip_conntrack), 0, - 0, NULL, NULL); - if (!ip_conntrack_cachep) { - printk(KERN_ERR "Unable to create ip_conntrack slab cache\n"); - goto err_free_hash; - } - - ip_conntrack_expect_cachep = kmem_cache_create("ip_conntrack_expect", - sizeof(struct ip_conntrack_expect), - 0, 0, NULL, NULL); - if (!ip_conntrack_expect_cachep) { - printk(KERN_ERR "Unable to create ip_expect slab cache\n"); - goto err_free_conntrack_slab; - } - - /* Don't NEED lock here, but good form anyway. */ - write_lock_bh(&ip_conntrack_lock); - for (i = 0; i < MAX_IP_CT_PROTO; i++) - rcu_assign_pointer(ip_ct_protos[i], &ip_conntrack_generic_protocol); - /* Sew in builtin protocols. */ - rcu_assign_pointer(ip_ct_protos[IPPROTO_TCP], &ip_conntrack_protocol_tcp); - rcu_assign_pointer(ip_ct_protos[IPPROTO_UDP], &ip_conntrack_protocol_udp); - rcu_assign_pointer(ip_ct_protos[IPPROTO_ICMP], &ip_conntrack_protocol_icmp); - write_unlock_bh(&ip_conntrack_lock); - - /* For use by ipt_REJECT */ - rcu_assign_pointer(ip_ct_attach, ip_conntrack_attach); - - /* Set up fake conntrack: - - to never be deleted, not in any hashes */ - atomic_set(&ip_conntrack_untracked.ct_general.use, 1); - /* - and look it like as a confirmed connection */ - set_bit(IPS_CONFIRMED_BIT, &ip_conntrack_untracked.status); - - return ret; - -err_free_conntrack_slab: - kmem_cache_destroy(ip_conntrack_cachep); -err_free_hash: - free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc, - ip_conntrack_htable_size); -err_unreg_sockopt: - nf_unregister_sockopt(&so_getorigdst); - - return -ENOMEM; -} diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c deleted file mode 100644 index 92389987e789..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_ftp.c +++ /dev/null @@ -1,520 +0,0 @@ -/* FTP extension for IP connection tracking. */ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Rusty Russell "); -MODULE_DESCRIPTION("ftp connection tracking helper"); - -/* This is slow, but it's simple. 
--RR */ -static char *ftp_buffer; -static DEFINE_SPINLOCK(ip_ftp_lock); - -#define MAX_PORTS 8 -static unsigned short ports[MAX_PORTS]; -static int ports_c; -module_param_array(ports, ushort, &ports_c, 0400); - -static int loose; -module_param(loose, bool, 0600); - -unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - enum ip_ct_ftp_type type, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack_expect *exp, - u32 *seq); -EXPORT_SYMBOL_GPL(ip_nat_ftp_hook); - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -static int try_rfc959(const char *, size_t, u_int32_t [], char); -static int try_eprt(const char *, size_t, u_int32_t [], char); -static int try_epsv_response(const char *, size_t, u_int32_t [], char); - -static const struct ftp_search { - const char *pattern; - size_t plen; - char skip; - char term; - enum ip_ct_ftp_type ftptype; - int (*getnum)(const char *, size_t, u_int32_t[], char); -} search[IP_CT_DIR_MAX][2] = { - [IP_CT_DIR_ORIGINAL] = { - { - .pattern = "PORT", - .plen = sizeof("PORT") - 1, - .skip = ' ', - .term = '\r', - .ftptype = IP_CT_FTP_PORT, - .getnum = try_rfc959, - }, - { - .pattern = "EPRT", - .plen = sizeof("EPRT") - 1, - .skip = ' ', - .term = '\r', - .ftptype = IP_CT_FTP_EPRT, - .getnum = try_eprt, - }, - }, - [IP_CT_DIR_REPLY] = { - { - .pattern = "227 ", - .plen = sizeof("227 ") - 1, - .skip = '(', - .term = ')', - .ftptype = IP_CT_FTP_PASV, - .getnum = try_rfc959, - }, - { - .pattern = "229 ", - .plen = sizeof("229 ") - 1, - .skip = '(', - .term = ')', - .ftptype = IP_CT_FTP_EPSV, - .getnum = try_epsv_response, - }, - }, -}; - -static int try_number(const char *data, size_t dlen, u_int32_t array[], - int array_size, char sep, char term) -{ - u_int32_t i, len; - - memset(array, 0, sizeof(array[0])*array_size); - - /* Keep data pointing at next char. */ - for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) { - if (*data >= '0' && *data <= '9') { - array[i] = array[i]*10 + *data - '0'; - } - else if (*data == sep) - i++; - else { - /* Unexpected character; true if it's the - terminator and we're finished. */ - if (*data == term && i == array_size - 1) - return len; - - DEBUGP("Char %u (got %u nums) `%u' unexpected\n", - len, i, *data); - return 0; - } - } - DEBUGP("Failed to fill %u numbers separated by %c\n", array_size, sep); - - return 0; -} - -/* Returns 0, or length of numbers: 192,168,1,1,5,6 */ -static int try_rfc959(const char *data, size_t dlen, u_int32_t array[6], - char term) -{ - return try_number(data, dlen, array, 6, ',', term); -} - -/* Grab port: number up to delimiter */ -static int get_port(const char *data, int start, size_t dlen, char delim, - u_int32_t array[2]) -{ - u_int16_t port = 0; - int i; - - for (i = start; i < dlen; i++) { - /* Finished? */ - if (data[i] == delim) { - if (port == 0) - break; - array[0] = port >> 8; - array[1] = port; - return i + 1; - } - else if (data[i] >= '0' && data[i] <= '9') - port = port*10 + data[i] - '0'; - else /* Some other crap */ - break; - } - return 0; -} - -/* Returns 0, or length of numbers: |1|132.235.1.2|6275| */ -static int try_eprt(const char *data, size_t dlen, u_int32_t array[6], - char term) -{ - char delim; - int length; - - /* First character is delimiter, then "1" for IPv4, then - delimiter again. 
*/ - if (dlen <= 3) return 0; - delim = data[0]; - if (isdigit(delim) || delim < 33 || delim > 126 - || data[1] != '1' || data[2] != delim) - return 0; - - DEBUGP("EPRT: Got |1|!\n"); - /* Now we have IP address. */ - length = try_number(data + 3, dlen - 3, array, 4, '.', delim); - if (length == 0) - return 0; - - DEBUGP("EPRT: Got IP address!\n"); - /* Start offset includes initial "|1|", and trailing delimiter */ - return get_port(data, 3 + length + 1, dlen, delim, array+4); -} - -/* Returns 0, or length of numbers: |||6446| */ -static int try_epsv_response(const char *data, size_t dlen, u_int32_t array[6], - char term) -{ - char delim; - - /* Three delimiters. */ - if (dlen <= 3) return 0; - delim = data[0]; - if (isdigit(delim) || delim < 33 || delim > 126 - || data[1] != delim || data[2] != delim) - return 0; - - return get_port(data, 3, dlen, delim, array+4); -} - -/* Return 1 for match, 0 for accept, -1 for partial. */ -static int find_pattern(const char *data, size_t dlen, - const char *pattern, size_t plen, - char skip, char term, - unsigned int *numoff, - unsigned int *numlen, - u_int32_t array[6], - int (*getnum)(const char *, size_t, u_int32_t[], char)) -{ - size_t i; - - DEBUGP("find_pattern `%s': dlen = %u\n", pattern, dlen); - if (dlen == 0) - return 0; - - if (dlen <= plen) { - /* Short packet: try for partial? */ - if (strnicmp(data, pattern, dlen) == 0) - return -1; - else return 0; - } - - if (strnicmp(data, pattern, plen) != 0) { -#if 0 - size_t i; - - DEBUGP("ftp: string mismatch\n"); - for (i = 0; i < plen; i++) { - DEBUGP("ftp:char %u `%c'(%u) vs `%c'(%u)\n", - i, data[i], data[i], - pattern[i], pattern[i]); - } -#endif - return 0; - } - - DEBUGP("Pattern matches!\n"); - /* Now we've found the constant string, try to skip - to the 'skip' character */ - for (i = plen; data[i] != skip; i++) - if (i == dlen - 1) return -1; - - /* Skip over the last character */ - i++; - - DEBUGP("Skipped up to `%c'!\n", skip); - - *numoff = i; - *numlen = getnum(data + i, dlen - i, array, term); - if (!*numlen) - return -1; - - DEBUGP("Match succeeded!\n"); - return 1; -} - -/* Look up to see if we're just after a \n. */ -static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir) -{ - unsigned int i; - - for (i = 0; i < info->seq_aft_nl_num[dir]; i++) - if (info->seq_aft_nl[dir][i] == seq) - return 1; - return 0; -} - -/* We don't update if it's older than what we have. */ -static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir, - struct sk_buff *skb) -{ - unsigned int i, oldest = NUM_SEQ_TO_REMEMBER; - - /* Look for oldest: if we find exact match, we're done. 
*/ - for (i = 0; i < info->seq_aft_nl_num[dir]; i++) { - if (info->seq_aft_nl[dir][i] == nl_seq) - return; - - if (oldest == info->seq_aft_nl_num[dir] - || before(info->seq_aft_nl[dir][i], oldest)) - oldest = i; - } - - if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) { - info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; - ip_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb); - } else if (oldest != NUM_SEQ_TO_REMEMBER) { - info->seq_aft_nl[dir][oldest] = nl_seq; - ip_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb); - } -} - -static int help(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - unsigned int dataoff, datalen; - struct tcphdr _tcph, *th; - char *fb_ptr; - int ret; - u32 seq, array[6] = { 0 }; - int dir = CTINFO2DIR(ctinfo); - unsigned int matchlen, matchoff; - struct ip_ct_ftp_master *ct_ftp_info = &ct->help.ct_ftp_info; - struct ip_conntrack_expect *exp; - unsigned int i; - int found = 0, ends_in_nl; - typeof(ip_nat_ftp_hook) ip_nat_ftp; - - /* Until there's been traffic both ways, don't look in packets. */ - if (ctinfo != IP_CT_ESTABLISHED - && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) { - DEBUGP("ftp: Conntrackinfo = %u\n", ctinfo); - return NF_ACCEPT; - } - - th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), - sizeof(_tcph), &_tcph); - if (th == NULL) - return NF_ACCEPT; - - dataoff = ip_hdrlen(*pskb) + th->doff * 4; - /* No data? */ - if (dataoff >= (*pskb)->len) { - DEBUGP("ftp: pskblen = %u\n", (*pskb)->len); - return NF_ACCEPT; - } - datalen = (*pskb)->len - dataoff; - - spin_lock_bh(&ip_ftp_lock); - fb_ptr = skb_header_pointer(*pskb, dataoff, - (*pskb)->len - dataoff, ftp_buffer); - BUG_ON(fb_ptr == NULL); - - ends_in_nl = (fb_ptr[datalen - 1] == '\n'); - seq = ntohl(th->seq) + datalen; - - /* Look up to see if we're just after a \n. */ - if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) { - /* Now if this ends in \n, update ftp info. */ - DEBUGP("ip_conntrack_ftp_help: wrong seq pos %s(%u) or %s(%u)\n", - ct_ftp_info->seq_aft_nl[0][dir] - old_seq_aft_nl_set ? "":"(UNSET) ", old_seq_aft_nl); - ret = NF_ACCEPT; - goto out_update_nl; - } - - /* Initialize IP array to expected address (it's not mentioned - in EPSV responses) */ - array[0] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 24) & 0xFF; - array[1] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 16) & 0xFF; - array[2] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 8) & 0xFF; - array[3] = ntohl(ct->tuplehash[dir].tuple.src.ip) & 0xFF; - - for (i = 0; i < ARRAY_SIZE(search[dir]); i++) { - found = find_pattern(fb_ptr, (*pskb)->len - dataoff, - search[dir][i].pattern, - search[dir][i].plen, - search[dir][i].skip, - search[dir][i].term, - &matchoff, &matchlen, - array, - search[dir][i].getnum); - if (found) break; - } - if (found == -1) { - /* We don't usually drop packets. After all, this is - connection tracking, not packet filtering. - However, it is necessary for accurate tracking in - this case. 
*/ - if (net_ratelimit()) - printk("conntrack_ftp: partial %s %u+%u\n", - search[dir][i].pattern, - ntohl(th->seq), datalen); - ret = NF_DROP; - goto out; - } else if (found == 0) { /* No match */ - ret = NF_ACCEPT; - goto out_update_nl; - } - - DEBUGP("conntrack_ftp: match `%s' (%u bytes at %u)\n", - fb_ptr + matchoff, matchlen, ntohl(th->seq) + matchoff); - - /* Allocate expectation which will be inserted */ - exp = ip_conntrack_expect_alloc(ct); - if (exp == NULL) { - ret = NF_DROP; - goto out; - } - - /* We refer to the reverse direction ("!dir") tuples here, - * because we're expecting something in the other direction. - * Doesn't matter unless NAT is happening. */ - exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; - - if (htonl((array[0] << 24) | (array[1] << 16) | (array[2] << 8) | array[3]) - != ct->tuplehash[dir].tuple.src.ip) { - /* Enrico Scholz's passive FTP to partially RNAT'd ftp - server: it really wants us to connect to a - different IP address. Simply don't record it for - NAT. */ - DEBUGP("conntrack_ftp: NOT RECORDING: %u,%u,%u,%u != %u.%u.%u.%u\n", - array[0], array[1], array[2], array[3], - NIPQUAD(ct->tuplehash[dir].tuple.src.ip)); - - /* Thanks to Cristiano Lincoln Mattos - for reporting this potential - problem (DMZ machines opening holes to internal - networks, or the packet filter itself). */ - if (!loose) { - ret = NF_ACCEPT; - goto out_put_expect; - } - exp->tuple.dst.ip = htonl((array[0] << 24) | (array[1] << 16) - | (array[2] << 8) | array[3]); - } - - exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - exp->tuple.dst.u.tcp.port = htons(array[4] << 8 | array[5]); - exp->tuple.src.u.tcp.port = 0; /* Don't care. */ - exp->tuple.dst.protonum = IPPROTO_TCP; - exp->mask = ((struct ip_conntrack_tuple) - { { htonl(0xFFFFFFFF), { 0 } }, - { htonl(0xFFFFFFFF), { .tcp = { htons(0xFFFF) } }, 0xFF }}); - - exp->expectfn = NULL; - exp->flags = 0; - - /* Now, NAT might want to mangle the packet, and register the - * (possibly changed) expectation itself. */ - ip_nat_ftp = rcu_dereference(ip_nat_ftp_hook); - if (ip_nat_ftp) - ret = ip_nat_ftp(pskb, ctinfo, search[dir][i].ftptype, - matchoff, matchlen, exp, &seq); - else { - /* Can't expect this? Best to drop packet now. */ - if (ip_conntrack_expect_related(exp) != 0) - ret = NF_DROP; - else - ret = NF_ACCEPT; - } - -out_put_expect: - ip_conntrack_expect_put(exp); - -out_update_nl: - /* Now if this ends in \n, update ftp info. Seq may have been - * adjusted by NAT code. 
*/ - if (ends_in_nl) - update_nl_seq(seq, ct_ftp_info,dir, *pskb); - out: - spin_unlock_bh(&ip_ftp_lock); - return ret; -} - -static struct ip_conntrack_helper ftp[MAX_PORTS]; -static char ftp_names[MAX_PORTS][sizeof("ftp-65535")]; - -/* Not __exit: called from init() */ -static void ip_conntrack_ftp_fini(void) -{ - int i; - for (i = 0; i < ports_c; i++) { - DEBUGP("ip_ct_ftp: unregistering helper for port %d\n", - ports[i]); - ip_conntrack_helper_unregister(&ftp[i]); - } - - kfree(ftp_buffer); -} - -static int __init ip_conntrack_ftp_init(void) -{ - int i, ret; - char *tmpname; - - ftp_buffer = kmalloc(65536, GFP_KERNEL); - if (!ftp_buffer) - return -ENOMEM; - - if (ports_c == 0) - ports[ports_c++] = FTP_PORT; - - for (i = 0; i < ports_c; i++) { - ftp[i].tuple.src.u.tcp.port = htons(ports[i]); - ftp[i].tuple.dst.protonum = IPPROTO_TCP; - ftp[i].mask.src.u.tcp.port = htons(0xFFFF); - ftp[i].mask.dst.protonum = 0xFF; - ftp[i].max_expected = 1; - ftp[i].timeout = 5 * 60; /* 5 minutes */ - ftp[i].me = THIS_MODULE; - ftp[i].help = help; - - tmpname = &ftp_names[i][0]; - if (ports[i] == FTP_PORT) - sprintf(tmpname, "ftp"); - else - sprintf(tmpname, "ftp-%d", ports[i]); - ftp[i].name = tmpname; - - DEBUGP("ip_ct_ftp: registering helper for port %d\n", - ports[i]); - ret = ip_conntrack_helper_register(&ftp[i]); - - if (ret) { - ip_conntrack_ftp_fini(); - return ret; - } - } - return 0; -} - -module_init(ip_conntrack_ftp_init); -module_exit(ip_conntrack_ftp_fini); diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c deleted file mode 100644 index cecb6e0c8ed0..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c +++ /dev/null @@ -1,1840 +0,0 @@ -/* - * H.323 connection tracking helper - * - * Copyright (c) 2006 Jing Min Zhao - * - * This source code is licensed under General Public License version 2. - * - * Based on the 'brute force' H.323 connection tracking module by - * Jozsef Kadlecsik - * - * For more information, please see http://nath323.sourceforge.net/ - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) 
-#endif - -/* Parameters */ -static unsigned int default_rrq_ttl = 300; -module_param(default_rrq_ttl, uint, 0600); -MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ"); - -static int gkrouted_only = 1; -module_param(gkrouted_only, int, 0600); -MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper"); - -static int callforward_filter = 1; -module_param(callforward_filter, bool, 0600); -MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " - "if both endpoints are on different sides " - "(determined by routing information)"); - -/* Hooks for NAT */ -int (*set_h245_addr_hook) (struct sk_buff ** pskb, - unsigned char **data, int dataoff, - H245_TransportAddress * addr, - __be32 ip, u_int16_t port); -int (*set_h225_addr_hook) (struct sk_buff ** pskb, - unsigned char **data, int dataoff, - TransportAddress * addr, - __be32 ip, u_int16_t port); -int (*set_sig_addr_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, - TransportAddress * addr, int count); -int (*set_ras_addr_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, - TransportAddress * addr, int count); -int (*nat_rtp_rtcp_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - H245_TransportAddress * addr, - u_int16_t port, u_int16_t rtp_port, - struct ip_conntrack_expect * rtp_exp, - struct ip_conntrack_expect * rtcp_exp); -int (*nat_t120_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - H245_TransportAddress * addr, u_int16_t port, - struct ip_conntrack_expect * exp); -int (*nat_h245_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - TransportAddress * addr, u_int16_t port, - struct ip_conntrack_expect * exp); -int (*nat_callforwarding_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - TransportAddress * addr, u_int16_t port, - struct ip_conntrack_expect * exp); -int (*nat_q931_hook) (struct sk_buff ** pskb, - struct ip_conntrack * ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, TransportAddress * addr, int idx, - u_int16_t port, struct ip_conntrack_expect * exp); - - -static DEFINE_SPINLOCK(ip_h323_lock); -static char *h323_buffer; - -/****************************************************************************/ -static int get_tpkt_data(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int *datalen, int *dataoff) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int dir = CTINFO2DIR(ctinfo); - struct tcphdr _tcph, *th; - int tcpdatalen; - int tcpdataoff; - unsigned char *tpkt; - int tpktlen; - int tpktoff; - - /* Get TCP header */ - th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), - sizeof(_tcph), &_tcph); - if (th == NULL) - return 0; - - /* Get TCP data offset */ - tcpdataoff = ip_hdrlen(*pskb) + th->doff * 4; - - /* Get TCP data length */ - tcpdatalen = (*pskb)->len - tcpdataoff; - if (tcpdatalen <= 0) /* No TCP data */ - goto clear_out; - - if (*data == NULL) { /* first TPKT */ - /* Get first TPKT pointer */ - tpkt = skb_header_pointer(*pskb, tcpdataoff, tcpdatalen, - h323_buffer); - BUG_ON(tpkt == NULL); - - /* Validate TPKT identifier */ - if 
(tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) { - /* Netmeeting sends TPKT header and data separately */ - if (info->tpkt_len[dir] > 0) { - DEBUGP("ip_ct_h323: previous packet " - "indicated separate TPKT data of %hu " - "bytes\n", info->tpkt_len[dir]); - if (info->tpkt_len[dir] <= tcpdatalen) { - /* Yes, there was a TPKT header - * received */ - *data = tpkt; - *datalen = info->tpkt_len[dir]; - *dataoff = 0; - goto out; - } - - /* Fragmented TPKT */ - if (net_ratelimit()) - printk("ip_ct_h323: " - "fragmented TPKT\n"); - goto clear_out; - } - - /* It is not even a TPKT */ - return 0; - } - tpktoff = 0; - } else { /* Next TPKT */ - tpktoff = *dataoff + *datalen; - tcpdatalen -= tpktoff; - if (tcpdatalen <= 4) /* No more TPKT */ - goto clear_out; - tpkt = *data + *datalen; - - /* Validate TPKT identifier */ - if (tpkt[0] != 0x03 || tpkt[1] != 0) - goto clear_out; - } - - /* Validate TPKT length */ - tpktlen = tpkt[2] * 256 + tpkt[3]; - if (tpktlen < 4) - goto clear_out; - if (tpktlen > tcpdatalen) { - if (tcpdatalen == 4) { /* Separate TPKT header */ - /* Netmeeting sends TPKT header and data separately */ - DEBUGP("ip_ct_h323: separate TPKT header indicates " - "there will be TPKT data of %hu bytes\n", - tpktlen - 4); - info->tpkt_len[dir] = tpktlen - 4; - return 0; - } - - if (net_ratelimit()) - printk("ip_ct_h323: incomplete TPKT (fragmented?)\n"); - goto clear_out; - } - - /* This is the encapsulated data */ - *data = tpkt + 4; - *datalen = tpktlen - 4; - *dataoff = tpktoff + 4; - - out: - /* Clear TPKT length */ - info->tpkt_len[dir] = 0; - return 1; - - clear_out: - info->tpkt_len[dir] = 0; - return 0; -} - -/****************************************************************************/ -static int get_h245_addr(unsigned char *data, H245_TransportAddress * addr, - __be32 * ip, u_int16_t * port) -{ - unsigned char *p; - - if (addr->choice != eH245_TransportAddress_unicastAddress || - addr->unicastAddress.choice != eUnicastAddress_iPAddress) - return 0; - - p = data + addr->unicastAddress.iPAddress.network; - *ip = htonl((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | (p[3])); - *port = (p[4] << 8) | (p[5]); - - return 1; -} - -/****************************************************************************/ -static int expect_rtp_rtcp(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - H245_TransportAddress * addr) -{ - int dir = CTINFO2DIR(ctinfo); - int ret = 0; - __be32 ip; - u_int16_t port; - u_int16_t rtp_port; - struct ip_conntrack_expect *rtp_exp; - struct ip_conntrack_expect *rtcp_exp; - typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp; - - /* Read RTP or RTCP address */ - if (!get_h245_addr(*data, addr, &ip, &port) || - ip != ct->tuplehash[dir].tuple.src.ip || port == 0) - return 0; - - /* RTP port is even */ - rtp_port = port & (~1); - - /* Create expect for RTP */ - if ((rtp_exp = ip_conntrack_expect_alloc(ct)) == NULL) - return -1; - rtp_exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - rtp_exp->tuple.src.u.udp.port = 0; - rtp_exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; - rtp_exp->tuple.dst.u.udp.port = htons(rtp_port); - rtp_exp->tuple.dst.protonum = IPPROTO_UDP; - rtp_exp->mask.src.ip = htonl(0xFFFFFFFF); - rtp_exp->mask.src.u.udp.port = 0; - rtp_exp->mask.dst.ip = htonl(0xFFFFFFFF); - rtp_exp->mask.dst.u.udp.port = htons(0xFFFF); - rtp_exp->mask.dst.protonum = 0xFF; - rtp_exp->flags = 0; - - /* Create expect for RTCP */ - if ((rtcp_exp = ip_conntrack_expect_alloc(ct)) == NULL) { - 
ip_conntrack_expect_put(rtp_exp); - return -1; - } - rtcp_exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - rtcp_exp->tuple.src.u.udp.port = 0; - rtcp_exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; - rtcp_exp->tuple.dst.u.udp.port = htons(rtp_port + 1); - rtcp_exp->tuple.dst.protonum = IPPROTO_UDP; - rtcp_exp->mask.src.ip = htonl(0xFFFFFFFF); - rtcp_exp->mask.src.u.udp.port = 0; - rtcp_exp->mask.dst.ip = htonl(0xFFFFFFFF); - rtcp_exp->mask.dst.u.udp.port = htons(0xFFFF); - rtcp_exp->mask.dst.protonum = 0xFF; - rtcp_exp->flags = 0; - - if (ct->tuplehash[dir].tuple.src.ip != - ct->tuplehash[!dir].tuple.dst.ip && - (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook))) { - /* NAT needed */ - ret = nat_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, - addr, port, rtp_port, rtp_exp, rtcp_exp); - } else { /* Conntrack only */ - rtp_exp->expectfn = NULL; - rtcp_exp->expectfn = NULL; - - if (ip_conntrack_expect_related(rtp_exp) == 0) { - if (ip_conntrack_expect_related(rtcp_exp) == 0) { - DEBUGP("ip_ct_h323: expect RTP " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(rtp_exp->tuple.src.ip), - ntohs(rtp_exp->tuple.src.u.udp.port), - NIPQUAD(rtp_exp->tuple.dst.ip), - ntohs(rtp_exp->tuple.dst.u.udp.port)); - DEBUGP("ip_ct_h323: expect RTCP " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(rtcp_exp->tuple.src.ip), - ntohs(rtcp_exp->tuple.src.u.udp.port), - NIPQUAD(rtcp_exp->tuple.dst.ip), - ntohs(rtcp_exp->tuple.dst.u.udp.port)); - } else { - ip_conntrack_unexpect_related(rtp_exp); - ret = -1; - } - } else - ret = -1; - } - - ip_conntrack_expect_put(rtp_exp); - ip_conntrack_expect_put(rtcp_exp); - - return ret; -} - -/****************************************************************************/ -static int expect_t120(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - H245_TransportAddress * addr) -{ - int dir = CTINFO2DIR(ctinfo); - int ret = 0; - __be32 ip; - u_int16_t port; - struct ip_conntrack_expect *exp = NULL; - typeof(nat_t120_hook) nat_t120; - - /* Read T.120 address */ - if (!get_h245_addr(*data, addr, &ip, &port) || - ip != ct->tuplehash[dir].tuple.src.ip || port == 0) - return 0; - - /* Create expect for T.120 connections */ - if ((exp = ip_conntrack_expect_alloc(ct)) == NULL) - return -1; - exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - exp->tuple.src.u.tcp.port = 0; - exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; - exp->tuple.dst.u.tcp.port = htons(port); - exp->tuple.dst.protonum = IPPROTO_TCP; - exp->mask.src.ip = htonl(0xFFFFFFFF); - exp->mask.src.u.tcp.port = 0; - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.u.tcp.port = htons(0xFFFF); - exp->mask.dst.protonum = 0xFF; - exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple channels */ - - if (ct->tuplehash[dir].tuple.src.ip != - ct->tuplehash[!dir].tuple.dst.ip && - (nat_t120 = rcu_dereference(nat_t120_hook))) { - /* NAT needed */ - ret = nat_t120(pskb, ct, ctinfo, data, dataoff, addr, - port, exp); - } else { /* Conntrack only */ - exp->expectfn = NULL; - if (ip_conntrack_expect_related(exp) == 0) { - DEBUGP("ip_ct_h323: expect T.120 " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), - ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), - ntohs(exp->tuple.dst.u.tcp.port)); - } else - ret = -1; - } - - ip_conntrack_expect_put(exp); - - return ret; -} - -/****************************************************************************/ -static int process_h245_channel(struct sk_buff **pskb, - 
struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - H2250LogicalChannelParameters * channel) -{ - int ret; - - if (channel->options & eH2250LogicalChannelParameters_mediaChannel) { - /* RTP */ - ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, - &channel->mediaChannel); - if (ret < 0) - return -1; - } - - if (channel-> - options & eH2250LogicalChannelParameters_mediaControlChannel) { - /* RTCP */ - ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, - &channel->mediaControlChannel); - if (ret < 0) - return -1; - } - - return 0; -} - -/****************************************************************************/ -static int process_olc(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - OpenLogicalChannel * olc) -{ - int ret; - - DEBUGP("ip_ct_h323: OpenLogicalChannel\n"); - - if (olc->forwardLogicalChannelParameters.multiplexParameters.choice == - eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters) - { - ret = process_h245_channel(pskb, ct, ctinfo, data, dataoff, - &olc-> - forwardLogicalChannelParameters. - multiplexParameters. - h2250LogicalChannelParameters); - if (ret < 0) - return -1; - } - - if ((olc->options & - eOpenLogicalChannel_reverseLogicalChannelParameters) && - (olc->reverseLogicalChannelParameters.options & - eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters) - && (olc->reverseLogicalChannelParameters.multiplexParameters. - choice == - eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)) - { - ret = - process_h245_channel(pskb, ct, ctinfo, data, dataoff, - &olc-> - reverseLogicalChannelParameters. - multiplexParameters. - h2250LogicalChannelParameters); - if (ret < 0) - return -1; - } - - if ((olc->options & eOpenLogicalChannel_separateStack) && - olc->forwardLogicalChannelParameters.dataType.choice == - eDataType_data && - olc->forwardLogicalChannelParameters.dataType.data.application. - choice == eDataApplicationCapability_application_t120 && - olc->forwardLogicalChannelParameters.dataType.data.application. - t120.choice == eDataProtocolCapability_separateLANStack && - olc->separateStack.networkAddress.choice == - eNetworkAccessParameters_networkAddress_localAreaAddress) { - ret = expect_t120(pskb, ct, ctinfo, data, dataoff, - &olc->separateStack.networkAddress. - localAreaAddress); - if (ret < 0) - return -1; - } - - return 0; -} - -/****************************************************************************/ -static int process_olca(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - OpenLogicalChannelAck * olca) -{ - H2250LogicalChannelAckParameters *ack; - int ret; - - DEBUGP("ip_ct_h323: OpenLogicalChannelAck\n"); - - if ((olca->options & - eOpenLogicalChannelAck_reverseLogicalChannelParameters) && - (olca->reverseLogicalChannelParameters.options & - eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters) - && (olca->reverseLogicalChannelParameters.multiplexParameters. - choice == - eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)) - { - ret = process_h245_channel(pskb, ct, ctinfo, data, dataoff, - &olca-> - reverseLogicalChannelParameters. - multiplexParameters. 
- h2250LogicalChannelParameters); - if (ret < 0) - return -1; - } - - if ((olca->options & - eOpenLogicalChannelAck_forwardMultiplexAckParameters) && - (olca->forwardMultiplexAckParameters.choice == - eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters)) - { - ack = &olca->forwardMultiplexAckParameters. - h2250LogicalChannelAckParameters; - if (ack->options & - eH2250LogicalChannelAckParameters_mediaChannel) { - /* RTP */ - ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, - &ack->mediaChannel); - if (ret < 0) - return -1; - } - - if (ack->options & - eH2250LogicalChannelAckParameters_mediaControlChannel) { - /* RTCP */ - ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, - &ack->mediaControlChannel); - if (ret < 0) - return -1; - } - } - - return 0; -} - -/****************************************************************************/ -static int process_h245(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - MultimediaSystemControlMessage * mscm) -{ - switch (mscm->choice) { - case eMultimediaSystemControlMessage_request: - if (mscm->request.choice == - eRequestMessage_openLogicalChannel) { - return process_olc(pskb, ct, ctinfo, data, dataoff, - &mscm->request.openLogicalChannel); - } - DEBUGP("ip_ct_h323: H.245 Request %d\n", - mscm->request.choice); - break; - case eMultimediaSystemControlMessage_response: - if (mscm->response.choice == - eResponseMessage_openLogicalChannelAck) { - return process_olca(pskb, ct, ctinfo, data, dataoff, - &mscm->response. - openLogicalChannelAck); - } - DEBUGP("ip_ct_h323: H.245 Response %d\n", - mscm->response.choice); - break; - default: - DEBUGP("ip_ct_h323: H.245 signal %d\n", mscm->choice); - break; - } - - return 0; -} - -/****************************************************************************/ -static int h245_help(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - static MultimediaSystemControlMessage mscm; - unsigned char *data = NULL; - int datalen; - int dataoff; - int ret; - - /* Until there's been traffic both ways, don't look in packets. */ - if (ctinfo != IP_CT_ESTABLISHED - && ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { - return NF_ACCEPT; - } - DEBUGP("ip_ct_h245: skblen = %u\n", (*pskb)->len); - - spin_lock_bh(&ip_h323_lock); - - /* Process each TPKT */ - while (get_tpkt_data(pskb, ct, ctinfo, &data, &datalen, &dataoff)) { - DEBUGP("ip_ct_h245: TPKT %u.%u.%u.%u->%u.%u.%u.%u, len=%d\n", - NIPQUAD(ip_hdr(*pskb)->saddr), - NIPQUAD(ip_hdr(*pskb)->daddr), datalen); - - /* Decode H.245 signal */ - ret = DecodeMultimediaSystemControlMessage(data, datalen, - &mscm); - if (ret < 0) { - if (net_ratelimit()) - printk("ip_ct_h245: decoding error: %s\n", - ret == H323_ERROR_BOUND ? 
- "out of bound" : "out of range"); - /* We don't drop when decoding error */ - break; - } - - /* Process H.245 signal */ - if (process_h245(pskb, ct, ctinfo, &data, dataoff, &mscm) < 0) - goto drop; - } - - spin_unlock_bh(&ip_h323_lock); - return NF_ACCEPT; - - drop: - spin_unlock_bh(&ip_h323_lock); - if (net_ratelimit()) - printk("ip_ct_h245: packet dropped\n"); - return NF_DROP; -} - -/****************************************************************************/ -static struct ip_conntrack_helper ip_conntrack_helper_h245 = { - .name = "H.245", - .me = THIS_MODULE, - .max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */ , - .timeout = 240, - .tuple = {.dst = {.protonum = IPPROTO_TCP}}, - .mask = {.src = {.u = {0xFFFF}}, - .dst = {.protonum = 0xFF}}, - .help = h245_help -}; - -/****************************************************************************/ -void ip_conntrack_h245_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this) -{ - write_lock_bh(&ip_conntrack_lock); - new->helper = &ip_conntrack_helper_h245; - write_unlock_bh(&ip_conntrack_lock); -} - -/****************************************************************************/ -int get_h225_addr(unsigned char *data, TransportAddress * addr, - __be32 * ip, u_int16_t * port) -{ - unsigned char *p; - - if (addr->choice != eTransportAddress_ipAddress) - return 0; - - p = data + addr->ipAddress.ip; - *ip = htonl((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | (p[3])); - *port = (p[4] << 8) | (p[5]); - - return 1; -} - -/****************************************************************************/ -static int expect_h245(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - TransportAddress * addr) -{ - int dir = CTINFO2DIR(ctinfo); - int ret = 0; - __be32 ip; - u_int16_t port; - struct ip_conntrack_expect *exp = NULL; - typeof(nat_h245_hook) nat_h245; - - /* Read h245Address */ - if (!get_h225_addr(*data, addr, &ip, &port) || - ip != ct->tuplehash[dir].tuple.src.ip || port == 0) - return 0; - - /* Create expect for h245 connection */ - if ((exp = ip_conntrack_expect_alloc(ct)) == NULL) - return -1; - exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - exp->tuple.src.u.tcp.port = 0; - exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; - exp->tuple.dst.u.tcp.port = htons(port); - exp->tuple.dst.protonum = IPPROTO_TCP; - exp->mask.src.ip = htonl(0xFFFFFFFF); - exp->mask.src.u.tcp.port = 0; - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.u.tcp.port = htons(0xFFFF); - exp->mask.dst.protonum = 0xFF; - exp->flags = 0; - - if (ct->tuplehash[dir].tuple.src.ip != - ct->tuplehash[!dir].tuple.dst.ip && - (nat_h245 = rcu_dereference(nat_h245_hook))) { - /* NAT needed */ - ret = nat_h245(pskb, ct, ctinfo, data, dataoff, addr, - port, exp); - } else { /* Conntrack only */ - exp->expectfn = ip_conntrack_h245_expect; - - if (ip_conntrack_expect_related(exp) == 0) { - DEBUGP("ip_ct_q931: expect H.245 " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), - ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), - ntohs(exp->tuple.dst.u.tcp.port)); - } else - ret = -1; - } - - ip_conntrack_expect_put(exp); - - return ret; -} - -/* Forwarding declaration */ -void ip_conntrack_q931_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this); - -/****************************************************************************/ -static int expect_callforwarding(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info 
ctinfo, - unsigned char **data, int dataoff, - TransportAddress * addr) -{ - int dir = CTINFO2DIR(ctinfo); - int ret = 0; - __be32 ip; - u_int16_t port; - struct ip_conntrack_expect *exp = NULL; - typeof(nat_callforwarding_hook) nat_callforwarding; - - /* Read alternativeAddress */ - if (!get_h225_addr(*data, addr, &ip, &port) || port == 0) - return 0; - - /* If the calling party is on the same side of the forward-to party, - * we don't need to track the second call */ - if (callforward_filter) { - struct rtable *rt1, *rt2; - struct flowi fl1 = { - .fl4_dst = ip, - }; - struct flowi fl2 = { - .fl4_dst = ct->tuplehash[!dir].tuple.src.ip, - }; - - if (ip_route_output_key(&rt1, &fl1) == 0) { - if (ip_route_output_key(&rt2, &fl2) == 0) { - if (rt1->rt_gateway == rt2->rt_gateway && - rt1->u.dst.dev == rt2->u.dst.dev) - ret = 1; - dst_release(&rt2->u.dst); - } - dst_release(&rt1->u.dst); - } - if (ret) { - DEBUGP("ip_ct_q931: Call Forwarding not tracked\n"); - return 0; - } - } - - /* Create expect for the second call leg */ - if ((exp = ip_conntrack_expect_alloc(ct)) == NULL) - return -1; - exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - exp->tuple.src.u.tcp.port = 0; - exp->tuple.dst.ip = ip; - exp->tuple.dst.u.tcp.port = htons(port); - exp->tuple.dst.protonum = IPPROTO_TCP; - exp->mask.src.ip = htonl(0xFFFFFFFF); - exp->mask.src.u.tcp.port = 0; - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.u.tcp.port = htons(0xFFFF); - exp->mask.dst.protonum = 0xFF; - exp->flags = 0; - - if (ct->tuplehash[dir].tuple.src.ip != - ct->tuplehash[!dir].tuple.dst.ip && - (nat_callforwarding = rcu_dereference(nat_callforwarding_hook))) { - /* Need NAT */ - ret = nat_callforwarding(pskb, ct, ctinfo, data, dataoff, - addr, port, exp); - } else { /* Conntrack only */ - exp->expectfn = ip_conntrack_q931_expect; - - if (ip_conntrack_expect_related(exp) == 0) { - DEBUGP("ip_ct_q931: expect Call Forwarding " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), - ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), - ntohs(exp->tuple.dst.u.tcp.port)); - } else - ret = -1; - } - - ip_conntrack_expect_put(exp); - - return ret; -} - -/****************************************************************************/ -static int process_setup(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - Setup_UUIE * setup) -{ - int dir = CTINFO2DIR(ctinfo); - int ret; - int i; - __be32 ip; - u_int16_t port; - typeof(set_h225_addr_hook) set_h225_addr; - - DEBUGP("ip_ct_q931: Setup\n"); - - if (setup->options & eSetup_UUIE_h245Address) { - ret = expect_h245(pskb, ct, ctinfo, data, dataoff, - &setup->h245Address); - if (ret < 0) - return -1; - } - - set_h225_addr = rcu_dereference(set_h225_addr_hook); - - if ((setup->options & eSetup_UUIE_destCallSignalAddress) && - (set_h225_addr) && - get_h225_addr(*data, &setup->destCallSignalAddress, &ip, &port) && - ip != ct->tuplehash[!dir].tuple.src.ip) { - DEBUGP("ip_ct_q931: set destCallSignalAddress " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(ip), port, - NIPQUAD(ct->tuplehash[!dir].tuple.src.ip), - ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); - ret = set_h225_addr(pskb, data, dataoff, - &setup->destCallSignalAddress, - ct->tuplehash[!dir].tuple.src.ip, - ntohs(ct->tuplehash[!dir].tuple.src. 
- u.tcp.port)); - if (ret < 0) - return -1; - } - - if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) && - (set_h225_addr) && - get_h225_addr(*data, &setup->sourceCallSignalAddress, &ip, &port) - && ip != ct->tuplehash[!dir].tuple.dst.ip) { - DEBUGP("ip_ct_q931: set sourceCallSignalAddress " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(ip), port, - NIPQUAD(ct->tuplehash[!dir].tuple.dst.ip), - ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); - ret = set_h225_addr(pskb, data, dataoff, - &setup->sourceCallSignalAddress, - ct->tuplehash[!dir].tuple.dst.ip, - ntohs(ct->tuplehash[!dir].tuple.dst. - u.tcp.port)); - if (ret < 0) - return -1; - } - - if (setup->options & eSetup_UUIE_fastStart) { - for (i = 0; i < setup->fastStart.count; i++) { - ret = process_olc(pskb, ct, ctinfo, data, dataoff, - &setup->fastStart.item[i]); - if (ret < 0) - return -1; - } - } - - return 0; -} - -/****************************************************************************/ -static int process_callproceeding(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - CallProceeding_UUIE * callproc) -{ - int ret; - int i; - - DEBUGP("ip_ct_q931: CallProceeding\n"); - - if (callproc->options & eCallProceeding_UUIE_h245Address) { - ret = expect_h245(pskb, ct, ctinfo, data, dataoff, - &callproc->h245Address); - if (ret < 0) - return -1; - } - - if (callproc->options & eCallProceeding_UUIE_fastStart) { - for (i = 0; i < callproc->fastStart.count; i++) { - ret = process_olc(pskb, ct, ctinfo, data, dataoff, - &callproc->fastStart.item[i]); - if (ret < 0) - return -1; - } - } - - return 0; -} - -/****************************************************************************/ -static int process_connect(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - Connect_UUIE * connect) -{ - int ret; - int i; - - DEBUGP("ip_ct_q931: Connect\n"); - - if (connect->options & eConnect_UUIE_h245Address) { - ret = expect_h245(pskb, ct, ctinfo, data, dataoff, - &connect->h245Address); - if (ret < 0) - return -1; - } - - if (connect->options & eConnect_UUIE_fastStart) { - for (i = 0; i < connect->fastStart.count; i++) { - ret = process_olc(pskb, ct, ctinfo, data, dataoff, - &connect->fastStart.item[i]); - if (ret < 0) - return -1; - } - } - - return 0; -} - -/****************************************************************************/ -static int process_alerting(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - Alerting_UUIE * alert) -{ - int ret; - int i; - - DEBUGP("ip_ct_q931: Alerting\n"); - - if (alert->options & eAlerting_UUIE_h245Address) { - ret = expect_h245(pskb, ct, ctinfo, data, dataoff, - &alert->h245Address); - if (ret < 0) - return -1; - } - - if (alert->options & eAlerting_UUIE_fastStart) { - for (i = 0; i < alert->fastStart.count; i++) { - ret = process_olc(pskb, ct, ctinfo, data, dataoff, - &alert->fastStart.item[i]); - if (ret < 0) - return -1; - } - } - - return 0; -} - -/****************************************************************************/ -static int process_information(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - Information_UUIE * info) -{ - int ret; - int i; - - DEBUGP("ip_ct_q931: Information\n"); - - if (info->options & eInformation_UUIE_fastStart) { - for (i = 0; i < info->fastStart.count; i++) { - ret = 
process_olc(pskb, ct, ctinfo, data, dataoff, - &info->fastStart.item[i]); - if (ret < 0) - return -1; - } - } - - return 0; -} - -/****************************************************************************/ -static int process_facility(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - Facility_UUIE * facility) -{ - int ret; - int i; - - DEBUGP("ip_ct_q931: Facility\n"); - - if (facility->reason.choice == eFacilityReason_callForwarded) { - if (facility->options & eFacility_UUIE_alternativeAddress) - return expect_callforwarding(pskb, ct, ctinfo, data, - dataoff, - &facility-> - alternativeAddress); - return 0; - } - - if (facility->options & eFacility_UUIE_h245Address) { - ret = expect_h245(pskb, ct, ctinfo, data, dataoff, - &facility->h245Address); - if (ret < 0) - return -1; - } - - if (facility->options & eFacility_UUIE_fastStart) { - for (i = 0; i < facility->fastStart.count; i++) { - ret = process_olc(pskb, ct, ctinfo, data, dataoff, - &facility->fastStart.item[i]); - if (ret < 0) - return -1; - } - } - - return 0; -} - -/****************************************************************************/ -static int process_progress(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - Progress_UUIE * progress) -{ - int ret; - int i; - - DEBUGP("ip_ct_q931: Progress\n"); - - if (progress->options & eProgress_UUIE_h245Address) { - ret = expect_h245(pskb, ct, ctinfo, data, dataoff, - &progress->h245Address); - if (ret < 0) - return -1; - } - - if (progress->options & eProgress_UUIE_fastStart) { - for (i = 0; i < progress->fastStart.count; i++) { - ret = process_olc(pskb, ct, ctinfo, data, dataoff, - &progress->fastStart.item[i]); - if (ret < 0) - return -1; - } - } - - return 0; -} - -/****************************************************************************/ -static int process_q931(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, Q931 * q931) -{ - H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu; - int i; - int ret = 0; - - switch (pdu->h323_message_body.choice) { - case eH323_UU_PDU_h323_message_body_setup: - ret = process_setup(pskb, ct, ctinfo, data, dataoff, - &pdu->h323_message_body.setup); - break; - case eH323_UU_PDU_h323_message_body_callProceeding: - ret = process_callproceeding(pskb, ct, ctinfo, data, dataoff, - &pdu->h323_message_body. - callProceeding); - break; - case eH323_UU_PDU_h323_message_body_connect: - ret = process_connect(pskb, ct, ctinfo, data, dataoff, - &pdu->h323_message_body.connect); - break; - case eH323_UU_PDU_h323_message_body_alerting: - ret = process_alerting(pskb, ct, ctinfo, data, dataoff, - &pdu->h323_message_body.alerting); - break; - case eH323_UU_PDU_h323_message_body_information: - ret = process_information(pskb, ct, ctinfo, data, dataoff, - &pdu->h323_message_body. 
- information); - break; - case eH323_UU_PDU_h323_message_body_facility: - ret = process_facility(pskb, ct, ctinfo, data, dataoff, - &pdu->h323_message_body.facility); - break; - case eH323_UU_PDU_h323_message_body_progress: - ret = process_progress(pskb, ct, ctinfo, data, dataoff, - &pdu->h323_message_body.progress); - break; - default: - DEBUGP("ip_ct_q931: Q.931 signal %d\n", - pdu->h323_message_body.choice); - break; - } - - if (ret < 0) - return -1; - - if (pdu->options & eH323_UU_PDU_h245Control) { - for (i = 0; i < pdu->h245Control.count; i++) { - ret = process_h245(pskb, ct, ctinfo, data, dataoff, - &pdu->h245Control.item[i]); - if (ret < 0) - return -1; - } - } - - return 0; -} - -/****************************************************************************/ -static int q931_help(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - static Q931 q931; - unsigned char *data = NULL; - int datalen; - int dataoff; - int ret; - - /* Until there's been traffic both ways, don't look in packets. */ - if (ctinfo != IP_CT_ESTABLISHED - && ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { - return NF_ACCEPT; - } - DEBUGP("ip_ct_q931: skblen = %u\n", (*pskb)->len); - - spin_lock_bh(&ip_h323_lock); - - /* Process each TPKT */ - while (get_tpkt_data(pskb, ct, ctinfo, &data, &datalen, &dataoff)) { - DEBUGP("ip_ct_q931: TPKT %u.%u.%u.%u->%u.%u.%u.%u, len=%d\n", - NIPQUAD(ip_hdr(*pskb)->saddr), - NIPQUAD(ip_hdr(*pskb)->daddr), datalen); - - /* Decode Q.931 signal */ - ret = DecodeQ931(data, datalen, &q931); - if (ret < 0) { - if (net_ratelimit()) - printk("ip_ct_q931: decoding error: %s\n", - ret == H323_ERROR_BOUND ? - "out of bound" : "out of range"); - /* We don't drop when decoding error */ - break; - } - - /* Process Q.931 signal */ - if (process_q931(pskb, ct, ctinfo, &data, dataoff, &q931) < 0) - goto drop; - } - - spin_unlock_bh(&ip_h323_lock); - return NF_ACCEPT; - - drop: - spin_unlock_bh(&ip_h323_lock); - if (net_ratelimit()) - printk("ip_ct_q931: packet dropped\n"); - return NF_DROP; -} - -/****************************************************************************/ -static struct ip_conntrack_helper ip_conntrack_helper_q931 = { - .name = "Q.931", - .me = THIS_MODULE, - .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4 /* T.120 and H.245 */ , - .timeout = 240, - .tuple = {.src = {.u = {.tcp = {.port = __constant_htons(Q931_PORT)}}}, - .dst = {.protonum = IPPROTO_TCP}}, - .mask = {.src = {.u = {0xFFFF}}, - .dst = {.protonum = 0xFF}}, - .help = q931_help -}; - -/****************************************************************************/ -void ip_conntrack_q931_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this) -{ - write_lock_bh(&ip_conntrack_lock); - new->helper = &ip_conntrack_helper_q931; - write_unlock_bh(&ip_conntrack_lock); -} - -/****************************************************************************/ -static unsigned char *get_udp_data(struct sk_buff **pskb, int *datalen) -{ - struct udphdr _uh, *uh; - int dataoff; - - uh = skb_header_pointer(*pskb, ip_hdrlen(*pskb), sizeof(_uh), &_uh); - if (uh == NULL) - return NULL; - dataoff = ip_hdrlen(*pskb) + sizeof(_uh); - if (dataoff >= (*pskb)->len) - return NULL; - *datalen = (*pskb)->len - dataoff; - return skb_header_pointer(*pskb, dataoff, *datalen, h323_buffer); -} - -/****************************************************************************/ -static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct, - __be32 ip, u_int16_t port) -{ - struct ip_conntrack_expect *exp; 
- struct ip_conntrack_tuple tuple; - - tuple.src.ip = 0; - tuple.src.u.tcp.port = 0; - tuple.dst.ip = ip; - tuple.dst.u.tcp.port = htons(port); - tuple.dst.protonum = IPPROTO_TCP; - - exp = __ip_conntrack_expect_find(&tuple); - if (exp && exp->master == ct) - return exp; - return NULL; -} - -/****************************************************************************/ -static int set_expect_timeout(struct ip_conntrack_expect *exp, - unsigned timeout) -{ - if (!exp || !del_timer(&exp->timeout)) - return 0; - - exp->timeout.expires = jiffies + timeout * HZ; - add_timer(&exp->timeout); - - return 1; -} - -/****************************************************************************/ -static int expect_q931(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, - TransportAddress * addr, int count) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int dir = CTINFO2DIR(ctinfo); - int ret = 0; - int i; - __be32 ip; - u_int16_t port; - struct ip_conntrack_expect *exp; - typeof(nat_q931_hook) nat_q931; - - /* Look for the first related address */ - for (i = 0; i < count; i++) { - if (get_h225_addr(*data, &addr[i], &ip, &port) && - ip == ct->tuplehash[dir].tuple.src.ip && port != 0) - break; - } - - if (i >= count) /* Not found */ - return 0; - - /* Create expect for Q.931 */ - if ((exp = ip_conntrack_expect_alloc(ct)) == NULL) - return -1; - exp->tuple.src.ip = gkrouted_only ? /* only accept calls from GK? */ - ct->tuplehash[!dir].tuple.src.ip : 0; - exp->tuple.src.u.tcp.port = 0; - exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; - exp->tuple.dst.u.tcp.port = htons(port); - exp->tuple.dst.protonum = IPPROTO_TCP; - exp->mask.src.ip = gkrouted_only ? htonl(0xFFFFFFFF) : 0; - exp->mask.src.u.tcp.port = 0; - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.u.tcp.port = htons(0xFFFF); - exp->mask.dst.protonum = 0xFF; - exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple calls */ - - nat_q931 = rcu_dereference(nat_q931_hook); - if (nat_q931) { /* Need NAT */ - ret = nat_q931(pskb, ct, ctinfo, data, addr, i, port, exp); - } else { /* Conntrack only */ - exp->expectfn = ip_conntrack_q931_expect; - - if (ip_conntrack_expect_related(exp) == 0) { - DEBUGP("ip_ct_ras: expect Q.931 " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), - ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), - ntohs(exp->tuple.dst.u.tcp.port)); - - /* Save port for looking up expect in processing RCF */ - info->sig_port[dir] = port; - } else - ret = -1; - } - - ip_conntrack_expect_put(exp); - - return ret; -} - -/****************************************************************************/ -static int process_grq(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, GatekeeperRequest * grq) -{ - typeof(set_ras_addr_hook) set_ras_addr; - - DEBUGP("ip_ct_ras: GRQ\n"); - - set_ras_addr = rcu_dereference(set_ras_addr_hook); - if (set_ras_addr) /* NATed */ - return set_ras_addr(pskb, ct, ctinfo, data, - &grq->rasAddress, 1); - return 0; -} - -/* Declare before using */ -static void ip_conntrack_ras_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this); - -/****************************************************************************/ -static int process_gcf(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, GatekeeperConfirm * gcf) -{ - int dir = CTINFO2DIR(ctinfo); - int ret = 0; - __be32 ip; - u_int16_t port; 
- struct ip_conntrack_expect *exp; - - DEBUGP("ip_ct_ras: GCF\n"); - - if (!get_h225_addr(*data, &gcf->rasAddress, &ip, &port)) - return 0; - - /* Registration port is the same as discovery port */ - if (ip == ct->tuplehash[dir].tuple.src.ip && - port == ntohs(ct->tuplehash[dir].tuple.src.u.udp.port)) - return 0; - - /* Avoid RAS expectation loops. A GCF is never expected. */ - if (test_bit(IPS_EXPECTED_BIT, &ct->status)) - return 0; - - /* Need new expect */ - if ((exp = ip_conntrack_expect_alloc(ct)) == NULL) - return -1; - exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - exp->tuple.src.u.tcp.port = 0; - exp->tuple.dst.ip = ip; - exp->tuple.dst.u.tcp.port = htons(port); - exp->tuple.dst.protonum = IPPROTO_UDP; - exp->mask.src.ip = htonl(0xFFFFFFFF); - exp->mask.src.u.tcp.port = 0; - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.u.tcp.port = htons(0xFFFF); - exp->mask.dst.protonum = 0xFF; - exp->flags = 0; - exp->expectfn = ip_conntrack_ras_expect; - if (ip_conntrack_expect_related(exp) == 0) { - DEBUGP("ip_ct_ras: expect RAS " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), - ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), - ntohs(exp->tuple.dst.u.tcp.port)); - } else - ret = -1; - - ip_conntrack_expect_put(exp); - - return ret; -} - -/****************************************************************************/ -static int process_rrq(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, RegistrationRequest * rrq) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int ret; - typeof(set_ras_addr_hook) set_ras_addr; - - DEBUGP("ip_ct_ras: RRQ\n"); - - ret = expect_q931(pskb, ct, ctinfo, data, - rrq->callSignalAddress.item, - rrq->callSignalAddress.count); - if (ret < 0) - return -1; - - set_ras_addr = rcu_dereference(set_ras_addr_hook); - if (set_ras_addr) { - ret = set_ras_addr(pskb, ct, ctinfo, data, - rrq->rasAddress.item, - rrq->rasAddress.count); - if (ret < 0) - return -1; - } - - if (rrq->options & eRegistrationRequest_timeToLive) { - DEBUGP("ip_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive); - info->timeout = rrq->timeToLive; - } else - info->timeout = default_rrq_ttl; - - return 0; -} - -/****************************************************************************/ -static int process_rcf(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, RegistrationConfirm * rcf) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int dir = CTINFO2DIR(ctinfo); - int ret; - struct ip_conntrack_expect *exp; - typeof(set_sig_addr_hook) set_sig_addr; - - DEBUGP("ip_ct_ras: RCF\n"); - - set_sig_addr = rcu_dereference(set_sig_addr_hook); - if (set_sig_addr) { - ret = set_sig_addr(pskb, ct, ctinfo, data, - rcf->callSignalAddress.item, - rcf->callSignalAddress.count); - if (ret < 0) - return -1; - } - - if (rcf->options & eRegistrationConfirm_timeToLive) { - DEBUGP("ip_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive); - info->timeout = rcf->timeToLive; - } - - if (info->timeout > 0) { - DEBUGP - ("ip_ct_ras: set RAS connection timeout to %u seconds\n", - info->timeout); - ip_ct_refresh(ct, *pskb, info->timeout * HZ); - - /* Set expect timeout */ - read_lock_bh(&ip_conntrack_lock); - exp = find_expect(ct, ct->tuplehash[dir].tuple.dst.ip, - info->sig_port[!dir]); - if (exp) { - DEBUGP("ip_ct_ras: set Q.931 expect " - "(%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu) " - "timeout to %u seconds\n", - NIPQUAD(exp->tuple.src.ip), - 
ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), - ntohs(exp->tuple.dst.u.tcp.port), - info->timeout); - set_expect_timeout(exp, info->timeout); - } - read_unlock_bh(&ip_conntrack_lock); - } - - return 0; -} - -/****************************************************************************/ -static int process_urq(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, UnregistrationRequest * urq) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int dir = CTINFO2DIR(ctinfo); - int ret; - typeof(set_sig_addr_hook) set_sig_addr; - - DEBUGP("ip_ct_ras: URQ\n"); - - set_sig_addr = rcu_dereference(set_sig_addr_hook); - if (set_sig_addr) { - ret = set_sig_addr(pskb, ct, ctinfo, data, - urq->callSignalAddress.item, - urq->callSignalAddress.count); - if (ret < 0) - return -1; - } - - /* Clear old expect */ - ip_ct_remove_expectations(ct); - info->sig_port[dir] = 0; - info->sig_port[!dir] = 0; - - /* Give it 30 seconds for UCF or URJ */ - ip_ct_refresh(ct, *pskb, 30 * HZ); - - return 0; -} - -/****************************************************************************/ -static int process_arq(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, AdmissionRequest * arq) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int dir = CTINFO2DIR(ctinfo); - __be32 ip; - u_int16_t port; - typeof(set_h225_addr_hook) set_h225_addr; - - DEBUGP("ip_ct_ras: ARQ\n"); - - set_h225_addr = rcu_dereference(set_h225_addr_hook); - if ((arq->options & eAdmissionRequest_destCallSignalAddress) && - get_h225_addr(*data, &arq->destCallSignalAddress, &ip, &port) && - ip == ct->tuplehash[dir].tuple.src.ip && - port == info->sig_port[dir] && set_h225_addr) { - /* Answering ARQ */ - return set_h225_addr(pskb, data, 0, - &arq->destCallSignalAddress, - ct->tuplehash[!dir].tuple.dst.ip, - info->sig_port[!dir]); - } - - if ((arq->options & eAdmissionRequest_srcCallSignalAddress) && - get_h225_addr(*data, &arq->srcCallSignalAddress, &ip, &port) && - ip == ct->tuplehash[dir].tuple.src.ip && set_h225_addr) { - /* Calling ARQ */ - return set_h225_addr(pskb, data, 0, - &arq->srcCallSignalAddress, - ct->tuplehash[!dir].tuple.dst.ip, - port); - } - - return 0; -} - -/****************************************************************************/ -static int process_acf(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, AdmissionConfirm * acf) -{ - int dir = CTINFO2DIR(ctinfo); - int ret = 0; - __be32 ip; - u_int16_t port; - struct ip_conntrack_expect *exp; - typeof(set_sig_addr_hook) set_sig_addr; - - DEBUGP("ip_ct_ras: ACF\n"); - - if (!get_h225_addr(*data, &acf->destCallSignalAddress, &ip, &port)) - return 0; - - if (ip == ct->tuplehash[dir].tuple.dst.ip) { /* Answering ACF */ - set_sig_addr = rcu_dereference(set_sig_addr_hook); - if (set_sig_addr) - return set_sig_addr(pskb, ct, ctinfo, data, - &acf->destCallSignalAddress, 1); - return 0; - } - - /* Need new expect */ - if ((exp = ip_conntrack_expect_alloc(ct)) == NULL) - return -1; - exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - exp->tuple.src.u.tcp.port = 0; - exp->tuple.dst.ip = ip; - exp->tuple.dst.u.tcp.port = htons(port); - exp->tuple.dst.protonum = IPPROTO_TCP; - exp->mask.src.ip = htonl(0xFFFFFFFF); - exp->mask.src.u.tcp.port = 0; - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.u.tcp.port = htons(0xFFFF); - exp->mask.dst.protonum = 0xFF; - exp->flags = 
IP_CT_EXPECT_PERMANENT; - exp->expectfn = ip_conntrack_q931_expect; - - if (ip_conntrack_expect_related(exp) == 0) { - DEBUGP("ip_ct_ras: expect Q.931 " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), - ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), - ntohs(exp->tuple.dst.u.tcp.port)); - } else - ret = -1; - - ip_conntrack_expect_put(exp); - - return ret; -} - -/****************************************************************************/ -static int process_lrq(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, LocationRequest * lrq) -{ - typeof(set_ras_addr_hook) set_ras_addr; - - DEBUGP("ip_ct_ras: LRQ\n"); - - set_ras_addr = rcu_dereference(set_ras_addr_hook); - if (set_ras_addr) - return set_ras_addr(pskb, ct, ctinfo, data, - &lrq->replyAddress, 1); - return 0; -} - -/****************************************************************************/ -static int process_lcf(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, LocationConfirm * lcf) -{ - int dir = CTINFO2DIR(ctinfo); - int ret = 0; - __be32 ip; - u_int16_t port; - struct ip_conntrack_expect *exp = NULL; - - DEBUGP("ip_ct_ras: LCF\n"); - - if (!get_h225_addr(*data, &lcf->callSignalAddress, &ip, &port)) - return 0; - - /* Need new expect for call signal */ - if ((exp = ip_conntrack_expect_alloc(ct)) == NULL) - return -1; - exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - exp->tuple.src.u.tcp.port = 0; - exp->tuple.dst.ip = ip; - exp->tuple.dst.u.tcp.port = htons(port); - exp->tuple.dst.protonum = IPPROTO_TCP; - exp->mask.src.ip = htonl(0xFFFFFFFF); - exp->mask.src.u.tcp.port = 0; - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.u.tcp.port = htons(0xFFFF); - exp->mask.dst.protonum = 0xFF; - exp->flags = IP_CT_EXPECT_PERMANENT; - exp->expectfn = ip_conntrack_q931_expect; - - if (ip_conntrack_expect_related(exp) == 0) { - DEBUGP("ip_ct_ras: expect Q.931 " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), - ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), - ntohs(exp->tuple.dst.u.tcp.port)); - } else - ret = -1; - - ip_conntrack_expect_put(exp); - - /* Ignore rasAddress */ - - return ret; -} - -/****************************************************************************/ -static int process_irr(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, InfoRequestResponse * irr) -{ - int ret; - typeof(set_ras_addr_hook) set_ras_addr; - typeof(set_sig_addr_hook) set_sig_addr; - - DEBUGP("ip_ct_ras: IRR\n"); - - set_ras_addr = rcu_dereference(set_ras_addr_hook); - if (set_ras_addr) { - ret = set_ras_addr(pskb, ct, ctinfo, data, - &irr->rasAddress, 1); - if (ret < 0) - return -1; - } - - set_sig_addr = rcu_dereference(set_sig_addr_hook); - if (set_sig_addr) { - ret = set_sig_addr(pskb, ct, ctinfo, data, - irr->callSignalAddress.item, - irr->callSignalAddress.count); - if (ret < 0) - return -1; - } - - return 0; -} - -/****************************************************************************/ -static int process_ras(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, RasMessage * ras) -{ - switch (ras->choice) { - case eRasMessage_gatekeeperRequest: - return process_grq(pskb, ct, ctinfo, data, - &ras->gatekeeperRequest); - case eRasMessage_gatekeeperConfirm: - return process_gcf(pskb, ct, ctinfo, data, - &ras->gatekeeperConfirm); - case 
eRasMessage_registrationRequest: - return process_rrq(pskb, ct, ctinfo, data, - &ras->registrationRequest); - case eRasMessage_registrationConfirm: - return process_rcf(pskb, ct, ctinfo, data, - &ras->registrationConfirm); - case eRasMessage_unregistrationRequest: - return process_urq(pskb, ct, ctinfo, data, - &ras->unregistrationRequest); - case eRasMessage_admissionRequest: - return process_arq(pskb, ct, ctinfo, data, - &ras->admissionRequest); - case eRasMessage_admissionConfirm: - return process_acf(pskb, ct, ctinfo, data, - &ras->admissionConfirm); - case eRasMessage_locationRequest: - return process_lrq(pskb, ct, ctinfo, data, - &ras->locationRequest); - case eRasMessage_locationConfirm: - return process_lcf(pskb, ct, ctinfo, data, - &ras->locationConfirm); - case eRasMessage_infoRequestResponse: - return process_irr(pskb, ct, ctinfo, data, - &ras->infoRequestResponse); - default: - DEBUGP("ip_ct_ras: RAS message %d\n", ras->choice); - break; - } - - return 0; -} - -/****************************************************************************/ -static int ras_help(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - static RasMessage ras; - unsigned char *data; - int datalen = 0; - int ret; - - DEBUGP("ip_ct_ras: skblen = %u\n", (*pskb)->len); - - spin_lock_bh(&ip_h323_lock); - - /* Get UDP data */ - data = get_udp_data(pskb, &datalen); - if (data == NULL) - goto accept; - DEBUGP("ip_ct_ras: RAS message %u.%u.%u.%u->%u.%u.%u.%u, len=%d\n", - NIPQUAD(ip_hdr(*pskb)->saddr), - NIPQUAD(ip_hdr(*pskb)->daddr), datalen); - - /* Decode RAS message */ - ret = DecodeRasMessage(data, datalen, &ras); - if (ret < 0) { - if (net_ratelimit()) - printk("ip_ct_ras: decoding error: %s\n", - ret == H323_ERROR_BOUND ? - "out of bound" : "out of range"); - goto accept; - } - - /* Process RAS message */ - if (process_ras(pskb, ct, ctinfo, &data, &ras) < 0) - goto drop; - - accept: - spin_unlock_bh(&ip_h323_lock); - return NF_ACCEPT; - - drop: - spin_unlock_bh(&ip_h323_lock); - if (net_ratelimit()) - printk("ip_ct_ras: packet dropped\n"); - return NF_DROP; -} - -/****************************************************************************/ -static struct ip_conntrack_helper ip_conntrack_helper_ras = { - .name = "RAS", - .me = THIS_MODULE, - .max_expected = 32, - .timeout = 240, - .tuple = {.src = {.u = {.tcp = {.port = __constant_htons(RAS_PORT)}}}, - .dst = {.protonum = IPPROTO_UDP}}, - .mask = {.src = {.u = {0xFFFE}}, - .dst = {.protonum = 0xFF}}, - .help = ras_help, -}; - -/****************************************************************************/ -static void ip_conntrack_ras_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this) -{ - write_lock_bh(&ip_conntrack_lock); - new->helper = &ip_conntrack_helper_ras; - write_unlock_bh(&ip_conntrack_lock); -} - -/****************************************************************************/ -/* Not __exit - called from init() */ -static void fini(void) -{ - ip_conntrack_helper_unregister(&ip_conntrack_helper_ras); - ip_conntrack_helper_unregister(&ip_conntrack_helper_q931); - kfree(h323_buffer); - DEBUGP("ip_ct_h323: fini\n"); -} - -/****************************************************************************/ -static int __init init(void) -{ - int ret; - - h323_buffer = kmalloc(65536, GFP_KERNEL); - if (!h323_buffer) - return -ENOMEM; - if ((ret = ip_conntrack_helper_register(&ip_conntrack_helper_q931)) || - (ret = ip_conntrack_helper_register(&ip_conntrack_helper_ras))) { - fini(); - return ret; - } - 
DEBUGP("ip_ct_h323: init success\n"); - return 0; -} - -/****************************************************************************/ -module_init(init); -module_exit(fini); - -EXPORT_SYMBOL_GPL(get_h225_addr); -EXPORT_SYMBOL_GPL(ip_conntrack_h245_expect); -EXPORT_SYMBOL_GPL(ip_conntrack_q931_expect); -EXPORT_SYMBOL_GPL(set_h245_addr_hook); -EXPORT_SYMBOL_GPL(set_h225_addr_hook); -EXPORT_SYMBOL_GPL(set_sig_addr_hook); -EXPORT_SYMBOL_GPL(set_ras_addr_hook); -EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook); -EXPORT_SYMBOL_GPL(nat_t120_hook); -EXPORT_SYMBOL_GPL(nat_h245_hook); -EXPORT_SYMBOL_GPL(nat_callforwarding_hook); -EXPORT_SYMBOL_GPL(nat_q931_hook); - -MODULE_AUTHOR("Jing Min Zhao "); -MODULE_DESCRIPTION("H.323 connection tracking helper"); -MODULE_LICENSE("GPL"); diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c deleted file mode 100644 index f5ab8e4b97cb..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c +++ /dev/null @@ -1,684 +0,0 @@ -/* - * ip_conntrack_pptp.c - Version 3.0 - * - * Connection tracking support for PPTP (Point to Point Tunneling Protocol). - * PPTP is a a protocol for creating virtual private networks. - * It is a specification defined by Microsoft and some vendors - * working with Microsoft. PPTP is built on top of a modified - * version of the Internet Generic Routing Encapsulation Protocol. - * GRE is defined in RFC 1701 and RFC 1702. Documentation of - * PPTP can be found in RFC 2637 - * - * (C) 2000-2005 by Harald Welte - * - * Development of this code funded by Astaro AG (http://www.astaro.com/) - * - * Limitations: - * - We blindly assume that control connections are always - * established in PNS->PAC direction. This is a violation - * of RFFC2673 - * - We can only support one single call within each session - * - * TODO: - * - testing of incoming PPTP calls - * - * Changes: - * 2002-02-05 - Version 1.3 - * - Call ip_conntrack_unexpect_related() from - * pptp_destroy_siblings() to destroy expectations in case - * CALL_DISCONNECT_NOTIFY or tcp fin packet was seen - * (Philip Craig ) - * - Add Version information at module loadtime - * 2002-02-10 - Version 1.6 - * - move to C99 style initializers - * - remove second expectation if first arrives - * 2004-10-22 - Version 2.0 - * - merge Mandrake's 2.6.x port with recent 2.6.x API changes - * - fix lots of linear skb assumptions from Mandrake's port - * 2005-06-10 - Version 2.1 - * - use ip_conntrack_expect_free() instead of kfree() on the - * expect's (which are from the slab for quite some time) - * 2005-06-10 - Version 3.0 - * - port helper to post-2.6.11 API changes, - * funded by Oxcoda NetBox Blue (http://www.netboxblue.com/) - * 2005-07-30 - Version 3.1 - * - port helper to 2.6.13 API changes - * - */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#define IP_CT_PPTP_VERSION "3.1" - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Harald Welte "); -MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP"); - -static DEFINE_SPINLOCK(ip_pptp_lock); - -int -(*ip_nat_pptp_hook_outbound)(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq); - -int -(*ip_nat_pptp_hook_inbound)(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq); - -void -(*ip_nat_pptp_hook_exp_gre)(struct ip_conntrack_expect 
*expect_orig, - struct ip_conntrack_expect *expect_reply); - -void -(*ip_nat_pptp_hook_expectfn)(struct ip_conntrack *ct, - struct ip_conntrack_expect *exp); - -#if 0 -/* PptpControlMessageType names */ -const char *pptp_msg_name[] = { - "UNKNOWN_MESSAGE", - "START_SESSION_REQUEST", - "START_SESSION_REPLY", - "STOP_SESSION_REQUEST", - "STOP_SESSION_REPLY", - "ECHO_REQUEST", - "ECHO_REPLY", - "OUT_CALL_REQUEST", - "OUT_CALL_REPLY", - "IN_CALL_REQUEST", - "IN_CALL_REPLY", - "IN_CALL_CONNECT", - "CALL_CLEAR_REQUEST", - "CALL_DISCONNECT_NOTIFY", - "WAN_ERROR_NOTIFY", - "SET_LINK_INFO" -}; -EXPORT_SYMBOL(pptp_msg_name); -#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args) -#else -#define DEBUGP(format, args...) -#endif - -#define SECS *HZ -#define MINS * 60 SECS -#define HOURS * 60 MINS - -#define PPTP_GRE_TIMEOUT (10 MINS) -#define PPTP_GRE_STREAM_TIMEOUT (5 HOURS) - -static void pptp_expectfn(struct ip_conntrack *ct, - struct ip_conntrack_expect *exp) -{ - typeof(ip_nat_pptp_hook_expectfn) ip_nat_pptp_expectfn; - - DEBUGP("increasing timeouts\n"); - - /* increase timeout of GRE data channel conntrack entry */ - ct->proto.gre.timeout = PPTP_GRE_TIMEOUT; - ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT; - - /* Can you see how rusty this code is, compared with the pre-2.6.11 - * one? That's what happened to my shiny newnat of 2002 ;( -HW */ - - rcu_read_lock(); - ip_nat_pptp_expectfn = rcu_dereference(ip_nat_pptp_hook_expectfn); - if (!ip_nat_pptp_expectfn) { - struct ip_conntrack_tuple inv_t; - struct ip_conntrack_expect *exp_other; - - /* obviously this tuple inversion only works until you do NAT */ - invert_tuplepr(&inv_t, &exp->tuple); - DEBUGP("trying to unexpect other dir: "); - DUMP_TUPLE(&inv_t); - - exp_other = ip_conntrack_expect_find_get(&inv_t); - if (exp_other) { - /* delete other expectation. */ - DEBUGP("found\n"); - ip_conntrack_unexpect_related(exp_other); - ip_conntrack_expect_put(exp_other); - } else { - DEBUGP("not found\n"); - } - } else { - /* we need more than simple inversion */ - ip_nat_pptp_expectfn(ct, exp); - } - rcu_read_unlock(); -} - -static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t) -{ - struct ip_conntrack_tuple_hash *h; - struct ip_conntrack_expect *exp; - - DEBUGP("trying to timeout ct or exp for tuple "); - DUMP_TUPLE(t); - - h = ip_conntrack_find_get(t, NULL); - if (h) { - struct ip_conntrack *sibling = tuplehash_to_ctrack(h); - DEBUGP("setting timeout of conntrack %p to 0\n", sibling); - sibling->proto.gre.timeout = 0; - sibling->proto.gre.stream_timeout = 0; - if (del_timer(&sibling->timeout)) - sibling->timeout.function((unsigned long)sibling); - ip_conntrack_put(sibling); - return 1; - } else { - exp = ip_conntrack_expect_find_get(t); - if (exp) { - DEBUGP("unexpect_related of expect %p\n", exp); - ip_conntrack_unexpect_related(exp); - ip_conntrack_expect_put(exp); - return 1; - } - } - - return 0; -} - - -/* timeout GRE data connections */ -static void pptp_destroy_siblings(struct ip_conntrack *ct) -{ - struct ip_conntrack_tuple t; - - ip_ct_gre_keymap_destroy(ct); - /* Since ct->sibling_list has literally rusted away in 2.6.11, - * we now need another way to find out about our sibling - * contrack and expects... 
-HW */ - - /* try original (pns->pac) tuple */ - memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t)); - t.dst.protonum = IPPROTO_GRE; - t.src.u.gre.key = ct->help.ct_pptp_info.pns_call_id; - t.dst.u.gre.key = ct->help.ct_pptp_info.pac_call_id; - - if (!destroy_sibling_or_exp(&t)) - DEBUGP("failed to timeout original pns->pac ct/exp\n"); - - /* try reply (pac->pns) tuple */ - memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t)); - t.dst.protonum = IPPROTO_GRE; - t.src.u.gre.key = ct->help.ct_pptp_info.pac_call_id; - t.dst.u.gre.key = ct->help.ct_pptp_info.pns_call_id; - - if (!destroy_sibling_or_exp(&t)) - DEBUGP("failed to timeout reply pac->pns ct/exp\n"); -} - -/* expect GRE connections (PNS->PAC and PAC->PNS direction) */ -static inline int -exp_gre(struct ip_conntrack *ct, - __be16 callid, - __be16 peer_callid) -{ - struct ip_conntrack_expect *exp_orig, *exp_reply; - int ret = 1; - typeof(ip_nat_pptp_hook_exp_gre) ip_nat_pptp_exp_gre; - - exp_orig = ip_conntrack_expect_alloc(ct); - if (exp_orig == NULL) - goto out; - - exp_reply = ip_conntrack_expect_alloc(ct); - if (exp_reply == NULL) - goto out_put_orig; - - /* original direction, PNS->PAC */ - exp_orig->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip; - exp_orig->tuple.src.u.gre.key = peer_callid; - exp_orig->tuple.dst.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip; - exp_orig->tuple.dst.u.gre.key = callid; - exp_orig->tuple.dst.protonum = IPPROTO_GRE; - - exp_orig->mask.src.ip = htonl(0xffffffff); - exp_orig->mask.src.u.all = 0; - exp_orig->mask.dst.u.gre.key = htons(0xffff); - exp_orig->mask.dst.ip = htonl(0xffffffff); - exp_orig->mask.dst.protonum = 0xff; - - exp_orig->master = ct; - exp_orig->expectfn = pptp_expectfn; - exp_orig->flags = 0; - - /* both expectations are identical apart from tuple */ - memcpy(exp_reply, exp_orig, sizeof(*exp_reply)); - - /* reply direction, PAC->PNS */ - exp_reply->tuple.src.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip; - exp_reply->tuple.src.u.gre.key = callid; - exp_reply->tuple.dst.ip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip; - exp_reply->tuple.dst.u.gre.key = peer_callid; - exp_reply->tuple.dst.protonum = IPPROTO_GRE; - - ip_nat_pptp_exp_gre = rcu_dereference(ip_nat_pptp_hook_exp_gre); - if (ip_nat_pptp_exp_gre) - ip_nat_pptp_exp_gre(exp_orig, exp_reply); - if (ip_conntrack_expect_related(exp_orig) != 0) - goto out_put_both; - if (ip_conntrack_expect_related(exp_reply) != 0) - goto out_unexpect_orig; - - /* Add GRE keymap entries */ - if (ip_ct_gre_keymap_add(ct, &exp_orig->tuple, 0) != 0) - goto out_unexpect_both; - if (ip_ct_gre_keymap_add(ct, &exp_reply->tuple, 1) != 0) { - ip_ct_gre_keymap_destroy(ct); - goto out_unexpect_both; - } - ret = 0; - -out_put_both: - ip_conntrack_expect_put(exp_reply); -out_put_orig: - ip_conntrack_expect_put(exp_orig); -out: - return ret; - -out_unexpect_both: - ip_conntrack_unexpect_related(exp_reply); -out_unexpect_orig: - ip_conntrack_unexpect_related(exp_orig); - goto out_put_both; -} - -static inline int -pptp_inbound_pkt(struct sk_buff **pskb, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq, - unsigned int reqlen, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; - u_int16_t msg; - __be16 cid = 0, pcid = 0; - typeof(ip_nat_pptp_hook_inbound) ip_nat_pptp_inbound; - - msg = ntohs(ctlh->messageType); - DEBUGP("inbound control message %s\n", pptp_msg_name[msg]); - - switch (msg) { - case PPTP_START_SESSION_REPLY: - /* 
server confirms new control session */ - if (info->sstate < PPTP_SESSION_REQUESTED) - goto invalid; - if (pptpReq->srep.resultCode == PPTP_START_OK) - info->sstate = PPTP_SESSION_CONFIRMED; - else - info->sstate = PPTP_SESSION_ERROR; - break; - - case PPTP_STOP_SESSION_REPLY: - /* server confirms end of control session */ - if (info->sstate > PPTP_SESSION_STOPREQ) - goto invalid; - if (pptpReq->strep.resultCode == PPTP_STOP_OK) - info->sstate = PPTP_SESSION_NONE; - else - info->sstate = PPTP_SESSION_ERROR; - break; - - case PPTP_OUT_CALL_REPLY: - /* server accepted call, we now expect GRE frames */ - if (info->sstate != PPTP_SESSION_CONFIRMED) - goto invalid; - if (info->cstate != PPTP_CALL_OUT_REQ && - info->cstate != PPTP_CALL_OUT_CONF) - goto invalid; - - cid = pptpReq->ocack.callID; - pcid = pptpReq->ocack.peersCallID; - if (info->pns_call_id != pcid) - goto invalid; - DEBUGP("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], - ntohs(cid), ntohs(pcid)); - - if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { - info->cstate = PPTP_CALL_OUT_CONF; - info->pac_call_id = cid; - exp_gre(ct, cid, pcid); - } else - info->cstate = PPTP_CALL_NONE; - break; - - case PPTP_IN_CALL_REQUEST: - /* server tells us about incoming call request */ - if (info->sstate != PPTP_SESSION_CONFIRMED) - goto invalid; - - cid = pptpReq->icreq.callID; - DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); - info->cstate = PPTP_CALL_IN_REQ; - info->pac_call_id = cid; - break; - - case PPTP_IN_CALL_CONNECT: - /* server tells us about incoming call established */ - if (info->sstate != PPTP_SESSION_CONFIRMED) - goto invalid; - if (info->cstate != PPTP_CALL_IN_REP && - info->cstate != PPTP_CALL_IN_CONF) - goto invalid; - - pcid = pptpReq->iccon.peersCallID; - cid = info->pac_call_id; - - if (info->pns_call_id != pcid) - goto invalid; - - DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); - info->cstate = PPTP_CALL_IN_CONF; - - /* we expect a GRE connection from PAC to PNS */ - exp_gre(ct, cid, pcid); - break; - - case PPTP_CALL_DISCONNECT_NOTIFY: - /* server confirms disconnect */ - cid = pptpReq->disc.callID; - DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); - info->cstate = PPTP_CALL_NONE; - - /* untrack this call id, unexpect GRE packets */ - pptp_destroy_siblings(ct); - break; - - case PPTP_WAN_ERROR_NOTIFY: - case PPTP_ECHO_REQUEST: - case PPTP_ECHO_REPLY: - /* I don't have to explain these ;) */ - break; - default: - goto invalid; - } - - ip_nat_pptp_inbound = rcu_dereference(ip_nat_pptp_hook_inbound); - if (ip_nat_pptp_inbound) - return ip_nat_pptp_inbound(pskb, ct, ctinfo, ctlh, pptpReq); - return NF_ACCEPT; - -invalid: - DEBUGP("invalid %s: type=%d cid=%u pcid=%u " - "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", - msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], - msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, - ntohs(info->pns_call_id), ntohs(info->pac_call_id)); - return NF_ACCEPT; -} - -static inline int -pptp_outbound_pkt(struct sk_buff **pskb, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq, - unsigned int reqlen, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; - u_int16_t msg; - __be16 cid = 0, pcid = 0; - typeof(ip_nat_pptp_hook_outbound) ip_nat_pptp_outbound; - - msg = ntohs(ctlh->messageType); - DEBUGP("outbound control message %s\n", pptp_msg_name[msg]); - - switch (msg) { - case PPTP_START_SESSION_REQUEST: - /* client requests for new control session */ - if (info->sstate != PPTP_SESSION_NONE) - goto invalid; - info->sstate = PPTP_SESSION_REQUESTED; - break; - case PPTP_STOP_SESSION_REQUEST: - /* client requests end of control session */ - info->sstate = PPTP_SESSION_STOPREQ; - break; - - case PPTP_OUT_CALL_REQUEST: - /* client initiating connection to server */ - if (info->sstate != PPTP_SESSION_CONFIRMED) - goto invalid; - info->cstate = PPTP_CALL_OUT_REQ; - /* track PNS call id */ - cid = pptpReq->ocreq.callID; - DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); - info->pns_call_id = cid; - break; - case PPTP_IN_CALL_REPLY: - /* client answers incoming call */ - if (info->cstate != PPTP_CALL_IN_REQ && - info->cstate != PPTP_CALL_IN_REP) - goto invalid; - - cid = pptpReq->icack.callID; - pcid = pptpReq->icack.peersCallID; - if (info->pac_call_id != pcid) - goto invalid; - DEBUGP("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], - ntohs(cid), ntohs(pcid)); - - if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { - /* part two of the three-way handshake */ - info->cstate = PPTP_CALL_IN_REP; - info->pns_call_id = cid; - } else - info->cstate = PPTP_CALL_NONE; - break; - - case PPTP_CALL_CLEAR_REQUEST: - /* client requests hangup of call */ - if (info->sstate != PPTP_SESSION_CONFIRMED) - goto invalid; - /* FUTURE: iterate over all calls and check if - * call ID is valid. We don't do this without newnat, - * because we only know about last call */ - info->cstate = PPTP_CALL_CLEAR_REQ; - break; - case PPTP_SET_LINK_INFO: - case PPTP_ECHO_REQUEST: - case PPTP_ECHO_REPLY: - /* I don't have to explain these ;) */ - break; - default: - goto invalid; - } - - ip_nat_pptp_outbound = rcu_dereference(ip_nat_pptp_hook_outbound); - if (ip_nat_pptp_outbound) - return ip_nat_pptp_outbound(pskb, ct, ctinfo, ctlh, pptpReq); - return NF_ACCEPT; - -invalid: - DEBUGP("invalid %s: type=%d cid=%u pcid=%u " - "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", - msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], - msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, - ntohs(info->pns_call_id), ntohs(info->pac_call_id)); - return NF_ACCEPT; -} - -static const unsigned int pptp_msg_size[] = { - [PPTP_START_SESSION_REQUEST] = sizeof(struct PptpStartSessionRequest), - [PPTP_START_SESSION_REPLY] = sizeof(struct PptpStartSessionReply), - [PPTP_STOP_SESSION_REQUEST] = sizeof(struct PptpStopSessionRequest), - [PPTP_STOP_SESSION_REPLY] = sizeof(struct PptpStopSessionReply), - [PPTP_OUT_CALL_REQUEST] = sizeof(struct PptpOutCallRequest), - [PPTP_OUT_CALL_REPLY] = sizeof(struct PptpOutCallReply), - [PPTP_IN_CALL_REQUEST] = sizeof(struct PptpInCallRequest), - [PPTP_IN_CALL_REPLY] = sizeof(struct PptpInCallReply), - [PPTP_IN_CALL_CONNECT] = sizeof(struct PptpInCallConnected), - [PPTP_CALL_CLEAR_REQUEST] = sizeof(struct PptpClearCallRequest), - [PPTP_CALL_DISCONNECT_NOTIFY] = sizeof(struct PptpCallDisconnectNotify), - [PPTP_WAN_ERROR_NOTIFY] = sizeof(struct PptpWanErrorNotify), - [PPTP_SET_LINK_INFO] = sizeof(struct PptpSetLinkInfo), -}; - -/* track caller id inside control connection, call expect_related */ -static int -conntrack_pptp_help(struct sk_buff **pskb, - struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) - -{ - int dir = CTINFO2DIR(ctinfo); - struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; - struct tcphdr _tcph, *tcph; - struct pptp_pkt_hdr _pptph, *pptph; - struct PptpControlHeader _ctlh, *ctlh; - union pptp_ctrl_union _pptpReq, *pptpReq; - unsigned int tcplen = (*pskb)->len - ip_hdrlen(*pskb); - unsigned int datalen, reqlen, nexthdr_off; - int oldsstate, oldcstate; - int ret; - u_int16_t msg; - - /* don't do any tracking before tcp handshake complete */ - if (ctinfo != IP_CT_ESTABLISHED - && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) { - DEBUGP("ctinfo = %u, skipping\n", ctinfo); - return NF_ACCEPT; - } - - nexthdr_off = ip_hdrlen(*pskb); - tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph); - BUG_ON(!tcph); - nexthdr_off += tcph->doff * 4; - datalen = tcplen - tcph->doff * 4; - - pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph); - if (!pptph) { - DEBUGP("no full PPTP header, can't track\n"); - return NF_ACCEPT; - } - nexthdr_off += sizeof(_pptph); - datalen -= sizeof(_pptph); - - /* if it's not a control message we can't do anything with it */ - if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL || - ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) { - DEBUGP("not a control packet\n"); - return NF_ACCEPT; - } - - ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); - if (!ctlh) - return NF_ACCEPT; - nexthdr_off += sizeof(_ctlh); - datalen -= sizeof(_ctlh); - - reqlen = datalen; - msg = ntohs(ctlh->messageType); - if (msg > 0 && msg <= PPTP_MSG_MAX && reqlen < pptp_msg_size[msg]) - return NF_ACCEPT; - if (reqlen > sizeof(*pptpReq)) - reqlen = sizeof(*pptpReq); - - pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq); - if (!pptpReq) - return NF_ACCEPT; - - oldsstate = info->sstate; - oldcstate = info->cstate; - - spin_lock_bh(&ip_pptp_lock); - - /* FIXME: We just blindly assume that the control connection is always - * established from PNS->PAC. 
However, RFC makes no guarantee */ - if (dir == IP_CT_DIR_ORIGINAL) - /* client -> server (PNS -> PAC) */ - ret = pptp_outbound_pkt(pskb, ctlh, pptpReq, reqlen, ct, - ctinfo); - else - /* server -> client (PAC -> PNS) */ - ret = pptp_inbound_pkt(pskb, ctlh, pptpReq, reqlen, ct, - ctinfo); - DEBUGP("sstate: %d->%d, cstate: %d->%d\n", - oldsstate, info->sstate, oldcstate, info->cstate); - spin_unlock_bh(&ip_pptp_lock); - - return ret; -} - -/* control protocol helper */ -static struct ip_conntrack_helper pptp = { - .list = { NULL, NULL }, - .name = "pptp", - .me = THIS_MODULE, - .max_expected = 2, - .timeout = 5 * 60, - .tuple = { .src = { .ip = 0, - .u = { .tcp = { .port = - __constant_htons(PPTP_CONTROL_PORT) } } - }, - .dst = { .ip = 0, - .u = { .all = 0 }, - .protonum = IPPROTO_TCP - } - }, - .mask = { .src = { .ip = 0, - .u = { .tcp = { .port = __constant_htons(0xffff) } } - }, - .dst = { .ip = 0, - .u = { .all = 0 }, - .protonum = 0xff - } - }, - .help = conntrack_pptp_help, - .destroy = pptp_destroy_siblings, -}; - -extern void ip_ct_proto_gre_fini(void); -extern int __init ip_ct_proto_gre_init(void); - -/* ip_conntrack_pptp initialization */ -static int __init ip_conntrack_helper_pptp_init(void) -{ - int retcode; - - retcode = ip_ct_proto_gre_init(); - if (retcode < 0) - return retcode; - - DEBUGP(" registering helper\n"); - if ((retcode = ip_conntrack_helper_register(&pptp))) { - printk(KERN_ERR "Unable to register conntrack application " - "helper for pptp: %d\n", retcode); - ip_ct_proto_gre_fini(); - return retcode; - } - - printk("ip_conntrack_pptp version %s loaded\n", IP_CT_PPTP_VERSION); - return 0; -} - -static void __exit ip_conntrack_helper_pptp_fini(void) -{ - ip_conntrack_helper_unregister(&pptp); - ip_ct_proto_gre_fini(); - printk("ip_conntrack_pptp version %s unloaded\n", IP_CT_PPTP_VERSION); -} - -module_init(ip_conntrack_helper_pptp_init); -module_exit(ip_conntrack_helper_pptp_fini); - -EXPORT_SYMBOL(ip_nat_pptp_hook_outbound); -EXPORT_SYMBOL(ip_nat_pptp_hook_inbound); -EXPORT_SYMBOL(ip_nat_pptp_hook_exp_gre); -EXPORT_SYMBOL(ip_nat_pptp_hook_expectfn); diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c deleted file mode 100644 index ee99abe482e3..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_irc.c +++ /dev/null @@ -1,314 +0,0 @@ -/* IRC extension for IP connection tracking, Version 1.21 - * (C) 2000-2002 by Harald Welte - * based on RR's ip_conntrack_ftp.c - * - * ip_conntrack_irc.c,v 1.21 2002/02/05 14:49:26 laforge Exp - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - ** - * Module load syntax: - * insmod ip_conntrack_irc.o ports=port1,port2,...port - * max_dcc_channels=n dcc_timeout=secs - * - * please give the ports of all IRC servers You wish to connect to. - * If You don't specify ports, the default will be port 6667. - * With max_dcc_channels you can define the maximum number of not - * yet answered DCC channels per IRC session (default 8). - * With dcc_timeout you can specify how long the system waits for - * an expected DCC channel (default 300 seconds). 
- * - */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#define MAX_PORTS 8 -static unsigned short ports[MAX_PORTS]; -static int ports_c; -static unsigned int max_dcc_channels = 8; -static unsigned int dcc_timeout = 300; -/* This is slow, but it's simple. --RR */ -static char *irc_buffer; -static DEFINE_SPINLOCK(irc_buffer_lock); - -unsigned int (*ip_nat_irc_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack_expect *exp); -EXPORT_SYMBOL_GPL(ip_nat_irc_hook); - -MODULE_AUTHOR("Harald Welte "); -MODULE_DESCRIPTION("IRC (DCC) connection tracking helper"); -MODULE_LICENSE("GPL"); -module_param_array(ports, ushort, &ports_c, 0400); -MODULE_PARM_DESC(ports, "port numbers of IRC servers"); -module_param(max_dcc_channels, uint, 0400); -MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per IRC session"); -module_param(dcc_timeout, uint, 0400); -MODULE_PARM_DESC(dcc_timeout, "timeout on for unestablished DCC channels"); - -static const char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " }; -#define MINMATCHLEN 5 - -#if 0 -#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \ - __FILE__, __FUNCTION__ , ## args) -#else -#define DEBUGP(format, args...) -#endif - -static int parse_dcc(char *data, char *data_end, u_int32_t *ip, - u_int16_t *port, char **ad_beg_p, char **ad_end_p) -/* tries to get the ip_addr and port out of a dcc command - return value: -1 on failure, 0 on success - data pointer to first byte of DCC command data - data_end pointer to last byte of dcc command data - ip returns parsed ip of dcc command - port returns parsed port of dcc command - ad_beg_p returns pointer to first byte of addr data - ad_end_p returns pointer to last byte of addr data */ -{ - - /* at least 12: "AAAAAAAA P\1\n" */ - while (*data++ != ' ') - if (data > data_end - 12) - return -1; - - *ad_beg_p = data; - *ip = simple_strtoul(data, &data, 10); - - /* skip blanks between ip and port */ - while (*data == ' ') { - if (data >= data_end) - return -1; - data++; - } - - *port = simple_strtoul(data, &data, 10); - *ad_end_p = data; - - return 0; -} - -static int help(struct sk_buff **pskb, - struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) -{ - unsigned int dataoff; - struct tcphdr _tcph, *th; - char *data, *data_limit, *ib_ptr; - int dir = CTINFO2DIR(ctinfo); - struct ip_conntrack_expect *exp; - u32 seq; - u_int32_t dcc_ip; - u_int16_t dcc_port; - int i, ret = NF_ACCEPT; - char *addr_beg_p, *addr_end_p; - typeof(ip_nat_irc_hook) ip_nat_irc; - - DEBUGP("entered\n"); - - /* If packet is coming from IRC server */ - if (dir == IP_CT_DIR_REPLY) - return NF_ACCEPT; - - /* Until there's been traffic both ways, don't look in packets. */ - if (ctinfo != IP_CT_ESTABLISHED - && ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { - DEBUGP("Conntrackinfo = %u\n", ctinfo); - return NF_ACCEPT; - } - - /* Not a full tcp header? */ - th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), - sizeof(_tcph), &_tcph); - if (th == NULL) - return NF_ACCEPT; - - /* No data? 
*/ - dataoff = ip_hdrlen(*pskb) + th->doff * 4; - if (dataoff >= (*pskb)->len) - return NF_ACCEPT; - - spin_lock_bh(&irc_buffer_lock); - ib_ptr = skb_header_pointer(*pskb, dataoff, - (*pskb)->len - dataoff, irc_buffer); - BUG_ON(ib_ptr == NULL); - - data = ib_ptr; - data_limit = ib_ptr + (*pskb)->len - dataoff; - - /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 - * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */ - while (data < (data_limit - (19 + MINMATCHLEN))) { - if (memcmp(data, "\1DCC ", 5)) { - data++; - continue; - } - - data += 5; - /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */ - - DEBUGP("DCC found in master %u.%u.%u.%u:%u %u.%u.%u.%u:%u...\n", - NIPQUAD(iph->saddr), ntohs(th->source), - NIPQUAD(iph->daddr), ntohs(th->dest)); - - for (i = 0; i < ARRAY_SIZE(dccprotos); i++) { - if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) { - /* no match */ - continue; - } - - DEBUGP("DCC %s detected\n", dccprotos[i]); - data += strlen(dccprotos[i]); - /* we have at least - * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid - * data left (== 14/13 bytes) */ - if (parse_dcc((char *)data, data_limit, &dcc_ip, - &dcc_port, &addr_beg_p, &addr_end_p)) { - /* unable to parse */ - DEBUGP("unable to parse dcc command\n"); - continue; - } - DEBUGP("DCC bound ip/port: %u.%u.%u.%u:%u\n", - HIPQUAD(dcc_ip), dcc_port); - - /* dcc_ip can be the internal OR external (NAT'ed) IP - * Tiago Sousa */ - if (ct->tuplehash[dir].tuple.src.ip != htonl(dcc_ip) - && ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip != htonl(dcc_ip)) { - if (net_ratelimit()) - printk(KERN_WARNING - "Forged DCC command from " - "%u.%u.%u.%u: %u.%u.%u.%u:%u\n", - NIPQUAD(ct->tuplehash[dir].tuple.src.ip), - HIPQUAD(dcc_ip), dcc_port); - - continue; - } - - exp = ip_conntrack_expect_alloc(ct); - if (exp == NULL) { - ret = NF_DROP; - goto out; - } - - /* save position of address in dcc string, - * necessary for NAT */ - DEBUGP("tcph->seq = %u\n", th->seq); - seq = ntohl(th->seq) + (addr_beg_p - ib_ptr); - - /* We refer to the reverse direction ("!dir") - * tuples here, because we're expecting - * something in the other * direction. - * Doesn't matter unless NAT is happening. */ - exp->tuple = ((struct ip_conntrack_tuple) - { { 0, { 0 } }, - { ct->tuplehash[!dir].tuple.dst.ip, - { .tcp = { htons(dcc_port) } }, - IPPROTO_TCP }}); - exp->mask = ((struct ip_conntrack_tuple) - { { 0, { 0 } }, - { htonl(0xFFFFFFFF), - { .tcp = { htons(0xFFFF) } }, 0xFF }}); - exp->expectfn = NULL; - exp->flags = 0; - ip_nat_irc = rcu_dereference(ip_nat_irc_hook); - if (ip_nat_irc) - ret = ip_nat_irc(pskb, ctinfo, - addr_beg_p - ib_ptr, - addr_end_p - addr_beg_p, - exp); - else if (ip_conntrack_expect_related(exp) != 0) - ret = NF_DROP; - ip_conntrack_expect_put(exp); - goto out; - } /* for .. NUM_DCCPROTO */ - } /* while data < ... 
*/ - - out: - spin_unlock_bh(&irc_buffer_lock); - return ret; -} - -static struct ip_conntrack_helper irc_helpers[MAX_PORTS]; -static char irc_names[MAX_PORTS][sizeof("irc-65535")]; - -static void ip_conntrack_irc_fini(void); - -static int __init ip_conntrack_irc_init(void) -{ - int i, ret; - struct ip_conntrack_helper *hlpr; - char *tmpname; - - if (max_dcc_channels < 1) { - printk("ip_conntrack_irc: max_dcc_channels must be a positive integer\n"); - return -EBUSY; - } - - irc_buffer = kmalloc(65536, GFP_KERNEL); - if (!irc_buffer) - return -ENOMEM; - - /* If no port given, default to standard irc port */ - if (ports_c == 0) - ports[ports_c++] = IRC_PORT; - - for (i = 0; i < ports_c; i++) { - hlpr = &irc_helpers[i]; - hlpr->tuple.src.u.tcp.port = htons(ports[i]); - hlpr->tuple.dst.protonum = IPPROTO_TCP; - hlpr->mask.src.u.tcp.port = htons(0xFFFF); - hlpr->mask.dst.protonum = 0xFF; - hlpr->max_expected = max_dcc_channels; - hlpr->timeout = dcc_timeout; - hlpr->me = THIS_MODULE; - hlpr->help = help; - - tmpname = &irc_names[i][0]; - if (ports[i] == IRC_PORT) - sprintf(tmpname, "irc"); - else - sprintf(tmpname, "irc-%d", i); - hlpr->name = tmpname; - - DEBUGP("port #%d: %d\n", i, ports[i]); - - ret = ip_conntrack_helper_register(hlpr); - - if (ret) { - printk("ip_conntrack_irc: ERROR registering port %d\n", - ports[i]); - ip_conntrack_irc_fini(); - return -EBUSY; - } - } - return 0; -} - -/* This function is intentionally _NOT_ defined as __exit, because - * it is needed by the init function */ -static void ip_conntrack_irc_fini(void) -{ - int i; - for (i = 0; i < ports_c; i++) { - DEBUGP("unregistering port %d\n", - ports[i]); - ip_conntrack_helper_unregister(&irc_helpers[i]); - } - kfree(irc_buffer); -} - -module_init(ip_conntrack_irc_init); -module_exit(ip_conntrack_irc_fini); diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c deleted file mode 100644 index df07c5f1d874..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c +++ /dev/null @@ -1,143 +0,0 @@ -/* - * NetBIOS name service broadcast connection tracking helper - * - * (c) 2005 Patrick McHardy - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -/* - * This helper tracks locally originating NetBIOS name service - * requests by issuing permanent expectations (valid until - * timing out) matching all reply connections from the - * destination network. The only NetBIOS specific thing is - * actually the port number. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define NMBD_PORT 137 - -MODULE_AUTHOR("Patrick McHardy "); -MODULE_DESCRIPTION("NetBIOS name service broadcast connection tracking helper"); -MODULE_LICENSE("GPL"); - -static unsigned int timeout = 3; -module_param(timeout, uint, 0400); -MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds"); - -static int help(struct sk_buff **pskb, - struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) -{ - struct ip_conntrack_expect *exp; - struct iphdr *iph = ip_hdr(*pskb); - struct rtable *rt = (struct rtable *)(*pskb)->dst; - struct in_device *in_dev; - __be32 mask = 0; - - /* we're only interested in locally generated packets */ - if ((*pskb)->sk == NULL) - goto out; - if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) - goto out; - if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) - goto out; - - rcu_read_lock(); - in_dev = __in_dev_get_rcu(rt->u.dst.dev); - if (in_dev != NULL) { - for_primary_ifa(in_dev) { - if (ifa->ifa_broadcast == iph->daddr) { - mask = ifa->ifa_mask; - break; - } - } endfor_ifa(in_dev); - } - rcu_read_unlock(); - - if (mask == 0) - goto out; - - exp = ip_conntrack_expect_alloc(ct); - if (exp == NULL) - goto out; - - exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; - exp->tuple.src.u.udp.port = htons(NMBD_PORT); - - exp->mask.src.ip = mask; - exp->mask.src.u.udp.port = htons(0xFFFF); - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.u.udp.port = htons(0xFFFF); - exp->mask.dst.protonum = 0xFF; - - exp->expectfn = NULL; - exp->flags = IP_CT_EXPECT_PERMANENT; - - ip_conntrack_expect_related(exp); - ip_conntrack_expect_put(exp); - - ip_ct_refresh(ct, *pskb, timeout * HZ); -out: - return NF_ACCEPT; -} - -static struct ip_conntrack_helper helper = { - .name = "netbios-ns", - .tuple = { - .src = { - .u = { - .udp = { - .port = __constant_htons(NMBD_PORT), - } - } - }, - .dst = { - .protonum = IPPROTO_UDP, - }, - }, - .mask = { - .src = { - .u = { - .udp = { - .port = __constant_htons(0xFFFF), - } - } - }, - .dst = { - .protonum = 0xFF, - }, - }, - .max_expected = 1, - .me = THIS_MODULE, - .help = help, -}; - -static int __init ip_conntrack_netbios_ns_init(void) -{ - helper.timeout = timeout; - return ip_conntrack_helper_register(&helper); -} - -static void __exit ip_conntrack_netbios_ns_fini(void) -{ - ip_conntrack_helper_unregister(&helper); -} - -module_init(ip_conntrack_netbios_ns_init); -module_exit(ip_conntrack_netbios_ns_fini); diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c deleted file mode 100644 index 9228b76ccd9a..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_netlink.c +++ /dev/null @@ -1,1577 +0,0 @@ -/* Connection tracking via netlink socket. Allows for user space - * protocol helpers and general trouble making from userspace. - * - * (C) 2001 by Jay Schulist - * (C) 2002-2005 by Harald Welte - * (C) 2003 by Patrick Mchardy - * (C) 2005-2006 by Pablo Neira Ayuso - * - * I've reworked this stuff to use attributes instead of conntrack - * structures. 5.44 am. I need more tea. --pablo 05/07/11. - * - * Initial connection tracking via netlink development funded and - * generally made possible by Network Robots, Inc. 
(www.networkrobots.com) - * - * Further development of this code funded by Astaro AG (http://www.astaro.com) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include - -MODULE_LICENSE("GPL"); - -static char __initdata version[] = "0.90"; - -static inline int -ctnetlink_dump_tuples_proto(struct sk_buff *skb, - const struct ip_conntrack_tuple *tuple, - struct ip_conntrack_protocol *proto) -{ - int ret = 0; - struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_PROTO); - - NFA_PUT(skb, CTA_PROTO_NUM, sizeof(u_int8_t), &tuple->dst.protonum); - - if (likely(proto->tuple_to_nfattr)) - ret = proto->tuple_to_nfattr(skb, tuple); - - NFA_NEST_END(skb, nest_parms); - - return ret; - -nfattr_failure: - return -1; -} - -static inline int -ctnetlink_dump_tuples_ip(struct sk_buff *skb, - const struct ip_conntrack_tuple *tuple) -{ - struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_IP); - - NFA_PUT(skb, CTA_IP_V4_SRC, sizeof(__be32), &tuple->src.ip); - NFA_PUT(skb, CTA_IP_V4_DST, sizeof(__be32), &tuple->dst.ip); - - NFA_NEST_END(skb, nest_parms); - - return 0; - -nfattr_failure: - return -1; -} - -static inline int -ctnetlink_dump_tuples(struct sk_buff *skb, - const struct ip_conntrack_tuple *tuple) -{ - int ret; - struct ip_conntrack_protocol *proto; - - ret = ctnetlink_dump_tuples_ip(skb, tuple); - if (unlikely(ret < 0)) - return ret; - - proto = ip_conntrack_proto_find_get(tuple->dst.protonum); - ret = ctnetlink_dump_tuples_proto(skb, tuple, proto); - ip_conntrack_proto_put(proto); - - return ret; -} - -static inline int -ctnetlink_dump_status(struct sk_buff *skb, const struct ip_conntrack *ct) -{ - __be32 status = htonl((u_int32_t) ct->status); - NFA_PUT(skb, CTA_STATUS, sizeof(status), &status); - return 0; - -nfattr_failure: - return -1; -} - -static inline int -ctnetlink_dump_timeout(struct sk_buff *skb, const struct ip_conntrack *ct) -{ - long timeout_l = ct->timeout.expires - jiffies; - __be32 timeout; - - if (timeout_l < 0) - timeout = 0; - else - timeout = htonl(timeout_l / HZ); - - NFA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout); - return 0; - -nfattr_failure: - return -1; -} - -static inline int -ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct ip_conntrack *ct) -{ - struct ip_conntrack_protocol *proto = ip_conntrack_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); - - struct nfattr *nest_proto; - int ret; - - if (!proto->to_nfattr) { - ip_conntrack_proto_put(proto); - return 0; - } - - nest_proto = NFA_NEST(skb, CTA_PROTOINFO); - - ret = proto->to_nfattr(skb, nest_proto, ct); - - ip_conntrack_proto_put(proto); - - NFA_NEST_END(skb, nest_proto); - - return ret; - -nfattr_failure: - ip_conntrack_proto_put(proto); - return -1; -} - -static inline int -ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct ip_conntrack *ct) -{ - struct nfattr *nest_helper; - - if (!ct->helper) - return 0; - - nest_helper = NFA_NEST(skb, CTA_HELP); - NFA_PUT(skb, CTA_HELP_NAME, strlen(ct->helper->name), ct->helper->name); - - if (ct->helper->to_nfattr) - ct->helper->to_nfattr(skb, ct); - - NFA_NEST_END(skb, nest_helper); - - return 0; - -nfattr_failure: - return -1; -} - -#ifdef CONFIG_IP_NF_CT_ACCT -static inline int -ctnetlink_dump_counters(struct sk_buff *skb, const struct ip_conntrack 
*ct, - enum ip_conntrack_dir dir) -{ - enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; - struct nfattr *nest_count = NFA_NEST(skb, type); - __be32 tmp; - - tmp = htonl(ct->counters[dir].packets); - NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(__be32), &tmp); - - tmp = htonl(ct->counters[dir].bytes); - NFA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(__be32), &tmp); - - NFA_NEST_END(skb, nest_count); - - return 0; - -nfattr_failure: - return -1; -} -#else -#define ctnetlink_dump_counters(a, b, c) (0) -#endif - -#ifdef CONFIG_IP_NF_CONNTRACK_MARK -static inline int -ctnetlink_dump_mark(struct sk_buff *skb, const struct ip_conntrack *ct) -{ - __be32 mark = htonl(ct->mark); - - NFA_PUT(skb, CTA_MARK, sizeof(__be32), &mark); - return 0; - -nfattr_failure: - return -1; -} -#else -#define ctnetlink_dump_mark(a, b) (0) -#endif - -static inline int -ctnetlink_dump_id(struct sk_buff *skb, const struct ip_conntrack *ct) -{ - __be32 id = htonl(ct->id); - NFA_PUT(skb, CTA_ID, sizeof(__be32), &id); - return 0; - -nfattr_failure: - return -1; -} - -static inline int -ctnetlink_dump_use(struct sk_buff *skb, const struct ip_conntrack *ct) -{ - __be32 use = htonl(atomic_read(&ct->ct_general.use)); - - NFA_PUT(skb, CTA_USE, sizeof(__be32), &use); - return 0; - -nfattr_failure: - return -1; -} - -#define tuple(ct, dir) (&(ct)->tuplehash[dir].tuple) - -static int -ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, - int event, int nowait, - const struct ip_conntrack *ct) -{ - struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; - struct nfattr *nest_parms; - unsigned char *b; - - b = skb->tail; - - event |= NFNL_SUBSYS_CTNETLINK << 8; - nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg)); - nfmsg = NLMSG_DATA(nlh); - - nlh->nlmsg_flags = (nowait && pid) ? 
NLM_F_MULTI : 0; - nfmsg->nfgen_family = AF_INET; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - - nest_parms = NFA_NEST(skb, CTA_TUPLE_ORIG); - if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) - goto nfattr_failure; - NFA_NEST_END(skb, nest_parms); - - nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); - if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) - goto nfattr_failure; - NFA_NEST_END(skb, nest_parms); - - if (ctnetlink_dump_status(skb, ct) < 0 || - ctnetlink_dump_timeout(skb, ct) < 0 || - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 || - ctnetlink_dump_protoinfo(skb, ct) < 0 || - ctnetlink_dump_helpinfo(skb, ct) < 0 || - ctnetlink_dump_mark(skb, ct) < 0 || - ctnetlink_dump_id(skb, ct) < 0 || - ctnetlink_dump_use(skb, ct) < 0) - goto nfattr_failure; - - nlh->nlmsg_len = skb->tail - b; - return skb->len; - -nlmsg_failure: -nfattr_failure: - skb_trim(skb, b - skb->data); - return -1; -} - -#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS -static int ctnetlink_conntrack_event(struct notifier_block *this, - unsigned long events, void *ptr) -{ - struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; - struct nfattr *nest_parms; - struct ip_conntrack *ct = (struct ip_conntrack *)ptr; - struct sk_buff *skb; - unsigned int type; - unsigned char *b; - unsigned int flags = 0, group; - - /* ignore our fake conntrack entry */ - if (ct == &ip_conntrack_untracked) - return NOTIFY_DONE; - - if (events & IPCT_DESTROY) { - type = IPCTNL_MSG_CT_DELETE; - group = NFNLGRP_CONNTRACK_DESTROY; - } else if (events & (IPCT_NEW | IPCT_RELATED)) { - type = IPCTNL_MSG_CT_NEW; - flags = NLM_F_CREATE|NLM_F_EXCL; - group = NFNLGRP_CONNTRACK_NEW; - } else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) { - type = IPCTNL_MSG_CT_NEW; - group = NFNLGRP_CONNTRACK_UPDATE; - } else - return NOTIFY_DONE; - - if (!nfnetlink_has_listeners(group)) - return NOTIFY_DONE; - - skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); - if (!skb) - return NOTIFY_DONE; - - b = skb->tail; - - type |= NFNL_SUBSYS_CTNETLINK << 8; - nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); - nfmsg = NLMSG_DATA(nlh); - - nlh->nlmsg_flags = flags; - nfmsg->nfgen_family = AF_INET; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - - nest_parms = NFA_NEST(skb, CTA_TUPLE_ORIG); - if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) - goto nfattr_failure; - NFA_NEST_END(skb, nest_parms); - - nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); - if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) - goto nfattr_failure; - NFA_NEST_END(skb, nest_parms); - - if (events & IPCT_DESTROY) { - if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) - goto nfattr_failure; - } else { - if (ctnetlink_dump_status(skb, ct) < 0) - goto nfattr_failure; - - if (ctnetlink_dump_timeout(skb, ct) < 0) - goto nfattr_failure; - - if (events & IPCT_PROTOINFO - && ctnetlink_dump_protoinfo(skb, ct) < 0) - goto nfattr_failure; - - if ((events & IPCT_HELPER || ct->helper) - && ctnetlink_dump_helpinfo(skb, ct) < 0) - goto nfattr_failure; - -#ifdef CONFIG_IP_NF_CONNTRACK_MARK - if ((events & IPCT_MARK || ct->mark) - && ctnetlink_dump_mark(skb, ct) < 0) - goto nfattr_failure; -#endif - - if (events & IPCT_COUNTER_FILLING && - (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || - ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)) - goto nfattr_failure; - } - - nlh->nlmsg_len = skb->tail - b; - 
nfnetlink_send(skb, 0, group, 0); - return NOTIFY_DONE; - -nlmsg_failure: -nfattr_failure: - kfree_skb(skb); - return NOTIFY_DONE; -} -#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */ - -static int ctnetlink_done(struct netlink_callback *cb) -{ - if (cb->args[1]) - ip_conntrack_put((struct ip_conntrack *)cb->args[1]); - return 0; -} - -static int -ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct ip_conntrack *ct, *last; - struct ip_conntrack_tuple_hash *h; - struct list_head *i; - - read_lock_bh(&ip_conntrack_lock); - last = (struct ip_conntrack *)cb->args[1]; - for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) { -restart: - list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) { - h = (struct ip_conntrack_tuple_hash *) i; - if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) - continue; - ct = tuplehash_to_ctrack(h); - if (cb->args[1]) { - if (ct != last) - continue; - cb->args[1] = 0; - } - if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - IPCTNL_MSG_CT_NEW, - 1, ct) < 0) { - nf_conntrack_get(&ct->ct_general); - cb->args[1] = (unsigned long)ct; - goto out; - } -#ifdef CONFIG_NF_CT_ACCT - if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == - IPCTNL_MSG_CT_GET_CTRZERO) - memset(&ct->counters, 0, sizeof(ct->counters)); -#endif - } - if (cb->args[1]) { - cb->args[1] = 0; - goto restart; - } - } -out: - read_unlock_bh(&ip_conntrack_lock); - if (last) - ip_conntrack_put(last); - - return skb->len; -} - -static const size_t cta_min_ip[CTA_IP_MAX] = { - [CTA_IP_V4_SRC-1] = sizeof(__be32), - [CTA_IP_V4_DST-1] = sizeof(__be32), -}; - -static inline int -ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple) -{ - struct nfattr *tb[CTA_IP_MAX]; - - nfattr_parse_nested(tb, CTA_IP_MAX, attr); - - if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip)) - return -EINVAL; - - if (!tb[CTA_IP_V4_SRC-1]) - return -EINVAL; - tuple->src.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_SRC-1]); - - if (!tb[CTA_IP_V4_DST-1]) - return -EINVAL; - tuple->dst.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_DST-1]); - - return 0; -} - -static const size_t cta_min_proto[CTA_PROTO_MAX] = { - [CTA_PROTO_NUM-1] = sizeof(u_int8_t), - [CTA_PROTO_SRC_PORT-1] = sizeof(u_int16_t), - [CTA_PROTO_DST_PORT-1] = sizeof(u_int16_t), - [CTA_PROTO_ICMP_TYPE-1] = sizeof(u_int8_t), - [CTA_PROTO_ICMP_CODE-1] = sizeof(u_int8_t), - [CTA_PROTO_ICMP_ID-1] = sizeof(u_int16_t), -}; - -static inline int -ctnetlink_parse_tuple_proto(struct nfattr *attr, - struct ip_conntrack_tuple *tuple) -{ - struct nfattr *tb[CTA_PROTO_MAX]; - struct ip_conntrack_protocol *proto; - int ret = 0; - - nfattr_parse_nested(tb, CTA_PROTO_MAX, attr); - - if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) - return -EINVAL; - - if (!tb[CTA_PROTO_NUM-1]) - return -EINVAL; - tuple->dst.protonum = *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_NUM-1]); - - proto = ip_conntrack_proto_find_get(tuple->dst.protonum); - - if (likely(proto->nfattr_to_tuple)) - ret = proto->nfattr_to_tuple(tb, tuple); - - ip_conntrack_proto_put(proto); - - return ret; -} - -static inline int -ctnetlink_parse_tuple(struct nfattr *cda[], struct ip_conntrack_tuple *tuple, - enum ctattr_tuple type) -{ - struct nfattr *tb[CTA_TUPLE_MAX]; - int err; - - memset(tuple, 0, sizeof(*tuple)); - - nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]); - - if (!tb[CTA_TUPLE_IP-1]) - return -EINVAL; - - err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP-1], tuple); - if (err < 0) - return err; - - if (!tb[CTA_TUPLE_PROTO-1]) - return -EINVAL; - - err = 
ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO-1], tuple); - if (err < 0) - return err; - - /* orig and expect tuples get DIR_ORIGINAL */ - if (type == CTA_TUPLE_REPLY) - tuple->dst.dir = IP_CT_DIR_REPLY; - else - tuple->dst.dir = IP_CT_DIR_ORIGINAL; - - return 0; -} - -#ifdef CONFIG_IP_NF_NAT_NEEDED -static const size_t cta_min_protonat[CTA_PROTONAT_MAX] = { - [CTA_PROTONAT_PORT_MIN-1] = sizeof(u_int16_t), - [CTA_PROTONAT_PORT_MAX-1] = sizeof(u_int16_t), -}; - -static int ctnetlink_parse_nat_proto(struct nfattr *attr, - const struct ip_conntrack *ct, - struct ip_nat_range *range) -{ - struct nfattr *tb[CTA_PROTONAT_MAX]; - struct ip_nat_protocol *npt; - - nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr); - - if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat)) - return -EINVAL; - - npt = ip_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); - - if (!npt->nfattr_to_range) { - ip_nat_proto_put(npt); - return 0; - } - - /* nfattr_to_range returns 1 if it parsed, 0 if not, neg. on error */ - if (npt->nfattr_to_range(tb, range) > 0) - range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED; - - ip_nat_proto_put(npt); - - return 0; -} - -static const size_t cta_min_nat[CTA_NAT_MAX] = { - [CTA_NAT_MINIP-1] = sizeof(__be32), - [CTA_NAT_MAXIP-1] = sizeof(__be32), -}; - -static inline int -ctnetlink_parse_nat(struct nfattr *nat, - const struct ip_conntrack *ct, struct ip_nat_range *range) -{ - struct nfattr *tb[CTA_NAT_MAX]; - int err; - - memset(range, 0, sizeof(*range)); - - nfattr_parse_nested(tb, CTA_NAT_MAX, nat); - - if (nfattr_bad_size(tb, CTA_NAT_MAX, cta_min_nat)) - return -EINVAL; - - if (tb[CTA_NAT_MINIP-1]) - range->min_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MINIP-1]); - - if (!tb[CTA_NAT_MAXIP-1]) - range->max_ip = range->min_ip; - else - range->max_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MAXIP-1]); - - if (range->min_ip) - range->flags |= IP_NAT_RANGE_MAP_IPS; - - if (!tb[CTA_NAT_PROTO-1]) - return 0; - - err = ctnetlink_parse_nat_proto(tb[CTA_NAT_PROTO-1], ct, range); - if (err < 0) - return err; - - return 0; -} -#endif - -static inline int -ctnetlink_parse_help(struct nfattr *attr, char **helper_name) -{ - struct nfattr *tb[CTA_HELP_MAX]; - - nfattr_parse_nested(tb, CTA_HELP_MAX, attr); - - if (!tb[CTA_HELP_NAME-1]) - return -EINVAL; - - *helper_name = NFA_DATA(tb[CTA_HELP_NAME-1]); - - return 0; -} - -static const size_t cta_min[CTA_MAX] = { - [CTA_STATUS-1] = sizeof(__be32), - [CTA_TIMEOUT-1] = sizeof(__be32), - [CTA_MARK-1] = sizeof(__be32), - [CTA_USE-1] = sizeof(__be32), - [CTA_ID-1] = sizeof(__be32) -}; - -static int -ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) -{ - struct ip_conntrack_tuple_hash *h; - struct ip_conntrack_tuple tuple; - struct ip_conntrack *ct; - int err = 0; - - if (nfattr_bad_size(cda, CTA_MAX, cta_min)) - return -EINVAL; - - if (cda[CTA_TUPLE_ORIG-1]) - err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG); - else if (cda[CTA_TUPLE_REPLY-1]) - err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY); - else { - /* Flush the whole table */ - ip_conntrack_flush(); - return 0; - } - - if (err < 0) - return err; - - h = ip_conntrack_find_get(&tuple, NULL); - if (!h) - return -ENOENT; - - ct = tuplehash_to_ctrack(h); - - if (cda[CTA_ID-1]) { - u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1])); - if (ct->id != id) { - ip_conntrack_put(ct); - return -ENOENT; - } - } - if (del_timer(&ct->timeout)) - ct->timeout.function((unsigned long)ct); - - ip_conntrack_put(ct); 
- - return 0; -} - -static int -ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) -{ - struct ip_conntrack_tuple_hash *h; - struct ip_conntrack_tuple tuple; - struct ip_conntrack *ct; - struct sk_buff *skb2 = NULL; - int err = 0; - - if (nlh->nlmsg_flags & NLM_F_DUMP) { - struct nfgenmsg *msg = NLMSG_DATA(nlh); - u32 rlen; - - if (msg->nfgen_family != AF_INET) - return -EAFNOSUPPORT; - -#ifndef CONFIG_IP_NF_CT_ACCT - if (NFNL_MSG_TYPE(nlh->nlmsg_type) == IPCTNL_MSG_CT_GET_CTRZERO) - return -ENOTSUPP; -#endif - if ((*errp = netlink_dump_start(ctnl, skb, nlh, - ctnetlink_dump_table, - ctnetlink_done)) != 0) - return -EINVAL; - - rlen = NLMSG_ALIGN(nlh->nlmsg_len); - if (rlen > skb->len) - rlen = skb->len; - skb_pull(skb, rlen); - return 0; - } - - if (nfattr_bad_size(cda, CTA_MAX, cta_min)) - return -EINVAL; - - if (cda[CTA_TUPLE_ORIG-1]) - err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG); - else if (cda[CTA_TUPLE_REPLY-1]) - err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY); - else - return -EINVAL; - - if (err < 0) - return err; - - h = ip_conntrack_find_get(&tuple, NULL); - if (!h) - return -ENOENT; - - ct = tuplehash_to_ctrack(h); - - err = -ENOMEM; - skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb2) { - ip_conntrack_put(ct); - return -ENOMEM; - } - - err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, - IPCTNL_MSG_CT_NEW, 1, ct); - ip_conntrack_put(ct); - if (err <= 0) - goto free; - - err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); - if (err < 0) - goto out; - - return 0; - -free: - kfree_skb(skb2); -out: - return err; -} - -static inline int -ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[]) -{ - unsigned long d; - unsigned status = ntohl(*(__be32 *)NFA_DATA(cda[CTA_STATUS-1])); - d = ct->status ^ status; - - if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) - /* unchangeable */ - return -EINVAL; - - if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) - /* SEEN_REPLY bit can only be set */ - return -EINVAL; - - - if (d & IPS_ASSURED && !(status & IPS_ASSURED)) - /* ASSURED bit can only be set */ - return -EINVAL; - - if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) { -#ifndef CONFIG_IP_NF_NAT_NEEDED - return -EINVAL; -#else - struct ip_nat_range range; - - if (cda[CTA_NAT_DST-1]) { - if (ctnetlink_parse_nat(cda[CTA_NAT_DST-1], ct, - &range) < 0) - return -EINVAL; - if (ip_nat_initialized(ct, - HOOK2MANIP(NF_IP_PRE_ROUTING))) - return -EEXIST; - ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); - } - if (cda[CTA_NAT_SRC-1]) { - if (ctnetlink_parse_nat(cda[CTA_NAT_SRC-1], ct, - &range) < 0) - return -EINVAL; - if (ip_nat_initialized(ct, - HOOK2MANIP(NF_IP_POST_ROUTING))) - return -EEXIST; - ip_nat_setup_info(ct, &range, NF_IP_POST_ROUTING); - } -#endif - } - - /* Be careful here, modifying NAT bits can screw up things, - * so don't let users modify them directly if they don't pass - * ip_nat_range. 
*/ - ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK); - return 0; -} - - -static inline int -ctnetlink_change_helper(struct ip_conntrack *ct, struct nfattr *cda[]) -{ - struct ip_conntrack_helper *helper; - char *helpname; - int err; - - /* don't change helper of sibling connections */ - if (ct->master) - return -EINVAL; - - err = ctnetlink_parse_help(cda[CTA_HELP-1], &helpname); - if (err < 0) - return err; - - helper = __ip_conntrack_helper_find_byname(helpname); - if (!helper) { - if (!strcmp(helpname, "")) - helper = NULL; - else - return -EINVAL; - } - - if (ct->helper) { - if (!helper) { - /* we had a helper before ... */ - ip_ct_remove_expectations(ct); - ct->helper = NULL; - } else { - /* need to zero data of old helper */ - memset(&ct->help, 0, sizeof(ct->help)); - } - } - - ct->helper = helper; - - return 0; -} - -static inline int -ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[]) -{ - u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); - - if (!del_timer(&ct->timeout)) - return -ETIME; - - ct->timeout.expires = jiffies + timeout * HZ; - add_timer(&ct->timeout); - - return 0; -} - -static inline int -ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[]) -{ - struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1]; - struct ip_conntrack_protocol *proto; - u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; - int err = 0; - - nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr); - - proto = ip_conntrack_proto_find_get(npt); - - if (proto->from_nfattr) - err = proto->from_nfattr(tb, ct); - ip_conntrack_proto_put(proto); - - return err; -} - -static int -ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[]) -{ - int err; - - if (cda[CTA_HELP-1]) { - err = ctnetlink_change_helper(ct, cda); - if (err < 0) - return err; - } - - if (cda[CTA_TIMEOUT-1]) { - err = ctnetlink_change_timeout(ct, cda); - if (err < 0) - return err; - } - - if (cda[CTA_STATUS-1]) { - err = ctnetlink_change_status(ct, cda); - if (err < 0) - return err; - } - - if (cda[CTA_PROTOINFO-1]) { - err = ctnetlink_change_protoinfo(ct, cda); - if (err < 0) - return err; - } - -#if defined(CONFIG_IP_NF_CONNTRACK_MARK) - if (cda[CTA_MARK-1]) - ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1])); -#endif - - return 0; -} - -static int -ctnetlink_create_conntrack(struct nfattr *cda[], - struct ip_conntrack_tuple *otuple, - struct ip_conntrack_tuple *rtuple) -{ - struct ip_conntrack *ct; - int err = -EINVAL; - - ct = ip_conntrack_alloc(otuple, rtuple); - if (ct == NULL || IS_ERR(ct)) - return -ENOMEM; - - if (!cda[CTA_TIMEOUT-1]) - goto err; - ct->timeout.expires = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); - - ct->timeout.expires = jiffies + ct->timeout.expires * HZ; - ct->status |= IPS_CONFIRMED; - - if (cda[CTA_STATUS-1]) { - err = ctnetlink_change_status(ct, cda); - if (err < 0) - goto err; - } - - if (cda[CTA_PROTOINFO-1]) { - err = ctnetlink_change_protoinfo(ct, cda); - if (err < 0) - goto err; - } - -#if defined(CONFIG_IP_NF_CONNTRACK_MARK) - if (cda[CTA_MARK-1]) - ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1])); -#endif - - ct->helper = ip_conntrack_helper_find_get(rtuple); - - add_timer(&ct->timeout); - ip_conntrack_hash_insert(ct); - - if (ct->helper) - ip_conntrack_helper_put(ct->helper); - - return 0; - -err: - ip_conntrack_free(ct); - return err; -} - -static int -ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) -{ - 
struct ip_conntrack_tuple otuple, rtuple; - struct ip_conntrack_tuple_hash *h = NULL; - int err = 0; - - if (nfattr_bad_size(cda, CTA_MAX, cta_min)) - return -EINVAL; - - if (cda[CTA_TUPLE_ORIG-1]) { - err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG); - if (err < 0) - return err; - } - - if (cda[CTA_TUPLE_REPLY-1]) { - err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY); - if (err < 0) - return err; - } - - write_lock_bh(&ip_conntrack_lock); - if (cda[CTA_TUPLE_ORIG-1]) - h = __ip_conntrack_find(&otuple, NULL); - else if (cda[CTA_TUPLE_REPLY-1]) - h = __ip_conntrack_find(&rtuple, NULL); - - if (h == NULL) { - write_unlock_bh(&ip_conntrack_lock); - err = -ENOENT; - if (nlh->nlmsg_flags & NLM_F_CREATE) - err = ctnetlink_create_conntrack(cda, &otuple, &rtuple); - return err; - } - /* implicit 'else' */ - - /* we only allow nat config for new conntracks */ - if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) { - err = -EINVAL; - goto out_unlock; - } - - /* We manipulate the conntrack inside the global conntrack table lock, - * so there's no need to increase the refcount */ - err = -EEXIST; - if (!(nlh->nlmsg_flags & NLM_F_EXCL)) - err = ctnetlink_change_conntrack(tuplehash_to_ctrack(h), cda); - -out_unlock: - write_unlock_bh(&ip_conntrack_lock); - return err; -} - -/*********************************************************************** - * EXPECT - ***********************************************************************/ - -static inline int -ctnetlink_exp_dump_tuple(struct sk_buff *skb, - const struct ip_conntrack_tuple *tuple, - enum ctattr_expect type) -{ - struct nfattr *nest_parms = NFA_NEST(skb, type); - - if (ctnetlink_dump_tuples(skb, tuple) < 0) - goto nfattr_failure; - - NFA_NEST_END(skb, nest_parms); - - return 0; - -nfattr_failure: - return -1; -} - -static inline int -ctnetlink_exp_dump_mask(struct sk_buff *skb, - const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_tuple *mask) -{ - int ret; - struct ip_conntrack_protocol *proto; - struct nfattr *nest_parms = NFA_NEST(skb, CTA_EXPECT_MASK); - - ret = ctnetlink_dump_tuples_ip(skb, mask); - if (unlikely(ret < 0)) - goto nfattr_failure; - - proto = ip_conntrack_proto_find_get(tuple->dst.protonum); - ret = ctnetlink_dump_tuples_proto(skb, mask, proto); - ip_conntrack_proto_put(proto); - if (unlikely(ret < 0)) - goto nfattr_failure; - - NFA_NEST_END(skb, nest_parms); - - return 0; - -nfattr_failure: - return -1; -} - -static inline int -ctnetlink_exp_dump_expect(struct sk_buff *skb, - const struct ip_conntrack_expect *exp) -{ - struct ip_conntrack *master = exp->master; - __be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ); - __be32 id = htonl(exp->id); - - if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0) - goto nfattr_failure; - if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0) - goto nfattr_failure; - if (ctnetlink_exp_dump_tuple(skb, - &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - CTA_EXPECT_MASTER) < 0) - goto nfattr_failure; - - NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(__be32), &timeout); - NFA_PUT(skb, CTA_EXPECT_ID, sizeof(__be32), &id); - - return 0; - -nfattr_failure: - return -1; -} - -static int -ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, - int event, - int nowait, - const struct ip_conntrack_expect *exp) -{ - struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; - unsigned char *b; - - b = skb->tail; - - event |= NFNL_SUBSYS_CTNETLINK_EXP << 8; - nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg)); - nfmsg = NLMSG_DATA(nlh); - - 
nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0; - nfmsg->nfgen_family = AF_INET; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - - if (ctnetlink_exp_dump_expect(skb, exp) < 0) - goto nfattr_failure; - - nlh->nlmsg_len = skb->tail - b; - return skb->len; - -nlmsg_failure: -nfattr_failure: - skb_trim(skb, b - skb->data); - return -1; -} - -#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS -static int ctnetlink_expect_event(struct notifier_block *this, - unsigned long events, void *ptr) -{ - struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; - struct ip_conntrack_expect *exp = (struct ip_conntrack_expect *)ptr; - struct sk_buff *skb; - unsigned int type; - unsigned char *b; - int flags = 0; - - if (events & IPEXP_NEW) { - type = IPCTNL_MSG_EXP_NEW; - flags = NLM_F_CREATE|NLM_F_EXCL; - } else - return NOTIFY_DONE; - - if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) - return NOTIFY_DONE; - - skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); - if (!skb) - return NOTIFY_DONE; - - b = skb->tail; - - type |= NFNL_SUBSYS_CTNETLINK_EXP << 8; - nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); - nfmsg = NLMSG_DATA(nlh); - - nlh->nlmsg_flags = flags; - nfmsg->nfgen_family = AF_INET; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - - if (ctnetlink_exp_dump_expect(skb, exp) < 0) - goto nfattr_failure; - - nlh->nlmsg_len = skb->tail - b; - nfnetlink_send(skb, 0, NFNLGRP_CONNTRACK_EXP_NEW, 0); - return NOTIFY_DONE; - -nlmsg_failure: -nfattr_failure: - kfree_skb(skb); - return NOTIFY_DONE; -} -#endif - -static int -ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct ip_conntrack_expect *exp = NULL; - struct list_head *i; - u_int32_t *id = (u_int32_t *) &cb->args[0]; - - read_lock_bh(&ip_conntrack_lock); - list_for_each_prev(i, &ip_conntrack_expect_list) { - exp = (struct ip_conntrack_expect *) i; - if (exp->id <= *id) - continue; - if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - IPCTNL_MSG_EXP_NEW, - 1, exp) < 0) - goto out; - *id = exp->id; - } -out: - read_unlock_bh(&ip_conntrack_lock); - - return skb->len; -} - -static const size_t cta_min_exp[CTA_EXPECT_MAX] = { - [CTA_EXPECT_TIMEOUT-1] = sizeof(__be32), - [CTA_EXPECT_ID-1] = sizeof(__be32) -}; - -static int -ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) -{ - struct ip_conntrack_tuple tuple; - struct ip_conntrack_expect *exp; - struct sk_buff *skb2; - int err = 0; - - if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) - return -EINVAL; - - if (nlh->nlmsg_flags & NLM_F_DUMP) { - struct nfgenmsg *msg = NLMSG_DATA(nlh); - u32 rlen; - - if (msg->nfgen_family != AF_INET) - return -EAFNOSUPPORT; - - if ((*errp = netlink_dump_start(ctnl, skb, nlh, - ctnetlink_exp_dump_table, - ctnetlink_done)) != 0) - return -EINVAL; - rlen = NLMSG_ALIGN(nlh->nlmsg_len); - if (rlen > skb->len) - rlen = skb->len; - skb_pull(skb, rlen); - return 0; - } - - if (cda[CTA_EXPECT_MASTER-1]) - err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER); - else - return -EINVAL; - - if (err < 0) - return err; - - exp = ip_conntrack_expect_find_get(&tuple); - if (!exp) - return -ENOENT; - - if (cda[CTA_EXPECT_ID-1]) { - __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]); - if (exp->id != ntohl(id)) { - ip_conntrack_expect_put(exp); - return -ENOENT; - } - } - - err = -ENOMEM; - skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb2) - goto out; - - err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, - 
nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, - 1, exp); - if (err <= 0) - goto free; - - ip_conntrack_expect_put(exp); - - return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); - -free: - kfree_skb(skb2); -out: - ip_conntrack_expect_put(exp); - return err; -} - -static int -ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) -{ - struct ip_conntrack_expect *exp, *tmp; - struct ip_conntrack_tuple tuple; - struct ip_conntrack_helper *h; - int err; - - if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) - return -EINVAL; - - if (cda[CTA_EXPECT_TUPLE-1]) { - /* delete a single expect by tuple */ - err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE); - if (err < 0) - return err; - - /* bump usage count to 2 */ - exp = ip_conntrack_expect_find_get(&tuple); - if (!exp) - return -ENOENT; - - if (cda[CTA_EXPECT_ID-1]) { - __be32 id = - *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]); - if (exp->id != ntohl(id)) { - ip_conntrack_expect_put(exp); - return -ENOENT; - } - } - - /* after list removal, usage count == 1 */ - ip_conntrack_unexpect_related(exp); - /* have to put what we 'get' above. - * after this line usage count == 0 */ - ip_conntrack_expect_put(exp); - } else if (cda[CTA_EXPECT_HELP_NAME-1]) { - char *name = NFA_DATA(cda[CTA_EXPECT_HELP_NAME-1]); - - /* delete all expectations for this helper */ - write_lock_bh(&ip_conntrack_lock); - h = __ip_conntrack_helper_find_byname(name); - if (!h) { - write_unlock_bh(&ip_conntrack_lock); - return -EINVAL; - } - list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, - list) { - if (exp->master->helper == h - && del_timer(&exp->timeout)) { - ip_ct_unlink_expect(exp); - ip_conntrack_expect_put(exp); - } - } - write_unlock_bh(&ip_conntrack_lock); - } else { - /* This basically means we have to flush everything*/ - write_lock_bh(&ip_conntrack_lock); - list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, - list) { - if (del_timer(&exp->timeout)) { - ip_ct_unlink_expect(exp); - ip_conntrack_expect_put(exp); - } - } - write_unlock_bh(&ip_conntrack_lock); - } - - return 0; -} -static int -ctnetlink_change_expect(struct ip_conntrack_expect *x, struct nfattr *cda[]) -{ - return -EOPNOTSUPP; -} - -static int -ctnetlink_create_expect(struct nfattr *cda[]) -{ - struct ip_conntrack_tuple tuple, mask, master_tuple; - struct ip_conntrack_tuple_hash *h = NULL; - struct ip_conntrack_expect *exp; - struct ip_conntrack *ct; - int err = 0; - - /* caller guarantees that those three CTA_EXPECT_* exist */ - err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE); - if (err < 0) - return err; - err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK); - if (err < 0) - return err; - err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER); - if (err < 0) - return err; - - /* Look for master conntrack of this expectation */ - h = ip_conntrack_find_get(&master_tuple, NULL); - if (!h) - return -ENOENT; - ct = tuplehash_to_ctrack(h); - - if (!ct->helper) { - /* such conntrack hasn't got any helper, abort */ - err = -EINVAL; - goto out; - } - - exp = ip_conntrack_expect_alloc(ct); - if (!exp) { - err = -ENOMEM; - goto out; - } - - exp->expectfn = NULL; - exp->flags = 0; - exp->master = ct; - memcpy(&exp->tuple, &tuple, sizeof(struct ip_conntrack_tuple)); - memcpy(&exp->mask, &mask, sizeof(struct ip_conntrack_tuple)); - - err = ip_conntrack_expect_related(exp); - ip_conntrack_expect_put(exp); - -out: - ip_conntrack_put(tuplehash_to_ctrack(h)); - return err; -} - -static int 
-ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) -{ - struct ip_conntrack_tuple tuple; - struct ip_conntrack_expect *exp; - int err = 0; - - if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) - return -EINVAL; - - if (!cda[CTA_EXPECT_TUPLE-1] - || !cda[CTA_EXPECT_MASK-1] - || !cda[CTA_EXPECT_MASTER-1]) - return -EINVAL; - - err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE); - if (err < 0) - return err; - - write_lock_bh(&ip_conntrack_lock); - exp = __ip_conntrack_expect_find(&tuple); - - if (!exp) { - write_unlock_bh(&ip_conntrack_lock); - err = -ENOENT; - if (nlh->nlmsg_flags & NLM_F_CREATE) - err = ctnetlink_create_expect(cda); - return err; - } - - err = -EEXIST; - if (!(nlh->nlmsg_flags & NLM_F_EXCL)) - err = ctnetlink_change_expect(exp, cda); - write_unlock_bh(&ip_conntrack_lock); - - return err; -} - -#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS -static struct notifier_block ctnl_notifier = { - .notifier_call = ctnetlink_conntrack_event, -}; - -static struct notifier_block ctnl_notifier_exp = { - .notifier_call = ctnetlink_expect_event, -}; -#endif - -static struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { - [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack, - .attr_count = CTA_MAX, }, - [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack, - .attr_count = CTA_MAX, }, - [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack, - .attr_count = CTA_MAX, }, - [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack, - .attr_count = CTA_MAX, }, -}; - -static struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { - [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect, - .attr_count = CTA_EXPECT_MAX, }, - [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect, - .attr_count = CTA_EXPECT_MAX, }, - [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect, - .attr_count = CTA_EXPECT_MAX, }, -}; - -static struct nfnetlink_subsystem ctnl_subsys = { - .name = "conntrack", - .subsys_id = NFNL_SUBSYS_CTNETLINK, - .cb_count = IPCTNL_MSG_MAX, - .cb = ctnl_cb, -}; - -static struct nfnetlink_subsystem ctnl_exp_subsys = { - .name = "conntrack_expect", - .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP, - .cb_count = IPCTNL_MSG_EXP_MAX, - .cb = ctnl_exp_cb, -}; - -MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); -MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); - -static int __init ctnetlink_init(void) -{ - int ret; - - printk("ctnetlink v%s: registering with nfnetlink.\n", version); - ret = nfnetlink_subsys_register(&ctnl_subsys); - if (ret < 0) { - printk("ctnetlink_init: cannot register with nfnetlink.\n"); - goto err_out; - } - - ret = nfnetlink_subsys_register(&ctnl_exp_subsys); - if (ret < 0) { - printk("ctnetlink_init: cannot register exp with nfnetlink.\n"); - goto err_unreg_subsys; - } - -#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS - ret = ip_conntrack_register_notifier(&ctnl_notifier); - if (ret < 0) { - printk("ctnetlink_init: cannot register notifier.\n"); - goto err_unreg_exp_subsys; - } - - ret = ip_conntrack_expect_register_notifier(&ctnl_notifier_exp); - if (ret < 0) { - printk("ctnetlink_init: cannot expect register notifier.\n"); - goto err_unreg_notifier; - } -#endif - - return 0; - -#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS -err_unreg_notifier: - ip_conntrack_unregister_notifier(&ctnl_notifier); -err_unreg_exp_subsys: - nfnetlink_subsys_unregister(&ctnl_exp_subsys); -#endif -err_unreg_subsys: - nfnetlink_subsys_unregister(&ctnl_subsys); -err_out: - return ret; -} - -static void __exit ctnetlink_exit(void) -{ 
- printk("ctnetlink: unregistering from nfnetlink.\n"); - -#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS - ip_conntrack_expect_unregister_notifier(&ctnl_notifier_exp); - ip_conntrack_unregister_notifier(&ctnl_notifier); -#endif - - nfnetlink_subsys_unregister(&ctnl_exp_subsys); - nfnetlink_subsys_unregister(&ctnl_subsys); - return; -} - -module_init(ctnetlink_init); -module_exit(ctnetlink_exit); diff --git a/net/ipv4/netfilter/ip_conntrack_proto_generic.c b/net/ipv4/netfilter/ip_conntrack_proto_generic.c deleted file mode 100644 index 88af82e98658..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_proto_generic.c +++ /dev/null @@ -1,74 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include - -unsigned int ip_ct_generic_timeout __read_mostly = 600*HZ; - -static int generic_pkt_to_tuple(const struct sk_buff *skb, - unsigned int dataoff, - struct ip_conntrack_tuple *tuple) -{ - tuple->src.u.all = 0; - tuple->dst.u.all = 0; - - return 1; -} - -static int generic_invert_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_tuple *orig) -{ - tuple->src.u.all = 0; - tuple->dst.u.all = 0; - - return 1; -} - -/* Print out the per-protocol part of the tuple. */ -static int generic_print_tuple(struct seq_file *s, - const struct ip_conntrack_tuple *tuple) -{ - return 0; -} - -/* Print out the private part of the conntrack. */ -static int generic_print_conntrack(struct seq_file *s, - const struct ip_conntrack *state) -{ - return 0; -} - -/* Returns verdict for packet, or -1 for invalid. */ -static int packet(struct ip_conntrack *conntrack, - const struct sk_buff *skb, - enum ip_conntrack_info ctinfo) -{ - ip_ct_refresh_acct(conntrack, ctinfo, skb, ip_ct_generic_timeout); - return NF_ACCEPT; -} - -/* Called when a new connection for this protocol found. */ -static int new(struct ip_conntrack *conntrack, const struct sk_buff *skb) -{ - return 1; -} - -struct ip_conntrack_protocol ip_conntrack_generic_protocol = -{ - .proto = 0, - .name = "unknown", - .pkt_to_tuple = generic_pkt_to_tuple, - .invert_tuple = generic_invert_tuple, - .print_tuple = generic_print_tuple, - .print_conntrack = generic_print_conntrack, - .packet = packet, - .new = new, -}; diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c deleted file mode 100644 index ac1c49ef36a9..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_proto_gre.c +++ /dev/null @@ -1,328 +0,0 @@ -/* - * ip_conntrack_proto_gre.c - Version 3.0 - * - * Connection tracking protocol helper module for GRE. - * - * GRE is a generic encapsulation protocol, which is generally not very - * suited for NAT, as it has no protocol-specific part as port numbers. - * - * It has an optional key field, which may help us distinguishing two - * connections between the same two hosts. - * - * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784 - * - * PPTP is built on top of a modified version of GRE, and has a mandatory - * field called "CallID", which serves us for the same purpose as the key - * field in plain GRE. 
- * - * Documentation about PPTP can be found in RFC 2637 - * - * (C) 2000-2005 by Harald Welte - * - * Development of this code funded by Astaro AG (http://www.astaro.com/) - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static DEFINE_RWLOCK(ip_ct_gre_lock); - -#include -#include -#include - -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Harald Welte "); -MODULE_DESCRIPTION("netfilter connection tracking protocol helper for GRE"); - -/* shamelessly stolen from ip_conntrack_proto_udp.c */ -#define GRE_TIMEOUT (30*HZ) -#define GRE_STREAM_TIMEOUT (180*HZ) - -#if 0 -#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args) -#define DUMP_TUPLE_GRE(x) printk("%u.%u.%u.%u:0x%x -> %u.%u.%u.%u:0x%x\n", \ - NIPQUAD((x)->src.ip), ntohs((x)->src.u.gre.key), \ - NIPQUAD((x)->dst.ip), ntohs((x)->dst.u.gre.key)) -#else -#define DEBUGP(x, args...) -#define DUMP_TUPLE_GRE(x) -#endif - -/* GRE KEYMAP HANDLING FUNCTIONS */ -static LIST_HEAD(gre_keymap_list); - -static inline int gre_key_cmpfn(const struct ip_ct_gre_keymap *km, - const struct ip_conntrack_tuple *t) -{ - return ((km->tuple.src.ip == t->src.ip) && - (km->tuple.dst.ip == t->dst.ip) && - (km->tuple.dst.protonum == t->dst.protonum) && - (km->tuple.dst.u.all == t->dst.u.all)); -} - -/* look up the source key for a given tuple */ -static __be16 gre_keymap_lookup(struct ip_conntrack_tuple *t) -{ - struct ip_ct_gre_keymap *km; - __be16 key = 0; - - read_lock_bh(&ip_ct_gre_lock); - list_for_each_entry(km, &gre_keymap_list, list) { - if (gre_key_cmpfn(km, t)) { - key = km->tuple.src.u.gre.key; - break; - } - } - read_unlock_bh(&ip_ct_gre_lock); - - DEBUGP("lookup src key 0x%x up key for ", key); - DUMP_TUPLE_GRE(t); - - return key; -} - -/* add a single keymap entry, associate with specified master ct */ -int -ip_ct_gre_keymap_add(struct ip_conntrack *ct, - struct ip_conntrack_tuple *t, int reply) -{ - struct ip_ct_gre_keymap **exist_km, *km; - - if (!ct->helper || strcmp(ct->helper->name, "pptp")) { - DEBUGP("refusing to add GRE keymap to non-pptp session\n"); - return -1; - } - - if (!reply) - exist_km = &ct->help.ct_pptp_info.keymap_orig; - else - exist_km = &ct->help.ct_pptp_info.keymap_reply; - - if (*exist_km) { - /* check whether it's a retransmission */ - list_for_each_entry(km, &gre_keymap_list, list) { - if (gre_key_cmpfn(km, t) && km == *exist_km) - return 0; - } - DEBUGP("trying to override keymap_%s for ct %p\n", - reply? 
"reply":"orig", ct); - return -EEXIST; - } - - km = kmalloc(sizeof(*km), GFP_ATOMIC); - if (!km) - return -ENOMEM; - - memcpy(&km->tuple, t, sizeof(*t)); - *exist_km = km; - - DEBUGP("adding new entry %p: ", km); - DUMP_TUPLE_GRE(&km->tuple); - - write_lock_bh(&ip_ct_gre_lock); - list_add_tail(&km->list, &gre_keymap_list); - write_unlock_bh(&ip_ct_gre_lock); - - return 0; -} - -/* destroy the keymap entries associated with specified master ct */ -void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct) -{ - DEBUGP("entering for ct %p\n", ct); - - if (!ct->helper || strcmp(ct->helper->name, "pptp")) { - DEBUGP("refusing to destroy GRE keymap to non-pptp session\n"); - return; - } - - write_lock_bh(&ip_ct_gre_lock); - if (ct->help.ct_pptp_info.keymap_orig) { - DEBUGP("removing %p from list\n", - ct->help.ct_pptp_info.keymap_orig); - list_del(&ct->help.ct_pptp_info.keymap_orig->list); - kfree(ct->help.ct_pptp_info.keymap_orig); - ct->help.ct_pptp_info.keymap_orig = NULL; - } - if (ct->help.ct_pptp_info.keymap_reply) { - DEBUGP("removing %p from list\n", - ct->help.ct_pptp_info.keymap_reply); - list_del(&ct->help.ct_pptp_info.keymap_reply->list); - kfree(ct->help.ct_pptp_info.keymap_reply); - ct->help.ct_pptp_info.keymap_reply = NULL; - } - write_unlock_bh(&ip_ct_gre_lock); -} - - -/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */ - -/* invert gre part of tuple */ -static int gre_invert_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_tuple *orig) -{ - tuple->dst.u.gre.key = orig->src.u.gre.key; - tuple->src.u.gre.key = orig->dst.u.gre.key; - - return 1; -} - -/* gre hdr info to tuple */ -static int gre_pkt_to_tuple(const struct sk_buff *skb, - unsigned int dataoff, - struct ip_conntrack_tuple *tuple) -{ - struct gre_hdr_pptp _pgrehdr, *pgrehdr; - __be16 srckey; - struct gre_hdr _grehdr, *grehdr; - - /* first only delinearize old RFC1701 GRE header */ - grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr); - if (!grehdr || grehdr->version != GRE_VERSION_PPTP) { - /* try to behave like "ip_conntrack_proto_generic" */ - tuple->src.u.all = 0; - tuple->dst.u.all = 0; - return 1; - } - - /* PPTP header is variable length, only need up to the call_id field */ - pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr); - if (!pgrehdr) - return 1; - - if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) { - DEBUGP("GRE_VERSION_PPTP but unknown proto\n"); - return 0; - } - - tuple->dst.u.gre.key = pgrehdr->call_id; - srckey = gre_keymap_lookup(tuple); - tuple->src.u.gre.key = srckey; - - return 1; -} - -/* print gre part of tuple */ -static int gre_print_tuple(struct seq_file *s, - const struct ip_conntrack_tuple *tuple) -{ - return seq_printf(s, "srckey=0x%x dstkey=0x%x ", - ntohs(tuple->src.u.gre.key), - ntohs(tuple->dst.u.gre.key)); -} - -/* print private data for conntrack */ -static int gre_print_conntrack(struct seq_file *s, - const struct ip_conntrack *ct) -{ - return seq_printf(s, "timeout=%u, stream_timeout=%u ", - (ct->proto.gre.timeout / HZ), - (ct->proto.gre.stream_timeout / HZ)); -} - -/* Returns verdict for packet, and may modify conntrack */ -static int gre_packet(struct ip_conntrack *ct, - const struct sk_buff *skb, - enum ip_conntrack_info conntrackinfo) -{ - /* If we've seen traffic both ways, this is a GRE connection. - * Extend timeout. */ - if (ct->status & IPS_SEEN_REPLY) { - ip_ct_refresh_acct(ct, conntrackinfo, skb, - ct->proto.gre.stream_timeout); - /* Also, more likely to be important, and not a probe. 
*/ - set_bit(IPS_ASSURED_BIT, &ct->status); - ip_conntrack_event_cache(IPCT_STATUS, skb); - } else - ip_ct_refresh_acct(ct, conntrackinfo, skb, - ct->proto.gre.timeout); - - return NF_ACCEPT; -} - -/* Called when a new connection for this protocol found. */ -static int gre_new(struct ip_conntrack *ct, - const struct sk_buff *skb) -{ - DEBUGP(": "); - DUMP_TUPLE_GRE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - - /* initialize to sane value. Ideally a conntrack helper - * (e.g. in case of pptp) is increasing them */ - ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT; - ct->proto.gre.timeout = GRE_TIMEOUT; - - return 1; -} - -/* Called when a conntrack entry has already been removed from the hashes - * and is about to be deleted from memory */ -static void gre_destroy(struct ip_conntrack *ct) -{ - struct ip_conntrack *master = ct->master; - DEBUGP(" entering\n"); - - if (!master) - DEBUGP("no master !?!\n"); - else - ip_ct_gre_keymap_destroy(master); -} - -/* protocol helper struct */ -static struct ip_conntrack_protocol gre = { - .proto = IPPROTO_GRE, - .name = "gre", - .pkt_to_tuple = gre_pkt_to_tuple, - .invert_tuple = gre_invert_tuple, - .print_tuple = gre_print_tuple, - .print_conntrack = gre_print_conntrack, - .packet = gre_packet, - .new = gre_new, - .destroy = gre_destroy, - .me = THIS_MODULE, -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) - .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple, -#endif -}; - -/* ip_conntrack_proto_gre initialization */ -int __init ip_ct_proto_gre_init(void) -{ - return ip_conntrack_protocol_register(&gre); -} - -/* This cannot be __exit, as it is invoked from ip_conntrack_helper_pptp.c's - * init() code on errors. - */ -void ip_ct_proto_gre_fini(void) -{ - struct list_head *pos, *n; - - /* delete all keymap entries */ - write_lock_bh(&ip_ct_gre_lock); - list_for_each_safe(pos, n, &gre_keymap_list) { - DEBUGP("deleting keymap %p at module unload time\n", pos); - list_del(pos); - kfree(pos); - } - write_unlock_bh(&ip_ct_gre_lock); - - ip_conntrack_protocol_unregister(&gre); -} - -EXPORT_SYMBOL(ip_ct_gre_keymap_add); -EXPORT_SYMBOL(ip_ct_gre_keymap_destroy); diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c deleted file mode 100644 index e253f3ee52d0..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c +++ /dev/null @@ -1,315 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -unsigned int ip_ct_icmp_timeout __read_mostly = 30*HZ; - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -static int icmp_pkt_to_tuple(const struct sk_buff *skb, - unsigned int dataoff, - struct ip_conntrack_tuple *tuple) -{ - struct icmphdr _hdr, *hp; - - hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); - if (hp == NULL) - return 0; - - tuple->dst.u.icmp.type = hp->type; - tuple->src.u.icmp.id = hp->un.echo.id; - tuple->dst.u.icmp.code = hp->code; - - return 1; -} - -/* Add 1; spaces filled with 0. 
*/ -static const u_int8_t invmap[] = { - [ICMP_ECHO] = ICMP_ECHOREPLY + 1, - [ICMP_ECHOREPLY] = ICMP_ECHO + 1, - [ICMP_TIMESTAMP] = ICMP_TIMESTAMPREPLY + 1, - [ICMP_TIMESTAMPREPLY] = ICMP_TIMESTAMP + 1, - [ICMP_INFO_REQUEST] = ICMP_INFO_REPLY + 1, - [ICMP_INFO_REPLY] = ICMP_INFO_REQUEST + 1, - [ICMP_ADDRESS] = ICMP_ADDRESSREPLY + 1, - [ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1 -}; - -static int icmp_invert_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_tuple *orig) -{ - if (orig->dst.u.icmp.type >= sizeof(invmap) - || !invmap[orig->dst.u.icmp.type]) - return 0; - - tuple->src.u.icmp.id = orig->src.u.icmp.id; - tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1; - tuple->dst.u.icmp.code = orig->dst.u.icmp.code; - return 1; -} - -/* Print out the per-protocol part of the tuple. */ -static int icmp_print_tuple(struct seq_file *s, - const struct ip_conntrack_tuple *tuple) -{ - return seq_printf(s, "type=%u code=%u id=%u ", - tuple->dst.u.icmp.type, - tuple->dst.u.icmp.code, - ntohs(tuple->src.u.icmp.id)); -} - -/* Print out the private part of the conntrack. */ -static int icmp_print_conntrack(struct seq_file *s, - const struct ip_conntrack *conntrack) -{ - return 0; -} - -/* Returns verdict for packet, or -1 for invalid. */ -static int icmp_packet(struct ip_conntrack *ct, - const struct sk_buff *skb, - enum ip_conntrack_info ctinfo) -{ - /* Try to delete connection immediately after all replies: - won't actually vanish as we still have skb, and del_timer - means this will only run once even if count hits zero twice - (theoretically possible with SMP) */ - if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { - if (atomic_dec_and_test(&ct->proto.icmp.count) - && del_timer(&ct->timeout)) - ct->timeout.function((unsigned long)ct); - } else { - atomic_inc(&ct->proto.icmp.count); - ip_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); - ip_ct_refresh_acct(ct, ctinfo, skb, ip_ct_icmp_timeout); - } - - return NF_ACCEPT; -} - -/* Called when a new connection for this protocol found. */ -static int icmp_new(struct ip_conntrack *conntrack, - const struct sk_buff *skb) -{ - static const u_int8_t valid_new[] = { - [ICMP_ECHO] = 1, - [ICMP_TIMESTAMP] = 1, - [ICMP_INFO_REQUEST] = 1, - [ICMP_ADDRESS] = 1 - }; - - if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) - || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) { - /* Can't create a new ICMP `conn' with this. */ - DEBUGP("icmp: can't create new conn with type %u\n", - conntrack->tuplehash[0].tuple.dst.u.icmp.type); - DUMP_TUPLE(&conntrack->tuplehash[0].tuple); - return 0; - } - atomic_set(&conntrack->proto.icmp.count, 0); - return 1; -} - -static int -icmp_error_message(struct sk_buff *skb, - enum ip_conntrack_info *ctinfo, - unsigned int hooknum) -{ - struct ip_conntrack_tuple innertuple, origtuple; - struct { - struct icmphdr icmp; - struct iphdr ip; - } _in, *inside; - struct ip_conntrack_protocol *innerproto; - struct ip_conntrack_tuple_hash *h; - int dataoff; - - IP_NF_ASSERT(skb->nfct == NULL); - - /* Not enough header? 
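Aside: the invmap[] table above stores the inverse ICMP type plus one so that
a zero entry can mean "no inverse defined"; icmp_invert_tuple() subtracts the
one again. The same sentinel trick as a standalone sketch (only the echo pair
is filled in here):

    #include <stdio.h>

    /* index: type, value: inverse type + 1, 0 = not invertible */
    static const unsigned char inv[] = {
            [8] = 0 + 1,    /* ICMP_ECHO (8)      -> ICMP_ECHOREPLY (0) */
            [0] = 8 + 1,    /* ICMP_ECHOREPLY (0) -> ICMP_ECHO (8)      */
    };

    static int invert(unsigned char type, unsigned char *out)
    {
            if (type >= sizeof(inv) || !inv[type])
                    return 0;               /* e.g. DEST_UNREACH: no inverse */
            *out = inv[type] - 1;           /* undo the +1 sentinel */
            return 1;
    }

    int main(void)
    {
            unsigned char r;
            if (invert(8, &r))
                    printf("echo inverts to type %u\n", r);  /* prints 0 */
            return 0;
    }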
*/ - inside = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_in), &_in); - if (inside == NULL) - return -NF_ACCEPT; - - /* Ignore ICMP's containing fragments (shouldn't happen) */ - if (inside->ip.frag_off & htons(IP_OFFSET)) { - DEBUGP("icmp_error_track: fragment of proto %u\n", - inside->ip.protocol); - return -NF_ACCEPT; - } - - innerproto = ip_conntrack_proto_find_get(inside->ip.protocol); - dataoff = ip_hdrlen(skb) + sizeof(inside->icmp) + inside->ip.ihl * 4; - /* Are they talking about one of our connections? */ - if (!ip_ct_get_tuple(&inside->ip, skb, dataoff, &origtuple, innerproto)) { - DEBUGP("icmp_error: ! get_tuple p=%u", inside->ip.protocol); - ip_conntrack_proto_put(innerproto); - return -NF_ACCEPT; - } - - /* Ordinarily, we'd expect the inverted tupleproto, but it's - been preserved inside the ICMP. */ - if (!ip_ct_invert_tuple(&innertuple, &origtuple, innerproto)) { - DEBUGP("icmp_error_track: Can't invert tuple\n"); - ip_conntrack_proto_put(innerproto); - return -NF_ACCEPT; - } - ip_conntrack_proto_put(innerproto); - - *ctinfo = IP_CT_RELATED; - - h = ip_conntrack_find_get(&innertuple, NULL); - if (!h) { - /* Locally generated ICMPs will match inverted if they - haven't been SNAT'ed yet */ - /* FIXME: NAT code has to handle half-done double NAT --RR */ - if (hooknum == NF_IP_LOCAL_OUT) - h = ip_conntrack_find_get(&origtuple, NULL); - - if (!h) { - DEBUGP("icmp_error_track: no match\n"); - return -NF_ACCEPT; - } - /* Reverse direction from that found */ - if (DIRECTION(h) != IP_CT_DIR_REPLY) - *ctinfo += IP_CT_IS_REPLY; - } else { - if (DIRECTION(h) == IP_CT_DIR_REPLY) - *ctinfo += IP_CT_IS_REPLY; - } - - /* Update skb to refer to this connection */ - skb->nfct = &tuplehash_to_ctrack(h)->ct_general; - skb->nfctinfo = *ctinfo; - return -NF_ACCEPT; -} - -/* Small and modified version of icmp_rcv */ -static int -icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, - unsigned int hooknum) -{ - struct icmphdr _ih, *icmph; - - /* Not enough header? */ - icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih); - if (icmph == NULL) { - if (LOG_INVALID(IPPROTO_ICMP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_icmp: short packet "); - return -NF_ACCEPT; - } - - /* See ip_conntrack_proto_tcp.c */ - if (ip_conntrack_checksum && hooknum == NF_IP_PRE_ROUTING && - nf_ip_checksum(skb, hooknum, ip_hdrlen(skb), 0)) { - if (LOG_INVALID(IPPROTO_ICMP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_icmp: bad ICMP checksum "); - return -NF_ACCEPT; - } - - /* - * 18 is the highest 'known' ICMP type. Anything else is a mystery - * - * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently - * discarded. - */ - if (icmph->type > NR_ICMP_TYPES) { - if (LOG_INVALID(IPPROTO_ICMP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_icmp: invalid ICMP type "); - return -NF_ACCEPT; - } - - /* Need to track icmp error message? 
*/ - if (icmph->type != ICMP_DEST_UNREACH - && icmph->type != ICMP_SOURCE_QUENCH - && icmph->type != ICMP_TIME_EXCEEDED - && icmph->type != ICMP_PARAMETERPROB - && icmph->type != ICMP_REDIRECT) - return NF_ACCEPT; - - return icmp_error_message(skb, ctinfo, hooknum); -} - -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) -static int icmp_tuple_to_nfattr(struct sk_buff *skb, - const struct ip_conntrack_tuple *t) -{ - NFA_PUT(skb, CTA_PROTO_ICMP_ID, sizeof(__be16), - &t->src.u.icmp.id); - NFA_PUT(skb, CTA_PROTO_ICMP_TYPE, sizeof(u_int8_t), - &t->dst.u.icmp.type); - NFA_PUT(skb, CTA_PROTO_ICMP_CODE, sizeof(u_int8_t), - &t->dst.u.icmp.code); - - return 0; - -nfattr_failure: - return -1; -} - -static int icmp_nfattr_to_tuple(struct nfattr *tb[], - struct ip_conntrack_tuple *tuple) -{ - if (!tb[CTA_PROTO_ICMP_TYPE-1] - || !tb[CTA_PROTO_ICMP_CODE-1] - || !tb[CTA_PROTO_ICMP_ID-1]) - return -EINVAL; - - tuple->dst.u.icmp.type = - *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]); - tuple->dst.u.icmp.code = - *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]); - tuple->src.u.icmp.id = - *(__be16 *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]); - - if (tuple->dst.u.icmp.type >= sizeof(invmap) - || !invmap[tuple->dst.u.icmp.type]) - return -EINVAL; - - return 0; -} -#endif - -struct ip_conntrack_protocol ip_conntrack_protocol_icmp = -{ - .proto = IPPROTO_ICMP, - .name = "icmp", - .pkt_to_tuple = icmp_pkt_to_tuple, - .invert_tuple = icmp_invert_tuple, - .print_tuple = icmp_print_tuple, - .print_conntrack = icmp_print_conntrack, - .packet = icmp_packet, - .new = icmp_new, - .error = icmp_error, -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) - .tuple_to_nfattr = icmp_tuple_to_nfattr, - .nfattr_to_tuple = icmp_nfattr_to_tuple, -#endif -}; diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c deleted file mode 100644 index 91d0c05c8e86..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c +++ /dev/null @@ -1,659 +0,0 @@ -/* - * Connection tracking protocol helper module for SCTP. - * - * SCTP is defined in RFC 2960. References to various sections in this code - * are to this RFC. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* - * Added support for proc manipulation of timeouts. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#if 0 -#define DEBUGP(format, ...) printk(format, ## __VA_ARGS__) -#else -#define DEBUGP(format, args...) -#endif - -/* Protects conntrack->proto.sctp */ -static DEFINE_RWLOCK(sctp_lock); - -/* FIXME: Examine ipfilter's timeouts and conntrack transitions more - closely. They're more complex. 
--RR - - And so for me for SCTP :D -Kiran */ - -static const char *sctp_conntrack_names[] = { - "NONE", - "CLOSED", - "COOKIE_WAIT", - "COOKIE_ECHOED", - "ESTABLISHED", - "SHUTDOWN_SENT", - "SHUTDOWN_RECD", - "SHUTDOWN_ACK_SENT", -}; - -#define SECS * HZ -#define MINS * 60 SECS -#define HOURS * 60 MINS -#define DAYS * 24 HOURS - -static unsigned int ip_ct_sctp_timeout_closed __read_mostly = 10 SECS; -static unsigned int ip_ct_sctp_timeout_cookie_wait __read_mostly = 3 SECS; -static unsigned int ip_ct_sctp_timeout_cookie_echoed __read_mostly = 3 SECS; -static unsigned int ip_ct_sctp_timeout_established __read_mostly = 5 DAYS; -static unsigned int ip_ct_sctp_timeout_shutdown_sent __read_mostly = 300 SECS / 1000; -static unsigned int ip_ct_sctp_timeout_shutdown_recd __read_mostly = 300 SECS / 1000; -static unsigned int ip_ct_sctp_timeout_shutdown_ack_sent __read_mostly = 3 SECS; - -static const unsigned int * sctp_timeouts[] -= { NULL, /* SCTP_CONNTRACK_NONE */ - &ip_ct_sctp_timeout_closed, /* SCTP_CONNTRACK_CLOSED */ - &ip_ct_sctp_timeout_cookie_wait, /* SCTP_CONNTRACK_COOKIE_WAIT */ - &ip_ct_sctp_timeout_cookie_echoed, /* SCTP_CONNTRACK_COOKIE_ECHOED */ - &ip_ct_sctp_timeout_established, /* SCTP_CONNTRACK_ESTABLISHED */ - &ip_ct_sctp_timeout_shutdown_sent, /* SCTP_CONNTRACK_SHUTDOWN_SENT */ - &ip_ct_sctp_timeout_shutdown_recd, /* SCTP_CONNTRACK_SHUTDOWN_RECD */ - &ip_ct_sctp_timeout_shutdown_ack_sent /* SCTP_CONNTRACK_SHUTDOWN_ACK_SENT */ - }; - -#define sNO SCTP_CONNTRACK_NONE -#define sCL SCTP_CONNTRACK_CLOSED -#define sCW SCTP_CONNTRACK_COOKIE_WAIT -#define sCE SCTP_CONNTRACK_COOKIE_ECHOED -#define sES SCTP_CONNTRACK_ESTABLISHED -#define sSS SCTP_CONNTRACK_SHUTDOWN_SENT -#define sSR SCTP_CONNTRACK_SHUTDOWN_RECD -#define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT -#define sIV SCTP_CONNTRACK_MAX - -/* - These are the descriptions of the states: - -NOTE: These state names are tantalizingly similar to the states of an -SCTP endpoint. But the interpretation of the states is a little different, -considering that these are the states of the connection and not of an end -point. Please note the subtleties. -Kiran - -NONE - Nothing so far. -COOKIE WAIT - We have seen an INIT chunk in the original direction, or also - an INIT_ACK chunk in the reply direction. -COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction. -ESTABLISHED - We have seen a COOKIE_ACK in the reply direction. -SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction. -SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply directoin. -SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite - to that of the SHUTDOWN chunk. -CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of - the SHUTDOWN chunk. Connection is closed. -*/ - -/* TODO - - I have assumed that the first INIT is in the original direction. - This messes things when an INIT comes in the reply direction in CLOSED - state. - - Check the error type in the reply dir before transitioning from -cookie echoed to closed. - - Sec 5.2.4 of RFC 2960 - - Multi Homing support. 
-*/ - -/* SCTP conntrack state transitions */ -static const enum sctp_conntrack sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = { - { -/* ORIGINAL */ -/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */ -/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA}, -/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA}, -/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, -/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA}, -/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA}, -/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant have Stale cookie*/ -/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */ -/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in orig dir */ -/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL} - }, - { -/* REPLY */ -/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */ -/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */ -/* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA}, -/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, -/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA}, -/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA}, -/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA}, -/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in reply dir */ -/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA}, -/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL} - } -}; - -static int sctp_pkt_to_tuple(const struct sk_buff *skb, - unsigned int dataoff, - struct ip_conntrack_tuple *tuple) -{ - sctp_sctphdr_t _hdr, *hp; - - DEBUGP(__FUNCTION__); - DEBUGP("\n"); - - /* Actually only need first 8 bytes. */ - hp = skb_header_pointer(skb, dataoff, 8, &_hdr); - if (hp == NULL) - return 0; - - tuple->src.u.sctp.port = hp->source; - tuple->dst.u.sctp.port = hp->dest; - return 1; -} - -static int sctp_invert_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_tuple *orig) -{ - DEBUGP(__FUNCTION__); - DEBUGP("\n"); - - tuple->src.u.sctp.port = orig->dst.u.sctp.port; - tuple->dst.u.sctp.port = orig->src.u.sctp.port; - return 1; -} - -/* Print out the per-protocol part of the tuple. */ -static int sctp_print_tuple(struct seq_file *s, - const struct ip_conntrack_tuple *tuple) -{ - DEBUGP(__FUNCTION__); - DEBUGP("\n"); - - return seq_printf(s, "sport=%hu dport=%hu ", - ntohs(tuple->src.u.sctp.port), - ntohs(tuple->dst.u.sctp.port)); -} - -/* Print out the private part of the conntrack. 
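Aside: the sctp_conntracks table just above is consulted as
sctp_conntracks[dir][chunk_row][current_state], with rows in the order the
table lists them (init, init_ack, abort, ...). Reading the ORIGINAL half as a
worked example:

    sctp_conntracks[0][0][sCL] == sCW   (INIT while CLOSED       -> COOKIE_WAIT)
    sctp_conntracks[0][2][sES] == sCL   (ABORT while ESTABLISHED -> CLOSED)

new_state() further down performs exactly this lookup after mapping the chunk
type to a row number.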
*/ -static int sctp_print_conntrack(struct seq_file *s, - const struct ip_conntrack *conntrack) -{ - enum sctp_conntrack state; - - DEBUGP(__FUNCTION__); - DEBUGP("\n"); - - read_lock_bh(&sctp_lock); - state = conntrack->proto.sctp.state; - read_unlock_bh(&sctp_lock); - - return seq_printf(s, "%s ", sctp_conntrack_names[state]); -} - -#define for_each_sctp_chunk(skb, sch, _sch, offset, count) \ -for (offset = ip_hdrlen(skb) + sizeof(sctp_sctphdr_t), count = 0; \ - offset < skb->len && \ - (sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch)); \ - offset += (ntohs(sch->length) + 3) & ~3, count++) - -/* Some validity checks to make sure the chunks are fine */ -static int do_basic_checks(struct ip_conntrack *conntrack, - const struct sk_buff *skb, - char *map) -{ - u_int32_t offset, count; - sctp_chunkhdr_t _sch, *sch; - int flag; - - DEBUGP(__FUNCTION__); - DEBUGP("\n"); - - flag = 0; - - for_each_sctp_chunk (skb, sch, _sch, offset, count) { - DEBUGP("Chunk Num: %d Type: %d\n", count, sch->type); - - if (sch->type == SCTP_CID_INIT - || sch->type == SCTP_CID_INIT_ACK - || sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { - flag = 1; - } - - /* - * Cookie Ack/Echo chunks not the first OR - * Init / Init Ack / Shutdown compl chunks not the only chunks - * OR zero-length. - */ - if (((sch->type == SCTP_CID_COOKIE_ACK - || sch->type == SCTP_CID_COOKIE_ECHO - || flag) - && count !=0) || !sch->length) { - DEBUGP("Basic checks failed\n"); - return 1; - } - - if (map) { - set_bit(sch->type, (void *)map); - } - } - - DEBUGP("Basic checks passed\n"); - return count == 0; -} - -static int new_state(enum ip_conntrack_dir dir, - enum sctp_conntrack cur_state, - int chunk_type) -{ - int i; - - DEBUGP(__FUNCTION__); - DEBUGP("\n"); - - DEBUGP("Chunk type: %d\n", chunk_type); - - switch (chunk_type) { - case SCTP_CID_INIT: - DEBUGP("SCTP_CID_INIT\n"); - i = 0; break; - case SCTP_CID_INIT_ACK: - DEBUGP("SCTP_CID_INIT_ACK\n"); - i = 1; break; - case SCTP_CID_ABORT: - DEBUGP("SCTP_CID_ABORT\n"); - i = 2; break; - case SCTP_CID_SHUTDOWN: - DEBUGP("SCTP_CID_SHUTDOWN\n"); - i = 3; break; - case SCTP_CID_SHUTDOWN_ACK: - DEBUGP("SCTP_CID_SHUTDOWN_ACK\n"); - i = 4; break; - case SCTP_CID_ERROR: - DEBUGP("SCTP_CID_ERROR\n"); - i = 5; break; - case SCTP_CID_COOKIE_ECHO: - DEBUGP("SCTP_CID_COOKIE_ECHO\n"); - i = 6; break; - case SCTP_CID_COOKIE_ACK: - DEBUGP("SCTP_CID_COOKIE_ACK\n"); - i = 7; break; - case SCTP_CID_SHUTDOWN_COMPLETE: - DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n"); - i = 8; break; - default: - /* Other chunks like DATA, SACK, HEARTBEAT and - its ACK do not cause a change in state */ - DEBUGP("Unknown chunk type, Will stay in %s\n", - sctp_conntrack_names[cur_state]); - return cur_state; - } - - DEBUGP("dir: %d cur_state: %s chunk_type: %d new_state: %s\n", - dir, sctp_conntrack_names[cur_state], chunk_type, - sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]); - - return sctp_conntracks[dir][i][cur_state]; -} - -/* Returns verdict for packet, or -1 for invalid. 
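Aside: the for_each_sctp_chunk() macro above steps through the packet by
rounding each chunk length up to a 32-bit boundary, since SCTP chunks are
4-byte aligned (RFC 2960). A tiny standalone illustration of that rounding:

    #include <stdio.h>

    /* same rounding as (ntohs(sch->length) + 3) & ~3 in the macro above */
    static unsigned int chunk_stride(unsigned int length)
    {
            return (length + 3) & ~3u;
    }

    int main(void)
    {
            printf("%u %u %u\n", chunk_stride(4), chunk_stride(17), chunk_stride(20));
            /* prints: 4 20 20 */
            return 0;
    }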
*/ -static int sctp_packet(struct ip_conntrack *conntrack, - const struct sk_buff *skb, - enum ip_conntrack_info ctinfo) -{ - enum sctp_conntrack newconntrack, oldsctpstate; - struct iphdr *iph = ip_hdr(skb); - sctp_sctphdr_t _sctph, *sh; - sctp_chunkhdr_t _sch, *sch; - u_int32_t offset, count; - char map[256 / sizeof (char)] = {0}; - - DEBUGP(__FUNCTION__); - DEBUGP("\n"); - - sh = skb_header_pointer(skb, iph->ihl * 4, sizeof(_sctph), &_sctph); - if (sh == NULL) - return -1; - - if (do_basic_checks(conntrack, skb, map) != 0) - return -1; - - /* Check the verification tag (Sec 8.5) */ - if (!test_bit(SCTP_CID_INIT, (void *)map) - && !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, (void *)map) - && !test_bit(SCTP_CID_COOKIE_ECHO, (void *)map) - && !test_bit(SCTP_CID_ABORT, (void *)map) - && !test_bit(SCTP_CID_SHUTDOWN_ACK, (void *)map) - && (sh->vtag != conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) { - DEBUGP("Verification tag check failed\n"); - return -1; - } - - oldsctpstate = newconntrack = SCTP_CONNTRACK_MAX; - for_each_sctp_chunk (skb, sch, _sch, offset, count) { - write_lock_bh(&sctp_lock); - - /* Special cases of Verification tag check (Sec 8.5.1) */ - if (sch->type == SCTP_CID_INIT) { - /* Sec 8.5.1 (A) */ - if (sh->vtag != 0) { - write_unlock_bh(&sctp_lock); - return -1; - } - } else if (sch->type == SCTP_CID_ABORT) { - /* Sec 8.5.1 (B) */ - if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)]) - && !(sh->vtag == conntrack->proto.sctp.vtag - [1 - CTINFO2DIR(ctinfo)])) { - write_unlock_bh(&sctp_lock); - return -1; - } - } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { - /* Sec 8.5.1 (C) */ - if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)]) - && !(sh->vtag == conntrack->proto.sctp.vtag - [1 - CTINFO2DIR(ctinfo)] - && (sch->flags & 1))) { - write_unlock_bh(&sctp_lock); - return -1; - } - } else if (sch->type == SCTP_CID_COOKIE_ECHO) { - /* Sec 8.5.1 (D) */ - if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) { - write_unlock_bh(&sctp_lock); - return -1; - } - } - - oldsctpstate = conntrack->proto.sctp.state; - newconntrack = new_state(CTINFO2DIR(ctinfo), oldsctpstate, sch->type); - - /* Invalid */ - if (newconntrack == SCTP_CONNTRACK_MAX) { - DEBUGP("ip_conntrack_sctp: Invalid dir=%i ctype=%u conntrack=%u\n", - CTINFO2DIR(ctinfo), sch->type, oldsctpstate); - write_unlock_bh(&sctp_lock); - return -1; - } - - /* If it is an INIT or an INIT ACK note down the vtag */ - if (sch->type == SCTP_CID_INIT - || sch->type == SCTP_CID_INIT_ACK) { - sctp_inithdr_t _inithdr, *ih; - - ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), - sizeof(_inithdr), &_inithdr); - if (ih == NULL) { - write_unlock_bh(&sctp_lock); - return -1; - } - DEBUGP("Setting vtag %x for dir %d\n", - ih->init_tag, !CTINFO2DIR(ctinfo)); - conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag; - } - - conntrack->proto.sctp.state = newconntrack; - if (oldsctpstate != newconntrack) - ip_conntrack_event_cache(IPCT_PROTOINFO, skb); - write_unlock_bh(&sctp_lock); - } - - ip_ct_refresh_acct(conntrack, ctinfo, skb, *sctp_timeouts[newconntrack]); - - if (oldsctpstate == SCTP_CONNTRACK_COOKIE_ECHOED - && CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY - && newconntrack == SCTP_CONNTRACK_ESTABLISHED) { - DEBUGP("Setting assured bit\n"); - set_bit(IPS_ASSURED_BIT, &conntrack->status); - ip_conntrack_event_cache(IPCT_STATUS, skb); - } - - return NF_ACCEPT; -} - -/* Called when a new connection for this protocol found. 
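Aside on the verification-tag bookkeeping in sctp_packet() above (RFC 2960
Sec 8.5): the init_tag carried by an INIT or INIT_ACK is stored for the
opposite direction, because that is the tag the peer must echo back.
Schematically:

    INIT, original -> reply, init_tag = T1
        conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY]    = T1;  /* reply packets must carry T1 */
    INIT_ACK, reply -> original, init_tag = T2
        conntrack->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = T2;  /* original packets must carry T2 */

which is the vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag assignment in the loop
above.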
*/ -static int sctp_new(struct ip_conntrack *conntrack, - const struct sk_buff *skb) -{ - enum sctp_conntrack newconntrack; - struct iphdr *iph = ip_hdr(skb); - sctp_sctphdr_t _sctph, *sh; - sctp_chunkhdr_t _sch, *sch; - u_int32_t offset, count; - char map[256 / sizeof (char)] = {0}; - - DEBUGP(__FUNCTION__); - DEBUGP("\n"); - - sh = skb_header_pointer(skb, iph->ihl * 4, sizeof(_sctph), &_sctph); - if (sh == NULL) - return 0; - - if (do_basic_checks(conntrack, skb, map) != 0) - return 0; - - /* If an OOTB packet has any of these chunks discard (Sec 8.4) */ - if ((test_bit (SCTP_CID_ABORT, (void *)map)) - || (test_bit (SCTP_CID_SHUTDOWN_COMPLETE, (void *)map)) - || (test_bit (SCTP_CID_COOKIE_ACK, (void *)map))) { - return 0; - } - - newconntrack = SCTP_CONNTRACK_MAX; - for_each_sctp_chunk (skb, sch, _sch, offset, count) { - /* Don't need lock here: this conntrack not in circulation yet */ - newconntrack = new_state (IP_CT_DIR_ORIGINAL, - SCTP_CONNTRACK_NONE, sch->type); - - /* Invalid: delete conntrack */ - if (newconntrack == SCTP_CONNTRACK_MAX) { - DEBUGP("ip_conntrack_sctp: invalid new deleting.\n"); - return 0; - } - - /* Copy the vtag into the state info */ - if (sch->type == SCTP_CID_INIT) { - if (sh->vtag == 0) { - sctp_inithdr_t _inithdr, *ih; - - ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), - sizeof(_inithdr), &_inithdr); - if (ih == NULL) - return 0; - - DEBUGP("Setting vtag %x for new conn\n", - ih->init_tag); - - conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = - ih->init_tag; - } else { - /* Sec 8.5.1 (A) */ - return 0; - } - } - /* If it is a shutdown ack OOTB packet, we expect a return - shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ - else { - DEBUGP("Setting vtag %x for new conn OOTB\n", - sh->vtag); - conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; - } - - conntrack->proto.sctp.state = newconntrack; - } - - return 1; -} - -static struct ip_conntrack_protocol ip_conntrack_protocol_sctp = { - .proto = IPPROTO_SCTP, - .name = "sctp", - .pkt_to_tuple = sctp_pkt_to_tuple, - .invert_tuple = sctp_invert_tuple, - .print_tuple = sctp_print_tuple, - .print_conntrack = sctp_print_conntrack, - .packet = sctp_packet, - .new = sctp_new, - .destroy = NULL, - .me = THIS_MODULE, -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) - .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple, -#endif -}; - -#ifdef CONFIG_SYSCTL -static ctl_table ip_ct_sysctl_table[] = { - { - .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, - .procname = "ip_conntrack_sctp_timeout_closed", - .data = &ip_ct_sctp_timeout_closed, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, - .procname = "ip_conntrack_sctp_timeout_cookie_wait", - .data = &ip_ct_sctp_timeout_cookie_wait, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, - .procname = "ip_conntrack_sctp_timeout_cookie_echoed", - .data = &ip_ct_sctp_timeout_cookie_echoed, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, - .procname = "ip_conntrack_sctp_timeout_established", - .data = &ip_ct_sctp_timeout_established, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = 
&proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, - .procname = "ip_conntrack_sctp_timeout_shutdown_sent", - .data = &ip_ct_sctp_timeout_shutdown_sent, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, - .procname = "ip_conntrack_sctp_timeout_shutdown_recd", - .data = &ip_ct_sctp_timeout_shutdown_recd, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, - .procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent", - .data = &ip_ct_sctp_timeout_shutdown_ack_sent, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { .ctl_name = 0 } -}; - -static ctl_table ip_ct_netfilter_table[] = { - { - .ctl_name = NET_IPV4_NETFILTER, - .procname = "netfilter", - .mode = 0555, - .child = ip_ct_sysctl_table, - }, - { .ctl_name = 0 } -}; - -static ctl_table ip_ct_ipv4_table[] = { - { - .ctl_name = NET_IPV4, - .procname = "ipv4", - .mode = 0555, - .child = ip_ct_netfilter_table, - }, - { .ctl_name = 0 } -}; - -static ctl_table ip_ct_net_table[] = { - { - .ctl_name = CTL_NET, - .procname = "net", - .mode = 0555, - .child = ip_ct_ipv4_table, - }, - { .ctl_name = 0 } -}; - -static struct ctl_table_header *ip_ct_sysctl_header; -#endif - -static int __init ip_conntrack_proto_sctp_init(void) -{ - int ret; - - ret = ip_conntrack_protocol_register(&ip_conntrack_protocol_sctp); - if (ret) { - printk("ip_conntrack_proto_sctp: protocol register failed\n"); - goto out; - } - -#ifdef CONFIG_SYSCTL - ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table); - if (ip_ct_sysctl_header == NULL) { - ret = -ENOMEM; - printk("ip_conntrack_proto_sctp: can't register to sysctl.\n"); - goto cleanup; - } -#endif - - return ret; - -#ifdef CONFIG_SYSCTL - cleanup: - ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp); -#endif - out: - DEBUGP("SCTP conntrack module loading %s\n", - ret ? "failed": "succeeded"); - return ret; -} - -static void __exit ip_conntrack_proto_sctp_fini(void) -{ - ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp); -#ifdef CONFIG_SYSCTL - unregister_sysctl_table(ip_ct_sysctl_header); -#endif - DEBUGP("SCTP conntrack module unloaded\n"); -} - -module_init(ip_conntrack_proto_sctp_init); -module_exit(ip_conntrack_proto_sctp_fini); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Kiran Kumar Immidi"); -MODULE_DESCRIPTION("Netfilter connection tracking protocol helper for SCTP"); diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c deleted file mode 100644 index d03436edfd93..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c +++ /dev/null @@ -1,1163 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * Jozsef Kadlecsik : - * - Real stateful connection tracking - * - Modified state transitions table - * - Window scaling support added - * - SACK support added - * - * Willy Tarreau: - * - State table bugfixes - * - More robust state changes - * - Tuning timer parameters - * - * version 2.2 - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#if 0 -#define DEBUGP printk -#define DEBUGP_VARS -#else -#define DEBUGP(format, args...) -#endif - -/* Protects conntrack->proto.tcp */ -static DEFINE_RWLOCK(tcp_lock); - -/* "Be conservative in what you do, - be liberal in what you accept from others." - If it's non-zero, we mark only out of window RST segments as INVALID. */ -int ip_ct_tcp_be_liberal __read_mostly = 0; - -/* If it is set to zero, we disable picking up already established - connections. */ -int ip_ct_tcp_loose __read_mostly = 1; - -/* Max number of the retransmitted packets without receiving an (acceptable) - ACK from the destination. If this number is reached, a shorter timer - will be started. */ -int ip_ct_tcp_max_retrans __read_mostly = 3; - - /* FIXME: Examine ipfilter's timeouts and conntrack transitions more - closely. They're more complex. --RR */ - -static const char *tcp_conntrack_names[] = { - "NONE", - "SYN_SENT", - "SYN_RECV", - "ESTABLISHED", - "FIN_WAIT", - "CLOSE_WAIT", - "LAST_ACK", - "TIME_WAIT", - "CLOSE", - "LISTEN" -}; - -#define SECS * HZ -#define MINS * 60 SECS -#define HOURS * 60 MINS -#define DAYS * 24 HOURS - -unsigned int ip_ct_tcp_timeout_syn_sent __read_mostly = 2 MINS; -unsigned int ip_ct_tcp_timeout_syn_recv __read_mostly = 60 SECS; -unsigned int ip_ct_tcp_timeout_established __read_mostly = 5 DAYS; -unsigned int ip_ct_tcp_timeout_fin_wait __read_mostly = 2 MINS; -unsigned int ip_ct_tcp_timeout_close_wait __read_mostly = 60 SECS; -unsigned int ip_ct_tcp_timeout_last_ack __read_mostly = 30 SECS; -unsigned int ip_ct_tcp_timeout_time_wait __read_mostly = 2 MINS; -unsigned int ip_ct_tcp_timeout_close __read_mostly = 10 SECS; - -/* RFC1122 says the R2 limit should be at least 100 seconds. - Linux uses 15 packets as limit, which corresponds - to ~13-30min depending on RTO. */ -unsigned int ip_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS; - -static const unsigned int * tcp_timeouts[] -= { NULL, /* TCP_CONNTRACK_NONE */ - &ip_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */ - &ip_ct_tcp_timeout_syn_recv, /* TCP_CONNTRACK_SYN_RECV, */ - &ip_ct_tcp_timeout_established, /* TCP_CONNTRACK_ESTABLISHED, */ - &ip_ct_tcp_timeout_fin_wait, /* TCP_CONNTRACK_FIN_WAIT, */ - &ip_ct_tcp_timeout_close_wait, /* TCP_CONNTRACK_CLOSE_WAIT, */ - &ip_ct_tcp_timeout_last_ack, /* TCP_CONNTRACK_LAST_ACK, */ - &ip_ct_tcp_timeout_time_wait, /* TCP_CONNTRACK_TIME_WAIT, */ - &ip_ct_tcp_timeout_close, /* TCP_CONNTRACK_CLOSE, */ - NULL, /* TCP_CONNTRACK_LISTEN */ - }; - -#define sNO TCP_CONNTRACK_NONE -#define sSS TCP_CONNTRACK_SYN_SENT -#define sSR TCP_CONNTRACK_SYN_RECV -#define sES TCP_CONNTRACK_ESTABLISHED -#define sFW TCP_CONNTRACK_FIN_WAIT -#define sCW TCP_CONNTRACK_CLOSE_WAIT -#define sLA TCP_CONNTRACK_LAST_ACK -#define sTW TCP_CONNTRACK_TIME_WAIT -#define sCL TCP_CONNTRACK_CLOSE -#define sLI TCP_CONNTRACK_LISTEN -#define sIV TCP_CONNTRACK_MAX -#define sIG TCP_CONNTRACK_IGNORE - -/* What TCP flags are set from RST/SYN/FIN/ACK. 
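Aside: the SECS/MINS/HOURS/DAYS defines above (the SCTP file earlier uses the
same set) are deliberately "headless" macros -- SECS expands to "* HZ", so an
expression such as 2 MINS expands left to right into 2 * 60 * HZ. Outside the
kernel, with a made-up HZ, the same trick looks like this:

    #include <stdio.h>

    #define HZ    250               /* assumed tick rate, illustration only */
    #define SECS  * HZ
    #define MINS  * 60 SECS
    #define HOURS * 60 MINS
    #define DAYS  * 24 HOURS

    int main(void)
    {
            printf("%lu\n", (unsigned long)(2 MINS));   /* 2 * 60 * 250 = 30000 */
            printf("%lu\n", (unsigned long)(5 DAYS));   /* 108000000 */
            return 0;
    }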
*/ -enum tcp_bit_set { - TCP_SYN_SET, - TCP_SYNACK_SET, - TCP_FIN_SET, - TCP_ACK_SET, - TCP_RST_SET, - TCP_NONE_SET, -}; - -/* - * The TCP state transition table needs a few words... - * - * We are the man in the middle. All the packets go through us - * but might get lost in transit to the destination. - * It is assumed that the destinations can't receive segments - * we haven't seen. - * - * The checked segment is in window, but our windows are *not* - * equivalent with the ones of the sender/receiver. We always - * try to guess the state of the current sender. - * - * The meaning of the states are: - * - * NONE: initial state - * SYN_SENT: SYN-only packet seen - * SYN_RECV: SYN-ACK packet seen - * ESTABLISHED: ACK packet seen - * FIN_WAIT: FIN packet seen - * CLOSE_WAIT: ACK seen (after FIN) - * LAST_ACK: FIN seen (after FIN) - * TIME_WAIT: last ACK seen - * CLOSE: closed connection - * - * LISTEN state is not used. - * - * Packets marked as IGNORED (sIG): - * if they may be either invalid or valid - * and the receiver may send back a connection - * closing RST or a SYN/ACK. - * - * Packets marked as INVALID (sIV): - * if they are invalid - * or we do not support the request (simultaneous open) - */ -static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { - { -/* ORIGINAL */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*syn*/ { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sIV }, -/* - * sNO -> sSS Initialize a new connection - * sSS -> sSS Retransmitted SYN - * sSR -> sIG Late retransmitted SYN? - * sES -> sIG Error: SYNs in window outside the SYN_SENT state - * are errors. Receiver will reply with RST - * and close the connection. - * Or we are not in sync and hold a dead connection. - * sFW -> sIG - * sCW -> sIG - * sLA -> sIG - * sTW -> sSS Reopened connection (RFC 1122). - * sCL -> sSS - */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, -/* - * A SYN/ACK from the client is always invalid: - * - either it tries to set up a simultaneous open, which is - * not supported; - * - or the firewall has just been inserted between the two hosts - * during the session set-up. The SYN will be retransmitted - * by the true client (or it'll time out). - */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV }, -/* - * sNO -> sIV Too late and no reason to do anything... - * sSS -> sIV Client migth not send FIN in this state: - * we enforce waiting for a SYN/ACK reply first. - * sSR -> sFW Close started. - * sES -> sFW - * sFW -> sLA FIN seen in both directions, waiting for - * the last ACK. - * Migth be a retransmitted FIN as well... - * sCW -> sLA - * sLA -> sLA Retransmitted FIN. Remain in the same state. - * sTW -> sTW - * sCL -> sCL - */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*ack*/ { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV }, -/* - * sNO -> sES Assumed. - * sSS -> sIV ACK is invalid: we haven't seen a SYN/ACK yet. - * sSR -> sES Established state is reached. - * sES -> sES :-) - * sFW -> sCW Normal close request answered by ACK. - * sCW -> sCW - * sLA -> sTW Last ACK detected. - * sTW -> sTW Retransmitted last ACK. Remain in the same state. 
- * sCL -> sCL - */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV }, -/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV } - }, - { -/* REPLY */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*syn*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, -/* - * sNO -> sIV Never reached. - * sSS -> sIV Simultaneous open, not supported - * sSR -> sIV Simultaneous open, not supported. - * sES -> sIV Server may not initiate a connection. - * sFW -> sIV - * sCW -> sIV - * sLA -> sIV - * sTW -> sIV Reopened connection, but server may not do it. - * sCL -> sIV - */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIV }, -/* - * sSS -> sSR Standard open. - * sSR -> sSR Retransmitted SYN/ACK. - * sES -> sIG Late retransmitted SYN/ACK? - * sFW -> sIG Might be SYN/ACK answering ignored SYN - * sCW -> sIG - * sLA -> sIG - * sTW -> sIG - * sCL -> sIG - */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV }, -/* - * sSS -> sIV Server might not send FIN in this state. - * sSR -> sFW Close started. - * sES -> sFW - * sFW -> sLA FIN seen in both directions. - * sCW -> sLA - * sLA -> sLA Retransmitted FIN. - * sTW -> sTW - * sCL -> sCL - */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*ack*/ { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIV }, -/* - * sSS -> sIG Might be a half-open connection. - * sSR -> sSR Might answer late resent SYN. - * sES -> sES :-) - * sFW -> sCW Normal close request answered by ACK. - * sCW -> sCW - * sLA -> sTW Last ACK detected. - * sTW -> sTW Retransmitted last ACK. - * sCL -> sCL - */ -/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ -/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV }, -/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV } - } -}; - -static int tcp_pkt_to_tuple(const struct sk_buff *skb, - unsigned int dataoff, - struct ip_conntrack_tuple *tuple) -{ - struct tcphdr _hdr, *hp; - - /* Actually only need first 8 bytes. */ - hp = skb_header_pointer(skb, dataoff, 8, &_hdr); - if (hp == NULL) - return 0; - - tuple->src.u.tcp.port = hp->source; - tuple->dst.u.tcp.port = hp->dest; - - return 1; -} - -static int tcp_invert_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_tuple *orig) -{ - tuple->src.u.tcp.port = orig->dst.u.tcp.port; - tuple->dst.u.tcp.port = orig->src.u.tcp.port; - return 1; -} - -/* Print out the per-protocol part of the tuple. */ -static int tcp_print_tuple(struct seq_file *s, - const struct ip_conntrack_tuple *tuple) -{ - return seq_printf(s, "sport=%hu dport=%hu ", - ntohs(tuple->src.u.tcp.port), - ntohs(tuple->dst.u.tcp.port)); -} - -/* Print out the private part of the conntrack. 
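Aside: as with the SCTP table earlier, tcp_conntracks[][][] is consulted as
tcp_conntracks[dir][index][old_state], where the row index is derived from the
packet's flags (see get_conntrack_index() further down). A few worked lookups,
reading the tables above:

    tcp_conntracks[0][0][sNO] == sSS   (first SYN, original dir: NONE -> SYN_SENT)
    tcp_conntracks[1][1][sSS] == sSR   (SYN/ACK reply: SYN_SENT -> SYN_RECV)
    tcp_conntracks[0][3][sSR] == sES   (final ACK, original dir: SYN_RECV -> ESTABLISHED)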
*/ -static int tcp_print_conntrack(struct seq_file *s, - const struct ip_conntrack *conntrack) -{ - enum tcp_conntrack state; - - read_lock_bh(&tcp_lock); - state = conntrack->proto.tcp.state; - read_unlock_bh(&tcp_lock); - - return seq_printf(s, "%s ", tcp_conntrack_names[state]); -} - -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) -static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa, - const struct ip_conntrack *ct) -{ - struct nfattr *nest_parms; - - read_lock_bh(&tcp_lock); - nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); - NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), - &ct->proto.tcp.state); - read_unlock_bh(&tcp_lock); - - NFA_NEST_END(skb, nest_parms); - - return 0; - -nfattr_failure: - read_unlock_bh(&tcp_lock); - return -1; -} - -static const size_t cta_min_tcp[CTA_PROTOINFO_TCP_MAX] = { - [CTA_PROTOINFO_TCP_STATE-1] = sizeof(u_int8_t), -}; - -static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct) -{ - struct nfattr *attr = cda[CTA_PROTOINFO_TCP-1]; - struct nfattr *tb[CTA_PROTOINFO_TCP_MAX]; - - /* updates could not contain anything about the private - * protocol info, in that case skip the parsing */ - if (!attr) - return 0; - - nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr); - - if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp)) - return -EINVAL; - - if (!tb[CTA_PROTOINFO_TCP_STATE-1]) - return -EINVAL; - - write_lock_bh(&tcp_lock); - ct->proto.tcp.state = - *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); - write_unlock_bh(&tcp_lock); - - return 0; -} -#endif - -static unsigned int get_conntrack_index(const struct tcphdr *tcph) -{ - if (tcph->rst) return TCP_RST_SET; - else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET); - else if (tcph->fin) return TCP_FIN_SET; - else if (tcph->ack) return TCP_ACK_SET; - else return TCP_NONE_SET; -} - -/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering - in IP Filter' by Guido van Rooij. - - http://www.nluug.nl/events/sane2000/papers.html - http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz - - The boundaries and the conditions are changed according to RFC793: - the packet must intersect the window (i.e. segments may be - after the right or before the left edge) and thus receivers may ACK - segments after the right edge of the window. - - td_maxend = max(sack + max(win,1)) seen in reply packets - td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets - td_maxwin += seq + len - sender.td_maxend - if seq + len > sender.td_maxend - td_end = max(seq + len) seen in sent packets - - I. Upper bound for valid data: seq <= sender.td_maxend - II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin - III. Upper bound for valid ack: sack <= receiver.td_end - IV. Lower bound for valid ack: ack >= receiver.td_end - MAXACKWINDOW - - where sack is the highest right edge of sack block found in the packet. - - The upper bound limit for a valid ack is not ignored - - we doesn't have to deal with fragments. -*/ - -static inline __u32 segment_seq_plus_len(__u32 seq, - size_t len, - struct iphdr *iph, - struct tcphdr *tcph) -{ - return (seq + len - (iph->ihl + tcph->doff)*4 - + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0)); -} - -/* Fixme: what about big packets? */ -#define MAXACKWINCONST 66000 -#define MAXACKWINDOW(sender) \ - ((sender)->td_maxwin > MAXACKWINCONST ? 
(sender)->td_maxwin \ - : MAXACKWINCONST) - -/* - * Simplified tcp_parse_options routine from tcp_input.c - */ -static void tcp_options(const struct sk_buff *skb, - struct iphdr *iph, - struct tcphdr *tcph, - struct ip_ct_tcp_state *state) -{ - unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; - unsigned char *ptr; - int length = (tcph->doff*4) - sizeof(struct tcphdr); - - if (!length) - return; - - ptr = skb_header_pointer(skb, - (iph->ihl * 4) + sizeof(struct tcphdr), - length, buff); - BUG_ON(ptr == NULL); - - state->td_scale = - state->flags = 0; - - while (length > 0) { - int opcode=*ptr++; - int opsize; - - switch (opcode) { - case TCPOPT_EOL: - return; - case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ - length--; - continue; - default: - opsize=*ptr++; - if (opsize < 2) /* "silly options" */ - return; - if (opsize > length) - break; /* don't parse partial options */ - - if (opcode == TCPOPT_SACK_PERM - && opsize == TCPOLEN_SACK_PERM) - state->flags |= IP_CT_TCP_FLAG_SACK_PERM; - else if (opcode == TCPOPT_WINDOW - && opsize == TCPOLEN_WINDOW) { - state->td_scale = *(u_int8_t *)ptr; - - if (state->td_scale > 14) { - /* See RFC1323 */ - state->td_scale = 14; - } - state->flags |= - IP_CT_TCP_FLAG_WINDOW_SCALE; - } - ptr += opsize - 2; - length -= opsize; - } - } -} - -static void tcp_sack(const struct sk_buff *skb, - struct iphdr *iph, - struct tcphdr *tcph, - __u32 *sack) -{ - unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; - unsigned char *ptr; - int length = (tcph->doff*4) - sizeof(struct tcphdr); - __u32 tmp; - - if (!length) - return; - - ptr = skb_header_pointer(skb, - (iph->ihl * 4) + sizeof(struct tcphdr), - length, buff); - BUG_ON(ptr == NULL); - - /* Fast path for timestamp-only option */ - if (length == TCPOLEN_TSTAMP_ALIGNED*4 - && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24) - | (TCPOPT_NOP << 16) - | (TCPOPT_TIMESTAMP << 8) - | TCPOLEN_TIMESTAMP)) - return; - - while (length > 0) { - int opcode=*ptr++; - int opsize, i; - - switch (opcode) { - case TCPOPT_EOL: - return; - case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ - length--; - continue; - default: - opsize=*ptr++; - if (opsize < 2) /* "silly options" */ - return; - if (opsize > length) - break; /* don't parse partial options */ - - if (opcode == TCPOPT_SACK - && opsize >= (TCPOLEN_SACK_BASE - + TCPOLEN_SACK_PERBLOCK) - && !((opsize - TCPOLEN_SACK_BASE) - % TCPOLEN_SACK_PERBLOCK)) { - for (i = 0; - i < (opsize - TCPOLEN_SACK_BASE); - i += TCPOLEN_SACK_PERBLOCK) { - tmp = ntohl(*((__be32 *)(ptr+i)+1)); - - if (after(tmp, *sack)) - *sack = tmp; - } - return; - } - ptr += opsize - 2; - length -= opsize; - } - } -} - -static int tcp_in_window(struct ip_ct_tcp *state, - enum ip_conntrack_dir dir, - unsigned int index, - const struct sk_buff *skb, - struct iphdr *iph, - struct tcphdr *tcph) -{ - struct ip_ct_tcp_state *sender = &state->seen[dir]; - struct ip_ct_tcp_state *receiver = &state->seen[!dir]; - __u32 seq, ack, sack, end, win, swin; - int res; - - /* - * Get the required data from the packet. 
- */ - seq = ntohl(tcph->seq); - ack = sack = ntohl(tcph->ack_seq); - win = ntohs(tcph->window); - end = segment_seq_plus_len(seq, skb->len, iph, tcph); - - if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) - tcp_sack(skb, iph, tcph, &sack); - - DEBUGP("tcp_in_window: START\n"); - DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " - "seq=%u ack=%u sack=%u win=%u end=%u\n", - NIPQUAD(iph->saddr), ntohs(tcph->source), - NIPQUAD(iph->daddr), ntohs(tcph->dest), - seq, ack, sack, win, end); - DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " - "receiver end=%u maxend=%u maxwin=%u scale=%i\n", - sender->td_end, sender->td_maxend, sender->td_maxwin, - sender->td_scale, - receiver->td_end, receiver->td_maxend, receiver->td_maxwin, - receiver->td_scale); - - if (sender->td_end == 0) { - /* - * Initialize sender data. - */ - if (tcph->syn && tcph->ack) { - /* - * Outgoing SYN-ACK in reply to a SYN. - */ - sender->td_end = - sender->td_maxend = end; - sender->td_maxwin = (win == 0 ? 1 : win); - - tcp_options(skb, iph, tcph, sender); - /* - * RFC 1323: - * Both sides must send the Window Scale option - * to enable window scaling in either direction. - */ - if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE - && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) - sender->td_scale = - receiver->td_scale = 0; - } else { - /* - * We are in the middle of a connection, - * its history is lost for us. - * Let's try to use the data from the packet. - */ - sender->td_end = end; - sender->td_maxwin = (win == 0 ? 1 : win); - sender->td_maxend = end + sender->td_maxwin; - } - } else if (((state->state == TCP_CONNTRACK_SYN_SENT - && dir == IP_CT_DIR_ORIGINAL) - || (state->state == TCP_CONNTRACK_SYN_RECV - && dir == IP_CT_DIR_REPLY)) - && after(end, sender->td_end)) { - /* - * RFC 793: "if a TCP is reinitialized ... then it need - * not wait at all; it must only be sure to use sequence - * numbers larger than those recently used." - */ - sender->td_end = - sender->td_maxend = end; - sender->td_maxwin = (win == 0 ? 1 : win); - - tcp_options(skb, iph, tcph, sender); - } - - if (!(tcph->ack)) { - /* - * If there is no ACK, just pretend it was set and OK. - */ - ack = sack = receiver->td_end; - } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) == - (TCP_FLAG_ACK|TCP_FLAG_RST)) - && (ack == 0)) { - /* - * Broken TCP stacks, that set ACK in RST packets as well - * with zero ack value. - */ - ack = sack = receiver->td_end; - } - - if (seq == end - && (!tcph->rst - || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT))) - /* - * Packets contains no data: we assume it is valid - * and check the ack value only. - * However RST segments are always validated by their - * SEQ number, except when seq == 0 (reset sent answering - * SYN. 
- */ - seq = end = sender->td_end; - - DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " - "seq=%u ack=%u sack =%u win=%u end=%u\n", - NIPQUAD(iph->saddr), ntohs(tcph->source), - NIPQUAD(iph->daddr), ntohs(tcph->dest), - seq, ack, sack, win, end); - DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " - "receiver end=%u maxend=%u maxwin=%u scale=%i\n", - sender->td_end, sender->td_maxend, sender->td_maxwin, - sender->td_scale, - receiver->td_end, receiver->td_maxend, receiver->td_maxwin, - receiver->td_scale); - - DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n", - before(seq, sender->td_maxend + 1), - after(end, sender->td_end - receiver->td_maxwin - 1), - before(sack, receiver->td_end + 1), - after(ack, receiver->td_end - MAXACKWINDOW(sender))); - - if (before(seq, sender->td_maxend + 1) && - after(end, sender->td_end - receiver->td_maxwin - 1) && - before(sack, receiver->td_end + 1) && - after(ack, receiver->td_end - MAXACKWINDOW(sender))) { - /* - * Take into account window scaling (RFC 1323). - */ - if (!tcph->syn) - win <<= sender->td_scale; - - /* - * Update sender data. - */ - swin = win + (sack - ack); - if (sender->td_maxwin < swin) - sender->td_maxwin = swin; - if (after(end, sender->td_end)) - sender->td_end = end; - /* - * Update receiver data. - */ - if (after(end, sender->td_maxend)) - receiver->td_maxwin += end - sender->td_maxend; - if (after(sack + win, receiver->td_maxend - 1)) { - receiver->td_maxend = sack + win; - if (win == 0) - receiver->td_maxend++; - } - - /* - * Check retransmissions. - */ - if (index == TCP_ACK_SET) { - if (state->last_dir == dir - && state->last_seq == seq - && state->last_ack == ack - && state->last_end == end - && state->last_win == win) - state->retrans++; - else { - state->last_dir = dir; - state->last_seq = seq; - state->last_ack = ack; - state->last_end = end; - state->last_win = win; - state->retrans = 0; - } - } - res = 1; - } else { - res = 0; - if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || - ip_ct_tcp_be_liberal) - res = 1; - if (!res && LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_tcp: %s ", - before(seq, sender->td_maxend + 1) ? - after(end, sender->td_end - receiver->td_maxwin - 1) ? - before(sack, receiver->td_end + 1) ? - after(ack, receiver->td_end - MAXACKWINDOW(sender)) ? "BUG" - : "ACK is under the lower bound (possible overly delayed ACK)" - : "ACK is over the upper bound (ACKed data not seen yet)" - : "SEQ is under the lower bound (already ACKed data retransmitted)" - : "SEQ is over the upper bound (over the window of the receiver)"); - } - - DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " - "receiver end=%u maxend=%u maxwin=%u\n", - res, sender->td_end, sender->td_maxend, sender->td_maxwin, - receiver->td_end, receiver->td_maxend, receiver->td_maxwin); - - return res; -} - -#ifdef CONFIG_IP_NF_NAT_NEEDED -/* Update sender->td_end after NAT successfully mangled the packet */ -void ip_conntrack_tcp_update(struct sk_buff *skb, - struct ip_conntrack *conntrack, - enum ip_conntrack_dir dir) -{ - struct iphdr *iph = ip_hdr(skb); - struct tcphdr *tcph = (void *)iph + ip_hdrlen(skb); - __u32 end; -#ifdef DEBUGP_VARS - struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir]; - struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[!dir]; -#endif - - end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, iph, tcph); - - write_lock_bh(&tcp_lock); - /* - * We have to worry for the ack in the reply packet only... 
- */ - if (after(end, conntrack->proto.tcp.seen[dir].td_end)) - conntrack->proto.tcp.seen[dir].td_end = end; - conntrack->proto.tcp.last_end = end; - write_unlock_bh(&tcp_lock); - DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " - "receiver end=%u maxend=%u maxwin=%u scale=%i\n", - sender->td_end, sender->td_maxend, sender->td_maxwin, - sender->td_scale, - receiver->td_end, receiver->td_maxend, receiver->td_maxwin, - receiver->td_scale); -} - -#endif - -#define TH_FIN 0x01 -#define TH_SYN 0x02 -#define TH_RST 0x04 -#define TH_PUSH 0x08 -#define TH_ACK 0x10 -#define TH_URG 0x20 -#define TH_ECE 0x40 -#define TH_CWR 0x80 - -/* table of valid flag combinations - ECE and CWR are always valid */ -static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] = -{ - [TH_SYN] = 1, - [TH_SYN|TH_PUSH] = 1, - [TH_SYN|TH_URG] = 1, - [TH_SYN|TH_PUSH|TH_URG] = 1, - [TH_SYN|TH_ACK] = 1, - [TH_SYN|TH_ACK|TH_PUSH] = 1, - [TH_RST] = 1, - [TH_RST|TH_ACK] = 1, - [TH_RST|TH_ACK|TH_PUSH] = 1, - [TH_FIN|TH_ACK] = 1, - [TH_ACK] = 1, - [TH_ACK|TH_PUSH] = 1, - [TH_ACK|TH_URG] = 1, - [TH_ACK|TH_URG|TH_PUSH] = 1, - [TH_FIN|TH_ACK|TH_PUSH] = 1, - [TH_FIN|TH_ACK|TH_URG] = 1, - [TH_FIN|TH_ACK|TH_URG|TH_PUSH] = 1, -}; - -/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */ -static int tcp_error(struct sk_buff *skb, - enum ip_conntrack_info *ctinfo, - unsigned int hooknum) -{ - const unsigned int hdrlen = ip_hdrlen(skb); - struct tcphdr _tcph, *th; - unsigned int tcplen = skb->len - hdrlen; - u_int8_t tcpflags; - - /* Smaller that minimal TCP header? */ - th = skb_header_pointer(skb, hdrlen, - sizeof(_tcph), &_tcph); - if (th == NULL) { - if (LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_tcp: short packet "); - return -NF_ACCEPT; - } - - /* Not whole TCP header or malformed packet */ - if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) { - if (LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_tcp: truncated/malformed packet "); - return -NF_ACCEPT; - } - - /* Checksum invalid? Ignore. - * We skip checking packets on the outgoing path - * because it is assumed to be correct. - */ - /* FIXME: Source route IP option packets --RR */ - if (ip_conntrack_checksum && hooknum == NF_IP_PRE_ROUTING && - nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_TCP)) { - if (LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_tcp: bad TCP checksum "); - return -NF_ACCEPT; - } - - /* Check TCP flags. */ - tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR)); - if (!tcp_valid_flags[tcpflags]) { - if (LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_tcp: invalid TCP flag combination "); - return -NF_ACCEPT; - } - - return NF_ACCEPT; -} - -/* Returns verdict for packet, or -1 for invalid. 
*/ -static int tcp_packet(struct ip_conntrack *conntrack, - const struct sk_buff *skb, - enum ip_conntrack_info ctinfo) -{ - enum tcp_conntrack new_state, old_state; - enum ip_conntrack_dir dir; - struct iphdr *iph = ip_hdr(skb); - struct tcphdr *th, _tcph; - unsigned long timeout; - unsigned int index; - - th = skb_header_pointer(skb, iph->ihl * 4, - sizeof(_tcph), &_tcph); - BUG_ON(th == NULL); - - write_lock_bh(&tcp_lock); - old_state = conntrack->proto.tcp.state; - dir = CTINFO2DIR(ctinfo); - index = get_conntrack_index(th); - new_state = tcp_conntracks[dir][index][old_state]; - - switch (new_state) { - case TCP_CONNTRACK_IGNORE: - /* Ignored packets: - * - * a) SYN in ORIGINAL - * b) SYN/ACK in REPLY - * c) ACK in reply direction after initial SYN in original. - */ - if (index == TCP_SYNACK_SET - && conntrack->proto.tcp.last_index == TCP_SYN_SET - && conntrack->proto.tcp.last_dir != dir - && ntohl(th->ack_seq) == - conntrack->proto.tcp.last_end) { - /* This SYN/ACK acknowledges a SYN that we earlier - * ignored as invalid. This means that the client and - * the server are both in sync, while the firewall is - * not. We kill this session and block the SYN/ACK so - * that the client cannot but retransmit its SYN and - * thus initiate a clean new session. - */ - write_unlock_bh(&tcp_lock); - if (LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, - NULL, "ip_ct_tcp: " - "killing out of sync session "); - if (del_timer(&conntrack->timeout)) - conntrack->timeout.function((unsigned long) - conntrack); - return -NF_DROP; - } - conntrack->proto.tcp.last_index = index; - conntrack->proto.tcp.last_dir = dir; - conntrack->proto.tcp.last_seq = ntohl(th->seq); - conntrack->proto.tcp.last_end = - segment_seq_plus_len(ntohl(th->seq), skb->len, iph, th); - - write_unlock_bh(&tcp_lock); - if (LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_tcp: invalid packet ignored "); - return NF_ACCEPT; - case TCP_CONNTRACK_MAX: - /* Invalid packet */ - DEBUGP("ip_ct_tcp: Invalid dir=%i index=%u ostate=%u\n", - dir, get_conntrack_index(th), - old_state); - write_unlock_bh(&tcp_lock); - if (LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_tcp: invalid state "); - return -NF_ACCEPT; - case TCP_CONNTRACK_SYN_SENT: - if (old_state < TCP_CONNTRACK_TIME_WAIT) - break; - if ((conntrack->proto.tcp.seen[dir].flags & - IP_CT_TCP_FLAG_CLOSE_INIT) - || after(ntohl(th->seq), - conntrack->proto.tcp.seen[dir].td_end)) { - /* Attempt to reopen a closed connection. - * Delete this connection and look up again. */ - write_unlock_bh(&tcp_lock); - if (del_timer(&conntrack->timeout)) - conntrack->timeout.function((unsigned long) - conntrack); - return -NF_REPEAT; - } else { - write_unlock_bh(&tcp_lock); - if (LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, - NULL, "ip_ct_tcp: invalid SYN"); - return -NF_ACCEPT; - } - case TCP_CONNTRACK_CLOSE: - if (index == TCP_RST_SET - && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) - && conntrack->proto.tcp.last_index == TCP_SYN_SET) - || (!test_bit(IPS_ASSURED_BIT, &conntrack->status) - && conntrack->proto.tcp.last_index == TCP_ACK_SET)) - && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) { - /* RST sent to invalid SYN or ACK we had let through - * at a) and c) above: - * - * a) SYN was in window then - * c) we hold a half-open connection. - * - * Delete our connection entry. - * We skip window checking, because packet might ACK - * segments we ignored. 
*/ - goto in_window; - } - /* Just fall through */ - default: - /* Keep compilers happy. */ - break; - } - - if (!tcp_in_window(&conntrack->proto.tcp, dir, index, - skb, iph, th)) { - write_unlock_bh(&tcp_lock); - return -NF_ACCEPT; - } - in_window: - /* From now on we have got in-window packets */ - conntrack->proto.tcp.last_index = index; - - DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " - "syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n", - NIPQUAD(iph->saddr), ntohs(th->source), - NIPQUAD(iph->daddr), ntohs(th->dest), - (th->syn ? 1 : 0), (th->ack ? 1 : 0), - (th->fin ? 1 : 0), (th->rst ? 1 : 0), - old_state, new_state); - - conntrack->proto.tcp.state = new_state; - if (old_state != new_state - && (new_state == TCP_CONNTRACK_FIN_WAIT - || new_state == TCP_CONNTRACK_CLOSE)) - conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; - timeout = conntrack->proto.tcp.retrans >= ip_ct_tcp_max_retrans - && *tcp_timeouts[new_state] > ip_ct_tcp_timeout_max_retrans - ? ip_ct_tcp_timeout_max_retrans : *tcp_timeouts[new_state]; - write_unlock_bh(&tcp_lock); - - ip_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); - if (new_state != old_state) - ip_conntrack_event_cache(IPCT_PROTOINFO, skb); - - if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { - /* If only reply is a RST, we can consider ourselves not to - have an established connection: this is a fairly common - problem case, so we can delete the conntrack - immediately. --RR */ - if (th->rst) { - if (del_timer(&conntrack->timeout)) - conntrack->timeout.function((unsigned long) - conntrack); - return NF_ACCEPT; - } - } else if (!test_bit(IPS_ASSURED_BIT, &conntrack->status) - && (old_state == TCP_CONNTRACK_SYN_RECV - || old_state == TCP_CONNTRACK_ESTABLISHED) - && new_state == TCP_CONNTRACK_ESTABLISHED) { - /* Set ASSURED if we see see valid ack in ESTABLISHED - after SYN_RECV or a valid answer for a picked up - connection. */ - set_bit(IPS_ASSURED_BIT, &conntrack->status); - ip_conntrack_event_cache(IPCT_STATUS, skb); - } - ip_ct_refresh_acct(conntrack, ctinfo, skb, timeout); - - return NF_ACCEPT; -} - -/* Called when a new connection for this protocol found. */ -static int tcp_new(struct ip_conntrack *conntrack, - const struct sk_buff *skb) -{ - enum tcp_conntrack new_state; - struct iphdr *iph = ip_hdr(skb); - struct tcphdr *th, _tcph; -#ifdef DEBUGP_VARS - struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[0]; - struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[1]; -#endif - - th = skb_header_pointer(skb, iph->ihl * 4, - sizeof(_tcph), &_tcph); - BUG_ON(th == NULL); - - /* Don't need lock here: this conntrack not in circulation yet */ - new_state - = tcp_conntracks[0][get_conntrack_index(th)] - [TCP_CONNTRACK_NONE]; - - /* Invalid: delete conntrack */ - if (new_state >= TCP_CONNTRACK_MAX) { - DEBUGP("ip_ct_tcp: invalid new deleting.\n"); - return 0; - } - - if (new_state == TCP_CONNTRACK_SYN_SENT) { - /* SYN packet */ - conntrack->proto.tcp.seen[0].td_end = - segment_seq_plus_len(ntohl(th->seq), skb->len, - iph, th); - conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window); - if (conntrack->proto.tcp.seen[0].td_maxwin == 0) - conntrack->proto.tcp.seen[0].td_maxwin = 1; - conntrack->proto.tcp.seen[0].td_maxend = - conntrack->proto.tcp.seen[0].td_end; - - tcp_options(skb, iph, th, &conntrack->proto.tcp.seen[0]); - conntrack->proto.tcp.seen[1].flags = 0; - } else if (ip_ct_tcp_loose == 0) { - /* Don't try to pick up connections. 
*/ - return 0; - } else { - /* - * We are in the middle of a connection, - * its history is lost for us. - * Let's try to use the data from the packet. - */ - conntrack->proto.tcp.seen[0].td_end = - segment_seq_plus_len(ntohl(th->seq), skb->len, - iph, th); - conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window); - if (conntrack->proto.tcp.seen[0].td_maxwin == 0) - conntrack->proto.tcp.seen[0].td_maxwin = 1; - conntrack->proto.tcp.seen[0].td_maxend = - conntrack->proto.tcp.seen[0].td_end + - conntrack->proto.tcp.seen[0].td_maxwin; - conntrack->proto.tcp.seen[0].td_scale = 0; - - /* We assume SACK and liberal window checking to handle - * window scaling */ - conntrack->proto.tcp.seen[0].flags = - conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | - IP_CT_TCP_FLAG_BE_LIBERAL; - } - - conntrack->proto.tcp.seen[1].td_end = 0; - conntrack->proto.tcp.seen[1].td_maxend = 0; - conntrack->proto.tcp.seen[1].td_maxwin = 1; - conntrack->proto.tcp.seen[1].td_scale = 0; - - /* tcp_packet will set them */ - conntrack->proto.tcp.state = TCP_CONNTRACK_NONE; - conntrack->proto.tcp.last_index = TCP_NONE_SET; - - DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " - "receiver end=%u maxend=%u maxwin=%u scale=%i\n", - sender->td_end, sender->td_maxend, sender->td_maxwin, - sender->td_scale, - receiver->td_end, receiver->td_maxend, receiver->td_maxwin, - receiver->td_scale); - return 1; -} - -struct ip_conntrack_protocol ip_conntrack_protocol_tcp = -{ - .proto = IPPROTO_TCP, - .name = "tcp", - .pkt_to_tuple = tcp_pkt_to_tuple, - .invert_tuple = tcp_invert_tuple, - .print_tuple = tcp_print_tuple, - .print_conntrack = tcp_print_conntrack, - .packet = tcp_packet, - .new = tcp_new, - .error = tcp_error, -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) - .to_nfattr = tcp_to_nfattr, - .from_nfattr = nfattr_to_tcp, - .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple, -#endif -}; diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c deleted file mode 100644 index 3b47987bf1bb..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_proto_udp.c +++ /dev/null @@ -1,148 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -unsigned int ip_ct_udp_timeout __read_mostly = 30*HZ; -unsigned int ip_ct_udp_timeout_stream __read_mostly = 180*HZ; - -static int udp_pkt_to_tuple(const struct sk_buff *skb, - unsigned int dataoff, - struct ip_conntrack_tuple *tuple) -{ - struct udphdr _hdr, *hp; - - /* Actually only need first 8 bytes. */ - hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); - if (hp == NULL) - return 0; - - tuple->src.u.udp.port = hp->source; - tuple->dst.u.udp.port = hp->dest; - - return 1; -} - -static int udp_invert_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_tuple *orig) -{ - tuple->src.u.udp.port = orig->dst.u.udp.port; - tuple->dst.u.udp.port = orig->src.u.udp.port; - return 1; -} - -/* Print out the per-protocol part of the tuple. 
*/ -static int udp_print_tuple(struct seq_file *s, - const struct ip_conntrack_tuple *tuple) -{ - return seq_printf(s, "sport=%hu dport=%hu ", - ntohs(tuple->src.u.udp.port), - ntohs(tuple->dst.u.udp.port)); -} - -/* Print out the private part of the conntrack. */ -static int udp_print_conntrack(struct seq_file *s, - const struct ip_conntrack *conntrack) -{ - return 0; -} - -/* Returns verdict for packet, and may modify conntracktype */ -static int udp_packet(struct ip_conntrack *conntrack, - const struct sk_buff *skb, - enum ip_conntrack_info ctinfo) -{ - /* If we've seen traffic both ways, this is some kind of UDP - stream. Extend timeout. */ - if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { - ip_ct_refresh_acct(conntrack, ctinfo, skb, - ip_ct_udp_timeout_stream); - /* Also, more likely to be important, and not a probe */ - if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status)) - ip_conntrack_event_cache(IPCT_STATUS, skb); - } else - ip_ct_refresh_acct(conntrack, ctinfo, skb, ip_ct_udp_timeout); - - return NF_ACCEPT; -} - -/* Called when a new connection for this protocol found. */ -static int udp_new(struct ip_conntrack *conntrack, const struct sk_buff *skb) -{ - return 1; -} - -static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, - unsigned int hooknum) -{ - const unsigned int hdrlen = ip_hdrlen(skb); - unsigned int udplen = skb->len - hdrlen; - struct udphdr _hdr, *hdr; - - /* Header is too small? */ - hdr = skb_header_pointer(skb, hdrlen, sizeof(_hdr), &_hdr); - if (hdr == NULL) { - if (LOG_INVALID(IPPROTO_UDP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_udp: short packet "); - return -NF_ACCEPT; - } - - /* Truncated/malformed packets */ - if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) { - if (LOG_INVALID(IPPROTO_UDP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_udp: truncated/malformed packet "); - return -NF_ACCEPT; - } - - /* Packet with no checksum */ - if (!hdr->check) - return NF_ACCEPT; - - /* Checksum invalid? Ignore. - * We skip checking packets on the outgoing path - * because the checksum is assumed to be correct. - * FIXME: Source route IP option packets --RR */ - if (ip_conntrack_checksum && hooknum == NF_IP_PRE_ROUTING && - nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_UDP)) { - if (LOG_INVALID(IPPROTO_UDP)) - nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, - "ip_ct_udp: bad UDP checksum "); - return -NF_ACCEPT; - } - - return NF_ACCEPT; -} - -struct ip_conntrack_protocol ip_conntrack_protocol_udp = -{ - .proto = IPPROTO_UDP, - .name = "udp", - .pkt_to_tuple = udp_pkt_to_tuple, - .invert_tuple = udp_invert_tuple, - .print_tuple = udp_print_tuple, - .print_conntrack = udp_print_conntrack, - .packet = udp_packet, - .new = udp_new, - .error = udp_error, -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) - .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple, -#endif -}; diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c deleted file mode 100644 index 7363e2a5cea4..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_sip.c +++ /dev/null @@ -1,520 +0,0 @@ -/* SIP extension for IP connection tracking. - * - * (C) 2005 by Christian Hentschel - * based on RR's ip_conntrack_ftp.c and other modules. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Hentschel "); -MODULE_DESCRIPTION("SIP connection tracking helper"); - -#define MAX_PORTS 8 -static unsigned short ports[MAX_PORTS]; -static int ports_c; -module_param_array(ports, ushort, &ports_c, 0400); -MODULE_PARM_DESC(ports, "port numbers of sip servers"); - -static unsigned int sip_timeout = SIP_TIMEOUT; -module_param(sip_timeout, uint, 0600); -MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session"); - -unsigned int (*ip_nat_sip_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack *ct, - const char **dptr); -EXPORT_SYMBOL_GPL(ip_nat_sip_hook); - -unsigned int (*ip_nat_sdp_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack_expect *exp, - const char *dptr); -EXPORT_SYMBOL_GPL(ip_nat_sdp_hook); - -static int digits_len(const char *dptr, const char *limit, int *shift); -static int epaddr_len(const char *dptr, const char *limit, int *shift); -static int skp_digits_len(const char *dptr, const char *limit, int *shift); -static int skp_epaddr_len(const char *dptr, const char *limit, int *shift); - -struct sip_header_nfo { - const char *lname; - const char *sname; - const char *ln_str; - size_t lnlen; - size_t snlen; - size_t ln_strlen; - int case_sensitive; - int (*match_len)(const char *, const char *, int *); -}; - -static struct sip_header_nfo ct_sip_hdrs[] = { - [POS_REG_REQ_URI] = { /* SIP REGISTER request URI */ - .lname = "sip:", - .lnlen = sizeof("sip:") - 1, - .ln_str = ":", - .ln_strlen = sizeof(":") - 1, - .match_len = epaddr_len - }, - [POS_REQ_URI] = { /* SIP request URI */ - .lname = "sip:", - .lnlen = sizeof("sip:") - 1, - .ln_str = "@", - .ln_strlen = sizeof("@") - 1, - .match_len = epaddr_len - }, - [POS_FROM] = { /* SIP From header */ - .lname = "From:", - .lnlen = sizeof("From:") - 1, - .sname = "\r\nf:", - .snlen = sizeof("\r\nf:") - 1, - .ln_str = "sip:", - .ln_strlen = sizeof("sip:") - 1, - .match_len = skp_epaddr_len, - }, - [POS_TO] = { /* SIP To header */ - .lname = "To:", - .lnlen = sizeof("To:") - 1, - .sname = "\r\nt:", - .snlen = sizeof("\r\nt:") - 1, - .ln_str = "sip:", - .ln_strlen = sizeof("sip:") - 1, - .match_len = skp_epaddr_len, - }, - [POS_VIA] = { /* SIP Via header */ - .lname = "Via:", - .lnlen = sizeof("Via:") - 1, - .sname = "\r\nv:", - .snlen = sizeof("\r\nv:") - 1, /* rfc3261 "\r\n" */ - .ln_str = "UDP ", - .ln_strlen = sizeof("UDP ") - 1, - .match_len = epaddr_len, - }, - [POS_CONTACT] = { /* SIP Contact header */ - .lname = "Contact:", - .lnlen = sizeof("Contact:") - 1, - .sname = "\r\nm:", - .snlen = sizeof("\r\nm:") - 1, - .ln_str = "sip:", - .ln_strlen = sizeof("sip:") - 1, - .match_len = skp_epaddr_len - }, - [POS_CONTENT] = { /* SIP Content length header */ - .lname = "Content-Length:", - .lnlen = sizeof("Content-Length:") - 1, - .sname = "\r\nl:", - .snlen = sizeof("\r\nl:") - 1, - .ln_str = ":", - .ln_strlen = sizeof(":") - 1, - .match_len = skp_digits_len - }, - [POS_MEDIA] = { /* SDP media info */ - .case_sensitive = 1, - .lname = "\nm=", - .lnlen = sizeof("\nm=") - 1, - .sname = "\rm=", - .snlen = sizeof("\rm=") - 1, - .ln_str = "audio 
", - .ln_strlen = sizeof("audio ") - 1, - .match_len = digits_len - }, - [POS_OWNER] = { /* SDP owner address*/ - .case_sensitive = 1, - .lname = "\no=", - .lnlen = sizeof("\no=") - 1, - .sname = "\ro=", - .snlen = sizeof("\ro=") - 1, - .ln_str = "IN IP4 ", - .ln_strlen = sizeof("IN IP4 ") - 1, - .match_len = epaddr_len - }, - [POS_CONNECTION] = { /* SDP connection info */ - .case_sensitive = 1, - .lname = "\nc=", - .lnlen = sizeof("\nc=") - 1, - .sname = "\rc=", - .snlen = sizeof("\rc=") - 1, - .ln_str = "IN IP4 ", - .ln_strlen = sizeof("IN IP4 ") - 1, - .match_len = epaddr_len - }, - [POS_SDP_HEADER] = { /* SDP version header */ - .case_sensitive = 1, - .lname = "\nv=", - .lnlen = sizeof("\nv=") - 1, - .sname = "\rv=", - .snlen = sizeof("\rv=") - 1, - .ln_str = "=", - .ln_strlen = sizeof("=") - 1, - .match_len = digits_len - } -}; - -/* get line lenght until first CR or LF seen. */ -int ct_sip_lnlen(const char *line, const char *limit) -{ - const char *k = line; - - while ((line <= limit) && (*line == '\r' || *line == '\n')) - line++; - - while (line <= limit) { - if (*line == '\r' || *line == '\n') - break; - line++; - } - return line - k; -} -EXPORT_SYMBOL_GPL(ct_sip_lnlen); - -/* Linear string search, case sensitive. */ -const char *ct_sip_search(const char *needle, const char *haystack, - size_t needle_len, size_t haystack_len, - int case_sensitive) -{ - const char *limit = haystack + (haystack_len - needle_len); - - while (haystack <= limit) { - if (case_sensitive) { - if (strncmp(haystack, needle, needle_len) == 0) - return haystack; - } else { - if (strnicmp(haystack, needle, needle_len) == 0) - return haystack; - } - haystack++; - } - return NULL; -} -EXPORT_SYMBOL_GPL(ct_sip_search); - -static int digits_len(const char *dptr, const char *limit, int *shift) -{ - int len = 0; - while (dptr <= limit && isdigit(*dptr)) { - dptr++; - len++; - } - return len; -} - -/* get digits lenght, skiping blank spaces. */ -static int skp_digits_len(const char *dptr, const char *limit, int *shift) -{ - for (; dptr <= limit && *dptr == ' '; dptr++) - (*shift)++; - - return digits_len(dptr, limit, shift); -} - -/* Simple ipaddr parser.. */ -static int parse_ipaddr(const char *cp, const char **endp, - __be32 *ipaddr, const char *limit) -{ - unsigned long int val; - int i, digit = 0; - - for (i = 0, *ipaddr = 0; cp <= limit && i < 4; i++) { - digit = 0; - if (!isdigit(*cp)) - break; - - val = simple_strtoul(cp, (char **)&cp, 10); - if (val > 0xFF) - return -1; - - ((u_int8_t *)ipaddr)[i] = val; - digit = 1; - - if (*cp != '.') - break; - cp++; - } - if (!digit) - return -1; - - if (endp) - *endp = cp; - - return 0; -} - -/* skip ip address. returns it lenght. */ -static int epaddr_len(const char *dptr, const char *limit, int *shift) -{ - const char *aux = dptr; - __be32 ip; - - if (parse_ipaddr(dptr, &dptr, &ip, limit) < 0) { - DEBUGP("ip: %s parse failed.!\n", dptr); - return 0; - } - - /* Port number */ - if (*dptr == ':') { - dptr++; - dptr += digits_len(dptr, limit, shift); - } - return dptr - aux; -} - -/* get address length, skiping user info. */ -static int skp_epaddr_len(const char *dptr, const char *limit, int *shift) -{ - int s = *shift; - - /* Search for @, but stop at the end of the line. - * We are inside a sip: URI, so we don't need to worry about - * continuation lines. 
*/ - while (dptr <= limit && - *dptr != '@' && *dptr != '\r' && *dptr != '\n') { - (*shift)++; - dptr++; - } - - if (dptr <= limit && *dptr == '@') { - dptr++; - (*shift)++; - } else - *shift = s; - - return epaddr_len(dptr, limit, shift); -} - -/* Returns 0 if not found, -1 error parsing. */ -int ct_sip_get_info(const char *dptr, size_t dlen, - unsigned int *matchoff, - unsigned int *matchlen, - enum sip_header_pos pos) -{ - struct sip_header_nfo *hnfo = &ct_sip_hdrs[pos]; - const char *limit, *aux, *k = dptr; - int shift = 0; - - limit = dptr + (dlen - hnfo->lnlen); - - while (dptr <= limit) { - if ((strncmp(dptr, hnfo->lname, hnfo->lnlen) != 0) && - (hnfo->sname == NULL || - strncmp(dptr, hnfo->sname, hnfo->snlen) != 0)) { - dptr++; - continue; - } - aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, - ct_sip_lnlen(dptr, limit), - hnfo->case_sensitive); - if (!aux) { - DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, - hnfo->lname); - return -1; - } - aux += hnfo->ln_strlen; - - *matchlen = hnfo->match_len(aux, limit, &shift); - if (!*matchlen) - return -1; - - *matchoff = (aux - k) + shift; - - DEBUGP("%s match succeeded! - len: %u\n", hnfo->lname, - *matchlen); - return 1; - } - DEBUGP("%s header not found.\n", hnfo->lname); - return 0; -} -EXPORT_SYMBOL_GPL(ct_sip_get_info); - -static int set_expected_rtp(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - __be32 ipaddr, u_int16_t port, - const char *dptr) -{ - struct ip_conntrack_expect *exp; - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - int ret; - typeof(ip_nat_sdp_hook) ip_nat_sdp; - - exp = ip_conntrack_expect_alloc(ct); - if (exp == NULL) - return NF_DROP; - - exp->tuple.src.ip = ct->tuplehash[!dir].tuple.src.ip; - exp->tuple.src.u.udp.port = 0; - exp->tuple.dst.ip = ipaddr; - exp->tuple.dst.u.udp.port = htons(port); - exp->tuple.dst.protonum = IPPROTO_UDP; - - exp->mask.src.ip = htonl(0xFFFFFFFF); - exp->mask.src.u.udp.port = 0; - exp->mask.dst.ip = htonl(0xFFFFFFFF); - exp->mask.dst.u.udp.port = htons(0xFFFF); - exp->mask.dst.protonum = 0xFF; - - exp->expectfn = NULL; - exp->flags = 0; - - ip_nat_sdp = rcu_dereference(ip_nat_sdp_hook); - if (ip_nat_sdp) - ret = ip_nat_sdp(pskb, ctinfo, exp, dptr); - else { - if (ip_conntrack_expect_related(exp) != 0) - ret = NF_DROP; - else - ret = NF_ACCEPT; - } - ip_conntrack_expect_put(exp); - - return ret; -} - -static int sip_help(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - unsigned int dataoff, datalen; - const char *dptr; - int ret = NF_ACCEPT; - int matchoff, matchlen; - __be32 ipaddr; - u_int16_t port; - typeof(ip_nat_sip_hook) ip_nat_sip; - - /* No Data ? */ - dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); - if (dataoff >= (*pskb)->len) { - DEBUGP("skb->len = %u\n", (*pskb)->len); - return NF_ACCEPT; - } - - ip_ct_refresh(ct, *pskb, sip_timeout * HZ); - - if (!skb_is_nonlinear(*pskb)) - dptr = (*pskb)->data + dataoff; - else { - DEBUGP("Copy of skbuff not supported yet.\n"); - goto out; - } - - ip_nat_sip = rcu_dereference(ip_nat_sip_hook); - if (ip_nat_sip) { - if (!ip_nat_sip(pskb, ctinfo, ct, &dptr)) { - ret = NF_DROP; - goto out; - } - } - - /* After this point NAT, could have mangled skb, so - we need to recalculate payload lenght. 
*/ - datalen = (*pskb)->len - dataoff; - - if (datalen < (sizeof("SIP/2.0 200") - 1)) - goto out; - - /* RTP info only in some SDP pkts */ - if (memcmp(dptr, "INVITE", sizeof("INVITE") - 1) != 0 && - memcmp(dptr, "SIP/2.0 200", sizeof("SIP/2.0 200") - 1) != 0) { - goto out; - } - /* Get ip and port address from SDP packet. */ - if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, - POS_CONNECTION) > 0) { - - /* We'll drop only if there are parse problems. */ - if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr, - dptr + datalen) < 0) { - ret = NF_DROP; - goto out; - } - if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, - POS_MEDIA) > 0) { - - port = simple_strtoul(dptr + matchoff, NULL, 10); - if (port < 1024) { - ret = NF_DROP; - goto out; - } - ret = set_expected_rtp(pskb, ct, ctinfo, - ipaddr, port, dptr); - } - } -out: - return ret; -} - -static struct ip_conntrack_helper sip[MAX_PORTS]; -static char sip_names[MAX_PORTS][10]; - -static void fini(void) -{ - int i; - for (i = 0; i < ports_c; i++) { - DEBUGP("unregistering helper for port %d\n", ports[i]); - ip_conntrack_helper_unregister(&sip[i]); - } -} - -static int __init init(void) -{ - int i, ret; - char *tmpname; - - if (ports_c == 0) - ports[ports_c++] = SIP_PORT; - - for (i = 0; i < ports_c; i++) { - /* Create helper structure */ - memset(&sip[i], 0, sizeof(struct ip_conntrack_helper)); - - sip[i].tuple.dst.protonum = IPPROTO_UDP; - sip[i].tuple.src.u.udp.port = htons(ports[i]); - sip[i].mask.src.u.udp.port = htons(0xFFFF); - sip[i].mask.dst.protonum = 0xFF; - sip[i].max_expected = 2; - sip[i].timeout = 3 * 60; /* 3 minutes */ - sip[i].me = THIS_MODULE; - sip[i].help = sip_help; - - tmpname = &sip_names[i][0]; - if (ports[i] == SIP_PORT) - sprintf(tmpname, "sip"); - else - sprintf(tmpname, "sip-%d", i); - sip[i].name = tmpname; - - DEBUGP("port #%d: %d\n", i, ports[i]); - - ret = ip_conntrack_helper_register(&sip[i]); - if (ret) { - printk("ERROR registering helper for port %d\n", - ports[i]); - fini(); - return ret; - } - } - return 0; -} - -module_init(init); -module_exit(fini); diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c deleted file mode 100644 index c32200153d62..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ /dev/null @@ -1,962 +0,0 @@ -/* This file contains all the functions required for the standalone - ip_conntrack module. - - These are not required by the compatibility layer. -*/ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2005 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_SYSCTL -#include -#endif -#include -#include -#include - -#include -#include -#include -#include - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) 
-#endif - -MODULE_LICENSE("GPL"); - -extern atomic_t ip_conntrack_count; -DECLARE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat); - -static int kill_proto(struct ip_conntrack *i, void *data) -{ - return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == - *((u_int8_t *) data)); -} - -#ifdef CONFIG_PROC_FS -static int -print_tuple(struct seq_file *s, const struct ip_conntrack_tuple *tuple, - struct ip_conntrack_protocol *proto) -{ - seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ", - NIPQUAD(tuple->src.ip), NIPQUAD(tuple->dst.ip)); - return proto->print_tuple(s, tuple); -} - -#ifdef CONFIG_IP_NF_CT_ACCT -static unsigned int -seq_print_counters(struct seq_file *s, - const struct ip_conntrack_counter *counter) -{ - return seq_printf(s, "packets=%llu bytes=%llu ", - (unsigned long long)counter->packets, - (unsigned long long)counter->bytes); -} -#else -#define seq_print_counters(x, y) 0 -#endif - -struct ct_iter_state { - unsigned int bucket; -}; - -static struct list_head *ct_get_first(struct seq_file *seq) -{ - struct ct_iter_state *st = seq->private; - - for (st->bucket = 0; - st->bucket < ip_conntrack_htable_size; - st->bucket++) { - if (!list_empty(&ip_conntrack_hash[st->bucket])) - return ip_conntrack_hash[st->bucket].next; - } - return NULL; -} - -static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head) -{ - struct ct_iter_state *st = seq->private; - - head = head->next; - while (head == &ip_conntrack_hash[st->bucket]) { - if (++st->bucket >= ip_conntrack_htable_size) - return NULL; - head = ip_conntrack_hash[st->bucket].next; - } - return head; -} - -static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos) -{ - struct list_head *head = ct_get_first(seq); - - if (head) - while (pos && (head = ct_get_next(seq, head))) - pos--; - return pos ? NULL : head; -} - -static void *ct_seq_start(struct seq_file *seq, loff_t *pos) -{ - read_lock_bh(&ip_conntrack_lock); - return ct_get_idx(seq, *pos); -} - -static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos) -{ - (*pos)++; - return ct_get_next(s, v); -} - -static void ct_seq_stop(struct seq_file *s, void *v) -{ - read_unlock_bh(&ip_conntrack_lock); -} - -static int ct_seq_show(struct seq_file *s, void *v) -{ - const struct ip_conntrack_tuple_hash *hash = v; - const struct ip_conntrack *conntrack = tuplehash_to_ctrack(hash); - struct ip_conntrack_protocol *proto; - - IP_NF_ASSERT(conntrack); - - /* we only want to print DIR_ORIGINAL */ - if (DIRECTION(hash)) - return 0; - - proto = __ip_conntrack_proto_find(conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); - IP_NF_ASSERT(proto); - - if (seq_printf(s, "%-8s %u %ld ", - proto->name, - conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum, - timer_pending(&conntrack->timeout) - ? 
(long)(conntrack->timeout.expires - jiffies)/HZ - : 0) != 0) - return -ENOSPC; - - if (proto->print_conntrack(s, conntrack)) - return -ENOSPC; - - if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - proto)) - return -ENOSPC; - - if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL])) - return -ENOSPC; - - if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status))) - if (seq_printf(s, "[UNREPLIED] ")) - return -ENOSPC; - - if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple, - proto)) - return -ENOSPC; - - if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY])) - return -ENOSPC; - - if (test_bit(IPS_ASSURED_BIT, &conntrack->status)) - if (seq_printf(s, "[ASSURED] ")) - return -ENOSPC; - -#if defined(CONFIG_IP_NF_CONNTRACK_MARK) - if (seq_printf(s, "mark=%u ", conntrack->mark)) - return -ENOSPC; -#endif - -#ifdef CONFIG_IP_NF_CONNTRACK_SECMARK - if (seq_printf(s, "secmark=%u ", conntrack->secmark)) - return -ENOSPC; -#endif - - if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use))) - return -ENOSPC; - - return 0; -} - -static struct seq_operations ct_seq_ops = { - .start = ct_seq_start, - .next = ct_seq_next, - .stop = ct_seq_stop, - .show = ct_seq_show -}; - -static int ct_open(struct inode *inode, struct file *file) -{ - struct seq_file *seq; - struct ct_iter_state *st; - int ret; - - st = kmalloc(sizeof(struct ct_iter_state), GFP_KERNEL); - if (st == NULL) - return -ENOMEM; - ret = seq_open(file, &ct_seq_ops); - if (ret) - goto out_free; - seq = file->private_data; - seq->private = st; - memset(st, 0, sizeof(struct ct_iter_state)); - return ret; -out_free: - kfree(st); - return ret; -} - -static const struct file_operations ct_file_ops = { - .owner = THIS_MODULE, - .open = ct_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_private, -}; - -/* expects */ -static void *exp_seq_start(struct seq_file *s, loff_t *pos) -{ - struct list_head *e = &ip_conntrack_expect_list; - loff_t i; - - /* strange seq_file api calls stop even if we fail, - * thus we need to grab lock since stop unlocks */ - read_lock_bh(&ip_conntrack_lock); - - if (list_empty(e)) - return NULL; - - for (i = 0; i <= *pos; i++) { - e = e->next; - if (e == &ip_conntrack_expect_list) - return NULL; - } - return e; -} - -static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) -{ - struct list_head *e = v; - - ++*pos; - e = e->next; - - if (e == &ip_conntrack_expect_list) - return NULL; - - return e; -} - -static void exp_seq_stop(struct seq_file *s, void *v) -{ - read_unlock_bh(&ip_conntrack_lock); -} - -static int exp_seq_show(struct seq_file *s, void *v) -{ - struct ip_conntrack_expect *expect = v; - - if (expect->timeout.function) - seq_printf(s, "%ld ", timer_pending(&expect->timeout) - ? 
(long)(expect->timeout.expires - jiffies)/HZ : 0); - else - seq_printf(s, "- "); - - seq_printf(s, "proto=%u ", expect->tuple.dst.protonum); - - print_tuple(s, &expect->tuple, - __ip_conntrack_proto_find(expect->tuple.dst.protonum)); - return seq_putc(s, '\n'); -} - -static struct seq_operations exp_seq_ops = { - .start = exp_seq_start, - .next = exp_seq_next, - .stop = exp_seq_stop, - .show = exp_seq_show -}; - -static int exp_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &exp_seq_ops); -} - -static const struct file_operations exp_file_ops = { - .owner = THIS_MODULE, - .open = exp_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release -}; - -static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) -{ - int cpu; - - if (*pos == 0) - return SEQ_START_TOKEN; - - for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) { - if (!cpu_possible(cpu)) - continue; - *pos = cpu+1; - return &per_cpu(ip_conntrack_stat, cpu); - } - - return NULL; -} - -static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - int cpu; - - for (cpu = *pos; cpu < NR_CPUS; ++cpu) { - if (!cpu_possible(cpu)) - continue; - *pos = cpu+1; - return &per_cpu(ip_conntrack_stat, cpu); - } - - return NULL; -} - -static void ct_cpu_seq_stop(struct seq_file *seq, void *v) -{ -} - -static int ct_cpu_seq_show(struct seq_file *seq, void *v) -{ - unsigned int nr_conntracks = atomic_read(&ip_conntrack_count); - struct ip_conntrack_stat *st = v; - - if (v == SEQ_START_TOKEN) { - seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n"); - return 0; - } - - seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x " - "%08x %08x %08x %08x %08x %08x %08x %08x \n", - nr_conntracks, - st->searched, - st->found, - st->new, - st->invalid, - st->ignore, - st->delete, - st->delete_list, - st->insert, - st->insert_failed, - st->drop, - st->early_drop, - st->error, - - st->expect_new, - st->expect_create, - st->expect_delete - ); - return 0; -} - -static struct seq_operations ct_cpu_seq_ops = { - .start = ct_cpu_seq_start, - .next = ct_cpu_seq_next, - .stop = ct_cpu_seq_stop, - .show = ct_cpu_seq_show, -}; - -static int ct_cpu_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &ct_cpu_seq_ops); -} - -static const struct file_operations ct_cpu_seq_fops = { - .owner = THIS_MODULE, - .open = ct_cpu_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_private, -}; -#endif - -static unsigned int ip_confirm(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - /* We've seen it coming out the other side: confirm it */ - return ip_conntrack_confirm(pskb); -} - -static unsigned int ip_conntrack_help(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - - /* This is where we call the helper: as the packet goes out. 
*/ - ct = ip_conntrack_get(*pskb, &ctinfo); - if (ct && ct->helper && ctinfo != IP_CT_RELATED + IP_CT_IS_REPLY) { - unsigned int ret; - ret = ct->helper->help(pskb, ct, ctinfo); - if (ret != NF_ACCEPT) - return ret; - } - return NF_ACCEPT; -} - -static unsigned int ip_conntrack_defrag(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ -#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE) - /* Previously seen (loopback)? Ignore. Do this before - fragment check. */ - if ((*pskb)->nfct) - return NF_ACCEPT; -#endif - - /* Gather fragments. */ - if (ip_hdr(*pskb)->frag_off & htons(IP_MF | IP_OFFSET)) { - *pskb = ip_ct_gather_frags(*pskb, - hooknum == NF_IP_PRE_ROUTING ? - IP_DEFRAG_CONNTRACK_IN : - IP_DEFRAG_CONNTRACK_OUT); - if (!*pskb) - return NF_STOLEN; - } - return NF_ACCEPT; -} - -static unsigned int ip_conntrack_local(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - /* root is playing with raw sockets. */ - if ((*pskb)->len < sizeof(struct iphdr) - || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { - if (net_ratelimit()) - printk("ipt_hook: happy cracking.\n"); - return NF_ACCEPT; - } - return ip_conntrack_in(hooknum, pskb, in, out, okfn); -} - -/* Connection tracking may drop packets, but never alters them, so - make it the first hook. */ -static struct nf_hook_ops ip_conntrack_ops[] = { - { - .hook = ip_conntrack_defrag, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_PRE_ROUTING, - .priority = NF_IP_PRI_CONNTRACK_DEFRAG, - }, - { - .hook = ip_conntrack_in, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_PRE_ROUTING, - .priority = NF_IP_PRI_CONNTRACK, - }, - { - .hook = ip_conntrack_defrag, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_LOCAL_OUT, - .priority = NF_IP_PRI_CONNTRACK_DEFRAG, - }, - { - .hook = ip_conntrack_local, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_LOCAL_OUT, - .priority = NF_IP_PRI_CONNTRACK, - }, - { - .hook = ip_conntrack_help, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_POST_ROUTING, - .priority = NF_IP_PRI_CONNTRACK_HELPER, - }, - { - .hook = ip_conntrack_help, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_LOCAL_IN, - .priority = NF_IP_PRI_CONNTRACK_HELPER, - }, - { - .hook = ip_confirm, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_POST_ROUTING, - .priority = NF_IP_PRI_CONNTRACK_CONFIRM, - }, - { - .hook = ip_confirm, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_LOCAL_IN, - .priority = NF_IP_PRI_CONNTRACK_CONFIRM, - }, -}; - -/* Sysctl support */ - -int ip_conntrack_checksum __read_mostly = 1; - -#ifdef CONFIG_SYSCTL - -/* From ip_conntrack_core.c */ -extern int ip_conntrack_max; -extern unsigned int ip_conntrack_htable_size; - -/* From ip_conntrack_proto_tcp.c */ -extern unsigned int ip_ct_tcp_timeout_syn_sent; -extern unsigned int ip_ct_tcp_timeout_syn_recv; -extern unsigned int ip_ct_tcp_timeout_established; -extern unsigned int ip_ct_tcp_timeout_fin_wait; -extern unsigned int ip_ct_tcp_timeout_close_wait; -extern unsigned int ip_ct_tcp_timeout_last_ack; -extern unsigned int ip_ct_tcp_timeout_time_wait; -extern unsigned int ip_ct_tcp_timeout_close; -extern unsigned int ip_ct_tcp_timeout_max_retrans; -extern int ip_ct_tcp_loose; -extern int ip_ct_tcp_be_liberal; -extern int ip_ct_tcp_max_retrans; - -/* From ip_conntrack_proto_udp.c */ -extern 
unsigned int ip_ct_udp_timeout; -extern unsigned int ip_ct_udp_timeout_stream; - -/* From ip_conntrack_proto_icmp.c */ -extern unsigned int ip_ct_icmp_timeout; - -/* From ip_conntrack_proto_generic.c */ -extern unsigned int ip_ct_generic_timeout; - -/* Log invalid packets of a given protocol */ -static int log_invalid_proto_min = 0; -static int log_invalid_proto_max = 255; - -static struct ctl_table_header *ip_ct_sysctl_header; - -static ctl_table ip_ct_sysctl_table[] = { - { - .ctl_name = NET_IPV4_NF_CONNTRACK_MAX, - .procname = "ip_conntrack_max", - .data = &ip_conntrack_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_COUNT, - .procname = "ip_conntrack_count", - .data = &ip_conntrack_count, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS, - .procname = "ip_conntrack_buckets", - .data = &ip_conntrack_htable_size, - .maxlen = sizeof(unsigned int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_CHECKSUM, - .procname = "ip_conntrack_checksum", - .data = &ip_conntrack_checksum, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, - .procname = "ip_conntrack_tcp_timeout_syn_sent", - .data = &ip_ct_tcp_timeout_syn_sent, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, - .procname = "ip_conntrack_tcp_timeout_syn_recv", - .data = &ip_ct_tcp_timeout_syn_recv, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, - .procname = "ip_conntrack_tcp_timeout_established", - .data = &ip_ct_tcp_timeout_established, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, - .procname = "ip_conntrack_tcp_timeout_fin_wait", - .data = &ip_ct_tcp_timeout_fin_wait, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, - .procname = "ip_conntrack_tcp_timeout_close_wait", - .data = &ip_ct_tcp_timeout_close_wait, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, - .procname = "ip_conntrack_tcp_timeout_last_ack", - .data = &ip_ct_tcp_timeout_last_ack, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, - .procname = "ip_conntrack_tcp_timeout_time_wait", - .data = &ip_ct_tcp_timeout_time_wait, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, - .procname = "ip_conntrack_tcp_timeout_close", - .data = &ip_ct_tcp_timeout_close, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT, - .procname = "ip_conntrack_udp_timeout", - .data = &ip_ct_udp_timeout, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = 
NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM, - .procname = "ip_conntrack_udp_timeout_stream", - .data = &ip_ct_udp_timeout_stream, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT, - .procname = "ip_conntrack_icmp_timeout", - .data = &ip_ct_icmp_timeout, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT, - .procname = "ip_conntrack_generic_timeout", - .data = &ip_ct_generic_timeout, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_LOG_INVALID, - .procname = "ip_conntrack_log_invalid", - .data = &ip_ct_log_invalid, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &log_invalid_proto_min, - .extra2 = &log_invalid_proto_max, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, - .procname = "ip_conntrack_tcp_timeout_max_retrans", - .data = &ip_ct_tcp_timeout_max_retrans, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_LOOSE, - .procname = "ip_conntrack_tcp_loose", - .data = &ip_ct_tcp_loose, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, - .procname = "ip_conntrack_tcp_be_liberal", - .data = &ip_ct_tcp_be_liberal, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, - .procname = "ip_conntrack_tcp_max_retrans", - .data = &ip_ct_tcp_max_retrans, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { .ctl_name = 0 } -}; - -#define NET_IP_CONNTRACK_MAX 2089 - -static ctl_table ip_ct_netfilter_table[] = { - { - .ctl_name = NET_IPV4_NETFILTER, - .procname = "netfilter", - .mode = 0555, - .child = ip_ct_sysctl_table, - }, - { - .ctl_name = NET_IP_CONNTRACK_MAX, - .procname = "ip_conntrack_max", - .data = &ip_conntrack_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { .ctl_name = 0 } -}; - -static ctl_table ip_ct_ipv4_table[] = { - { - .ctl_name = NET_IPV4, - .procname = "ipv4", - .mode = 0555, - .child = ip_ct_netfilter_table, - }, - { .ctl_name = 0 } -}; - -static ctl_table ip_ct_net_table[] = { - { - .ctl_name = CTL_NET, - .procname = "net", - .mode = 0555, - .child = ip_ct_ipv4_table, - }, - { .ctl_name = 0 } -}; - -EXPORT_SYMBOL(ip_ct_log_invalid); -#endif /* CONFIG_SYSCTL */ - -/* FIXME: Allow NULL functions and sub in pointers to generic for - them. 
--RR */ -int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto) -{ - int ret = 0; - - write_lock_bh(&ip_conntrack_lock); - if (ip_ct_protos[proto->proto] != &ip_conntrack_generic_protocol) { - ret = -EBUSY; - goto out; - } - rcu_assign_pointer(ip_ct_protos[proto->proto], proto); - out: - write_unlock_bh(&ip_conntrack_lock); - return ret; -} - -void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto) -{ - write_lock_bh(&ip_conntrack_lock); - rcu_assign_pointer(ip_ct_protos[proto->proto], - &ip_conntrack_generic_protocol); - write_unlock_bh(&ip_conntrack_lock); - synchronize_rcu(); - - /* Remove all contrack entries for this protocol */ - ip_ct_iterate_cleanup(kill_proto, &proto->proto); -} - -static int __init ip_conntrack_standalone_init(void) -{ -#ifdef CONFIG_PROC_FS - struct proc_dir_entry *proc, *proc_exp, *proc_stat; -#endif - int ret = 0; - - ret = ip_conntrack_init(); - if (ret < 0) - return ret; - -#ifdef CONFIG_PROC_FS - ret = -ENOMEM; - proc = proc_net_fops_create("ip_conntrack", 0440, &ct_file_ops); - if (!proc) goto cleanup_init; - - proc_exp = proc_net_fops_create("ip_conntrack_expect", 0440, - &exp_file_ops); - if (!proc_exp) goto cleanup_proc; - - proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, proc_net_stat); - if (!proc_stat) - goto cleanup_proc_exp; - - proc_stat->proc_fops = &ct_cpu_seq_fops; - proc_stat->owner = THIS_MODULE; -#endif - - ret = nf_register_hooks(ip_conntrack_ops, ARRAY_SIZE(ip_conntrack_ops)); - if (ret < 0) { - printk("ip_conntrack: can't register hooks.\n"); - goto cleanup_proc_stat; - } -#ifdef CONFIG_SYSCTL - ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table); - if (ip_ct_sysctl_header == NULL) { - printk("ip_conntrack: can't register to sysctl.\n"); - ret = -ENOMEM; - goto cleanup_hooks; - } -#endif - return ret; - -#ifdef CONFIG_SYSCTL - cleanup_hooks: - nf_unregister_hooks(ip_conntrack_ops, ARRAY_SIZE(ip_conntrack_ops)); -#endif - cleanup_proc_stat: -#ifdef CONFIG_PROC_FS - remove_proc_entry("ip_conntrack", proc_net_stat); - cleanup_proc_exp: - proc_net_remove("ip_conntrack_expect"); - cleanup_proc: - proc_net_remove("ip_conntrack"); - cleanup_init: -#endif /* CONFIG_PROC_FS */ - ip_conntrack_cleanup(); - return ret; -} - -static void __exit ip_conntrack_standalone_fini(void) -{ - synchronize_net(); -#ifdef CONFIG_SYSCTL - unregister_sysctl_table(ip_ct_sysctl_header); -#endif - nf_unregister_hooks(ip_conntrack_ops, ARRAY_SIZE(ip_conntrack_ops)); -#ifdef CONFIG_PROC_FS - remove_proc_entry("ip_conntrack", proc_net_stat); - proc_net_remove("ip_conntrack_expect"); - proc_net_remove("ip_conntrack"); -#endif /* CONFIG_PROC_FS */ - ip_conntrack_cleanup(); -} - -module_init(ip_conntrack_standalone_init); -module_exit(ip_conntrack_standalone_fini); - -/* Some modules need us, but don't depend directly on any symbol. - They should call this. 
*/ -void need_conntrack(void) -{ -} - -#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS -EXPORT_SYMBOL_GPL(ip_conntrack_chain); -EXPORT_SYMBOL_GPL(ip_conntrack_expect_chain); -EXPORT_SYMBOL_GPL(ip_conntrack_register_notifier); -EXPORT_SYMBOL_GPL(ip_conntrack_unregister_notifier); -EXPORT_SYMBOL_GPL(__ip_ct_event_cache_init); -EXPORT_PER_CPU_SYMBOL_GPL(ip_conntrack_ecache); -#endif -EXPORT_SYMBOL(ip_conntrack_protocol_register); -EXPORT_SYMBOL(ip_conntrack_protocol_unregister); -EXPORT_SYMBOL(ip_ct_get_tuple); -EXPORT_SYMBOL(invert_tuplepr); -EXPORT_SYMBOL(ip_conntrack_alter_reply); -EXPORT_SYMBOL(ip_conntrack_destroyed); -EXPORT_SYMBOL(need_conntrack); -EXPORT_SYMBOL(ip_conntrack_helper_register); -EXPORT_SYMBOL(ip_conntrack_helper_unregister); -EXPORT_SYMBOL(ip_ct_iterate_cleanup); -EXPORT_SYMBOL(__ip_ct_refresh_acct); - -EXPORT_SYMBOL(ip_conntrack_expect_alloc); -EXPORT_SYMBOL(ip_conntrack_expect_put); -EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find); -EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get); -EXPORT_SYMBOL(ip_conntrack_expect_related); -EXPORT_SYMBOL(ip_conntrack_unexpect_related); -EXPORT_SYMBOL_GPL(ip_conntrack_expect_list); -EXPORT_SYMBOL_GPL(ip_ct_unlink_expect); - -EXPORT_SYMBOL(ip_conntrack_tuple_taken); -EXPORT_SYMBOL(ip_ct_gather_frags); -EXPORT_SYMBOL(ip_conntrack_htable_size); -EXPORT_SYMBOL(ip_conntrack_lock); -EXPORT_SYMBOL(ip_conntrack_hash); -EXPORT_SYMBOL(ip_conntrack_untracked); -EXPORT_SYMBOL_GPL(ip_conntrack_find_get); -#ifdef CONFIG_IP_NF_NAT_NEEDED -EXPORT_SYMBOL(ip_conntrack_tcp_update); -#endif - -EXPORT_SYMBOL_GPL(ip_conntrack_flush); -EXPORT_SYMBOL_GPL(__ip_conntrack_find); - -EXPORT_SYMBOL_GPL(ip_conntrack_alloc); -EXPORT_SYMBOL_GPL(ip_conntrack_free); -EXPORT_SYMBOL_GPL(ip_conntrack_hash_insert); - -EXPORT_SYMBOL_GPL(ip_ct_remove_expectations); - -EXPORT_SYMBOL_GPL(ip_conntrack_helper_find_get); -EXPORT_SYMBOL_GPL(ip_conntrack_helper_put); -EXPORT_SYMBOL_GPL(__ip_conntrack_helper_find_byname); - -EXPORT_SYMBOL_GPL(ip_conntrack_proto_find_get); -EXPORT_SYMBOL_GPL(ip_conntrack_proto_put); -EXPORT_SYMBOL_GPL(__ip_conntrack_proto_find); -EXPORT_SYMBOL_GPL(ip_conntrack_checksum); -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) -EXPORT_SYMBOL_GPL(ip_ct_port_tuple_to_nfattr); -EXPORT_SYMBOL_GPL(ip_ct_port_nfattr_to_tuple); -#endif diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c deleted file mode 100644 index afc6809a3888..000000000000 --- a/net/ipv4/netfilter/ip_conntrack_tftp.c +++ /dev/null @@ -1,161 +0,0 @@ -/* (C) 2001-2002 Magnus Boden - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Version: 0.0.7 - * - * Thu 21 Mar 2002 Harald Welte - * - port to newnat API - * - */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -MODULE_AUTHOR("Magnus Boden "); -MODULE_DESCRIPTION("tftp connection tracking helper"); -MODULE_LICENSE("GPL"); - -#define MAX_PORTS 8 -static unsigned short ports[MAX_PORTS]; -static int ports_c; -module_param_array(ports, ushort, &ports_c, 0400); -MODULE_PARM_DESC(ports, "port numbers of tftp servers"); - -#if 0 -#define DEBUGP(format, args...) printk("%s:%s:" format, \ - __FILE__, __FUNCTION__ , ## args) -#else -#define DEBUGP(format, args...) 
-#endif - -unsigned int (*ip_nat_tftp_hook)(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack_expect *exp); -EXPORT_SYMBOL_GPL(ip_nat_tftp_hook); - -static int tftp_help(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - struct tftphdr _tftph, *tfh; - struct ip_conntrack_expect *exp; - unsigned int ret = NF_ACCEPT; - typeof(ip_nat_tftp_hook) ip_nat_tftp; - - tfh = skb_header_pointer(*pskb, - ip_hdrlen(*pskb) + sizeof(struct udphdr), - sizeof(_tftph), &_tftph); - if (tfh == NULL) - return NF_ACCEPT; - - switch (ntohs(tfh->opcode)) { - /* RRQ and WRQ works the same way */ - case TFTP_OPCODE_READ: - case TFTP_OPCODE_WRITE: - DEBUGP(""); - DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); - - exp = ip_conntrack_expect_alloc(ct); - if (exp == NULL) - return NF_DROP; - - exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; - exp->mask.src.ip = htonl(0xffffffff); - exp->mask.src.u.udp.port = 0; - exp->mask.dst.ip = htonl(0xffffffff); - exp->mask.dst.u.udp.port = htons(0xffff); - exp->mask.dst.protonum = 0xff; - exp->expectfn = NULL; - exp->flags = 0; - - DEBUGP("expect: "); - DUMP_TUPLE(&exp->tuple); - DUMP_TUPLE(&exp->mask); - ip_nat_tftp = rcu_dereference(ip_nat_tftp_hook); - if (ip_nat_tftp) - ret = ip_nat_tftp(pskb, ctinfo, exp); - else if (ip_conntrack_expect_related(exp) != 0) - ret = NF_DROP; - ip_conntrack_expect_put(exp); - break; - case TFTP_OPCODE_DATA: - case TFTP_OPCODE_ACK: - DEBUGP("Data/ACK opcode\n"); - break; - case TFTP_OPCODE_ERROR: - DEBUGP("Error opcode\n"); - break; - default: - DEBUGP("Unknown opcode\n"); - } - return NF_ACCEPT; -} - -static struct ip_conntrack_helper tftp[MAX_PORTS]; -static char tftp_names[MAX_PORTS][sizeof("tftp-65535")]; - -static void ip_conntrack_tftp_fini(void) -{ - int i; - - for (i = 0 ; i < ports_c; i++) { - DEBUGP("unregistering helper for port %d\n", - ports[i]); - ip_conntrack_helper_unregister(&tftp[i]); - } -} - -static int __init ip_conntrack_tftp_init(void) -{ - int i, ret; - char *tmpname; - - if (ports_c == 0) - ports[ports_c++] = TFTP_PORT; - - for (i = 0; i < ports_c; i++) { - /* Create helper structure */ - memset(&tftp[i], 0, sizeof(struct ip_conntrack_helper)); - - tftp[i].tuple.dst.protonum = IPPROTO_UDP; - tftp[i].tuple.src.u.udp.port = htons(ports[i]); - tftp[i].mask.dst.protonum = 0xFF; - tftp[i].mask.src.u.udp.port = htons(0xFFFF); - tftp[i].max_expected = 1; - tftp[i].timeout = 5 * 60; /* 5 minutes */ - tftp[i].me = THIS_MODULE; - tftp[i].help = tftp_help; - - tmpname = &tftp_names[i][0]; - if (ports[i] == TFTP_PORT) - sprintf(tmpname, "tftp"); - else - sprintf(tmpname, "tftp-%d", i); - tftp[i].name = tmpname; - - DEBUGP("port #%d: %d\n", i, ports[i]); - - ret=ip_conntrack_helper_register(&tftp[i]); - if (ret) { - printk("ERROR registering helper for port %d\n", - ports[i]); - ip_conntrack_tftp_fini(); - return(ret); - } - } - return(0); -} - -module_init(ip_conntrack_tftp_init); -module_exit(ip_conntrack_tftp_fini); diff --git a/net/ipv4/netfilter/ip_nat_amanda.c b/net/ipv4/netfilter/ip_nat_amanda.c deleted file mode 100644 index 85df1a9aed33..000000000000 --- a/net/ipv4/netfilter/ip_nat_amanda.c +++ /dev/null @@ -1,85 +0,0 @@ -/* Amanda extension for TCP NAT alteration. - * (C) 2002 by Brian J. 
Murrell - * based on a copy of HW's ip_nat_irc.c as well as other modules - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * Module load syntax: - * insmod ip_nat_amanda.o - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - - -MODULE_AUTHOR("Brian J. Murrell "); -MODULE_DESCRIPTION("Amanda NAT helper"); -MODULE_LICENSE("GPL"); - -static unsigned int help(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack_expect *exp) -{ - char buffer[sizeof("65535")]; - u_int16_t port; - unsigned int ret; - - /* Connection comes from client. */ - exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; - exp->dir = IP_CT_DIR_ORIGINAL; - - /* When you see the packet, we need to NAT it the same as the - * this one (ie. same IP: it will be TCP and master is UDP). */ - exp->expectfn = ip_nat_follow_master; - - /* Try to get same port: if not, try to change it. */ - for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { - exp->tuple.dst.u.tcp.port = htons(port); - if (ip_conntrack_expect_related(exp) == 0) - break; - } - - if (port == 0) - return NF_DROP; - - sprintf(buffer, "%u", port); - ret = ip_nat_mangle_udp_packet(pskb, exp->master, ctinfo, - matchoff, matchlen, - buffer, strlen(buffer)); - if (ret != NF_ACCEPT) - ip_conntrack_unexpect_related(exp); - return ret; -} - -static void __exit ip_nat_amanda_fini(void) -{ - rcu_assign_pointer(ip_nat_amanda_hook, NULL); - synchronize_rcu(); -} - -static int __init ip_nat_amanda_init(void) -{ - BUG_ON(rcu_dereference(ip_nat_amanda_hook)); - rcu_assign_pointer(ip_nat_amanda_hook, help); - return 0; -} - -module_init(ip_nat_amanda_init); -module_exit(ip_nat_amanda_fini); diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c deleted file mode 100644 index cf46930606f2..000000000000 --- a/net/ipv4/netfilter/ip_nat_core.c +++ /dev/null @@ -1,633 +0,0 @@ -/* NAT for netfilter; shared with compatibility layer. */ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* For tcp_prot in getorigdst */ -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) 
-#endif - -DEFINE_RWLOCK(ip_nat_lock); - -/* Calculated at init based on memory size */ -static unsigned int ip_nat_htable_size; - -static struct list_head *bysource; - -#define MAX_IP_NAT_PROTO 256 -static struct ip_nat_protocol *ip_nat_protos[MAX_IP_NAT_PROTO]; - -static inline struct ip_nat_protocol * -__ip_nat_proto_find(u_int8_t protonum) -{ - return rcu_dereference(ip_nat_protos[protonum]); -} - -struct ip_nat_protocol * -ip_nat_proto_find_get(u_int8_t protonum) -{ - struct ip_nat_protocol *p; - - rcu_read_lock(); - p = __ip_nat_proto_find(protonum); - if (!try_module_get(p->me)) - p = &ip_nat_unknown_protocol; - rcu_read_unlock(); - - return p; -} -EXPORT_SYMBOL_GPL(ip_nat_proto_find_get); - -void -ip_nat_proto_put(struct ip_nat_protocol *p) -{ - module_put(p->me); -} -EXPORT_SYMBOL_GPL(ip_nat_proto_put); - -/* We keep an extra hash for each conntrack, for fast searching. */ -static inline unsigned int -hash_by_src(const struct ip_conntrack_tuple *tuple) -{ - /* Original src, to ensure we map it consistently if poss. */ - return jhash_3words((__force u32)tuple->src.ip, tuple->src.u.all, - tuple->dst.protonum, 0) % ip_nat_htable_size; -} - -/* Noone using conntrack by the time this called. */ -static void ip_nat_cleanup_conntrack(struct ip_conntrack *conn) -{ - if (!(conn->status & IPS_NAT_DONE_MASK)) - return; - - write_lock_bh(&ip_nat_lock); - list_del(&conn->nat.info.bysource); - write_unlock_bh(&ip_nat_lock); -} - -/* Is this tuple already taken? (not by us) */ -int -ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack) -{ - /* Conntrack tracking doesn't keep track of outgoing tuples; only - incoming ones. NAT means they don't have a fixed mapping, - so we invert the tuple and look for the incoming reply. - - We could keep a separate hash if this proves too slow. */ - struct ip_conntrack_tuple reply; - - invert_tuplepr(&reply, tuple); - return ip_conntrack_tuple_taken(&reply, ignored_conntrack); -} -EXPORT_SYMBOL(ip_nat_used_tuple); - -/* If we source map this tuple so reply looks like reply_tuple, will - * that meet the constraints of range. */ -static int -in_range(const struct ip_conntrack_tuple *tuple, - const struct ip_nat_range *range) -{ - struct ip_nat_protocol *proto; - int ret = 0; - - /* If we are supposed to map IPs, then we must be in the - range specified, otherwise let this drag us onto a new src IP. 
*/ - if (range->flags & IP_NAT_RANGE_MAP_IPS) { - if (ntohl(tuple->src.ip) < ntohl(range->min_ip) - || ntohl(tuple->src.ip) > ntohl(range->max_ip)) - return 0; - } - - rcu_read_lock(); - proto = __ip_nat_proto_find(tuple->dst.protonum); - if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) - || proto->in_range(tuple, IP_NAT_MANIP_SRC, - &range->min, &range->max)) - ret = 1; - rcu_read_unlock(); - - return ret; -} - -static inline int -same_src(const struct ip_conntrack *ct, - const struct ip_conntrack_tuple *tuple) -{ - return (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum - == tuple->dst.protonum - && ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip - == tuple->src.ip - && ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all - == tuple->src.u.all); -} - -/* Only called for SRC manip */ -static int -find_appropriate_src(const struct ip_conntrack_tuple *tuple, - struct ip_conntrack_tuple *result, - const struct ip_nat_range *range) -{ - unsigned int h = hash_by_src(tuple); - struct ip_conntrack *ct; - - read_lock_bh(&ip_nat_lock); - list_for_each_entry(ct, &bysource[h], nat.info.bysource) { - if (same_src(ct, tuple)) { - /* Copy source part from reply tuple. */ - invert_tuplepr(result, - &ct->tuplehash[IP_CT_DIR_REPLY].tuple); - result->dst = tuple->dst; - - if (in_range(result, range)) { - read_unlock_bh(&ip_nat_lock); - return 1; - } - } - } - read_unlock_bh(&ip_nat_lock); - return 0; -} - -/* For [FUTURE] fragmentation handling, we want the least-used - src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus - if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports - 1-65535, we don't do pro-rata allocation based on ports; we choose - the ip with the lowest src-ip/dst-ip/proto usage. -*/ -static void -find_best_ips_proto(struct ip_conntrack_tuple *tuple, - const struct ip_nat_range *range, - const struct ip_conntrack *conntrack, - enum ip_nat_manip_type maniptype) -{ - __be32 *var_ipp; - /* Host order */ - u_int32_t minip, maxip, j; - - /* No IP mapping? Do nothing. */ - if (!(range->flags & IP_NAT_RANGE_MAP_IPS)) - return; - - if (maniptype == IP_NAT_MANIP_SRC) - var_ipp = &tuple->src.ip; - else - var_ipp = &tuple->dst.ip; - - /* Fast path: only one choice. */ - if (range->min_ip == range->max_ip) { - *var_ipp = range->min_ip; - return; - } - - /* Hashing source and destination IPs gives a fairly even - * spread in practice (if there are a small number of IPs - * involved, there usually aren't that many connections - * anyway). The consistency means that servers see the same - * client coming from the same IP (some Internet Banking sites - * like this), even across reboots. */ - minip = ntohl(range->min_ip); - maxip = ntohl(range->max_ip); - j = jhash_2words((__force u32)tuple->src.ip, (__force u32)tuple->dst.ip, 0); - *var_ipp = htonl(minip + j % (maxip - minip + 1)); -} - -/* Manipulate the tuple into the range given. For NF_IP_POST_ROUTING, - * we change the source to map into the range. For NF_IP_PRE_ROUTING - * and NF_IP_LOCAL_OUT, we change the destination to map into the - * range. It might not be possible to get a unique tuple, but we try. - * At worst (or if we race), we will end up with a final duplicate in - * __ip_conntrack_confirm and drop the packet. 
*/ -static void -get_unique_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_conntrack_tuple *orig_tuple, - const struct ip_nat_range *range, - struct ip_conntrack *conntrack, - enum ip_nat_manip_type maniptype) -{ - struct ip_nat_protocol *proto; - - /* 1) If this srcip/proto/src-proto-part is currently mapped, - and that same mapping gives a unique tuple within the given - range, use that. - - This is only required for source (ie. NAT/masq) mappings. - So far, we don't do local source mappings, so multiple - manips not an issue. */ - if (maniptype == IP_NAT_MANIP_SRC) { - if (find_appropriate_src(orig_tuple, tuple, range)) { - DEBUGP("get_unique_tuple: Found current src map\n"); - if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) - if (!ip_nat_used_tuple(tuple, conntrack)) - return; - } - } - - /* 2) Select the least-used IP/proto combination in the given - range. */ - *tuple = *orig_tuple; - find_best_ips_proto(tuple, range, conntrack, maniptype); - - /* 3) The per-protocol part of the manip is made to map into - the range to make a unique tuple. */ - - rcu_read_lock(); - proto = __ip_nat_proto_find(orig_tuple->dst.protonum); - - /* Change protocol info to have some randomization */ - if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) { - proto->unique_tuple(tuple, range, maniptype, conntrack); - goto out; - } - - /* Only bother mapping if it's not already in range and unique */ - if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) - || proto->in_range(tuple, maniptype, &range->min, &range->max)) - && !ip_nat_used_tuple(tuple, conntrack)) - goto out; - - /* Last change: get protocol to try to obtain unique tuple. */ - proto->unique_tuple(tuple, range, maniptype, conntrack); -out: - rcu_read_unlock(); -} - -unsigned int -ip_nat_setup_info(struct ip_conntrack *conntrack, - const struct ip_nat_range *range, - unsigned int hooknum) -{ - struct ip_conntrack_tuple curr_tuple, new_tuple; - struct ip_nat_info *info = &conntrack->nat.info; - int have_to_hash = !(conntrack->status & IPS_NAT_DONE_MASK); - enum ip_nat_manip_type maniptype = HOOK2MANIP(hooknum); - - IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING - || hooknum == NF_IP_POST_ROUTING - || hooknum == NF_IP_LOCAL_IN - || hooknum == NF_IP_LOCAL_OUT); - BUG_ON(ip_nat_initialized(conntrack, maniptype)); - - /* What we've got will look like inverse of reply. Normally - this is what is in the conntrack, except for prior - manipulations (future optimization: if num_manips == 0, - orig_tp = - conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */ - invert_tuplepr(&curr_tuple, - &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple); - - get_unique_tuple(&new_tuple, &curr_tuple, range, conntrack, maniptype); - - if (!ip_ct_tuple_equal(&new_tuple, &curr_tuple)) { - struct ip_conntrack_tuple reply; - - /* Alter conntrack table so will recognize replies. */ - invert_tuplepr(&reply, &new_tuple); - ip_conntrack_alter_reply(conntrack, &reply); - - /* Non-atomic: we own this at the moment. */ - if (maniptype == IP_NAT_MANIP_SRC) - conntrack->status |= IPS_SRC_NAT; - else - conntrack->status |= IPS_DST_NAT; - } - - /* Place in source hash if this is the first time. */ - if (have_to_hash) { - unsigned int srchash - = hash_by_src(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL] - .tuple); - write_lock_bh(&ip_nat_lock); - list_add(&info->bysource, &bysource[srchash]); - write_unlock_bh(&ip_nat_lock); - } - - /* It's done. 
*/ - if (maniptype == IP_NAT_MANIP_DST) - set_bit(IPS_DST_NAT_DONE_BIT, &conntrack->status); - else - set_bit(IPS_SRC_NAT_DONE_BIT, &conntrack->status); - - return NF_ACCEPT; -} -EXPORT_SYMBOL(ip_nat_setup_info); - -/* Returns true if succeeded. */ -static int -manip_pkt(u_int16_t proto, - struct sk_buff **pskb, - unsigned int iphdroff, - const struct ip_conntrack_tuple *target, - enum ip_nat_manip_type maniptype) -{ - struct iphdr *iph; - struct ip_nat_protocol *p; - - if (!skb_make_writable(pskb, iphdroff + sizeof(*iph))) - return 0; - - iph = (void *)(*pskb)->data + iphdroff; - - /* Manipulate protcol part. */ - - /* rcu_read_lock()ed by nf_hook_slow */ - p = __ip_nat_proto_find(proto); - if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) - return 0; - - iph = (void *)(*pskb)->data + iphdroff; - - if (maniptype == IP_NAT_MANIP_SRC) { - nf_csum_replace4(&iph->check, iph->saddr, target->src.ip); - iph->saddr = target->src.ip; - } else { - nf_csum_replace4(&iph->check, iph->daddr, target->dst.ip); - iph->daddr = target->dst.ip; - } - return 1; -} - -/* Do packet manipulations according to ip_nat_setup_info. */ -unsigned int ip_nat_packet(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned int hooknum, - struct sk_buff **pskb) -{ - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - unsigned long statusbit; - enum ip_nat_manip_type mtype = HOOK2MANIP(hooknum); - - if (mtype == IP_NAT_MANIP_SRC) - statusbit = IPS_SRC_NAT; - else - statusbit = IPS_DST_NAT; - - /* Invert if this is reply dir. */ - if (dir == IP_CT_DIR_REPLY) - statusbit ^= IPS_NAT_MASK; - - /* Non-atomic: these bits don't change. */ - if (ct->status & statusbit) { - struct ip_conntrack_tuple target; - - /* We are aiming to look like inverse of other direction. */ - invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); - - if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype)) - return NF_DROP; - } - return NF_ACCEPT; -} -EXPORT_SYMBOL_GPL(ip_nat_packet); - -/* Dir is direction ICMP is coming from (opposite to packet it contains) */ -int ip_nat_icmp_reply_translation(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned int hooknum, - struct sk_buff **pskb) -{ - struct { - struct icmphdr icmp; - struct iphdr ip; - } *inside; - struct ip_conntrack_protocol *proto; - struct ip_conntrack_tuple inner, target; - int hdrlen = ip_hdrlen(*pskb); - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - unsigned long statusbit; - enum ip_nat_manip_type manip = HOOK2MANIP(hooknum); - - if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) - return 0; - - inside = (void *)(*pskb)->data + ip_hdrlen(*pskb); - - /* We're actually going to mangle it beyond trivial checksum - adjustment, so make sure the current checksum is correct. */ - if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0)) - return 0; - - /* Must be RELATED */ - IP_NF_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED || - (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); - - /* Redirects on non-null nats must be dropped, else they'll - start talking to each other without our translation, and be - confused... --RR */ - if (inside->icmp.type == ICMP_REDIRECT) { - /* If NAT isn't finished, assume it and drop. */ - if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) - return 0; - - if (ct->status & IPS_NAT_MASK) - return 0; - } - - DEBUGP("icmp_reply_translation: translating error %p manp %u dir %s\n", - *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? 
"ORIG" : "REPLY"); - - /* rcu_read_lock()ed by nf_hook_slow */ - proto = __ip_conntrack_proto_find(inside->ip.protocol); - if (!ip_ct_get_tuple(&inside->ip, *pskb, ip_hdrlen(*pskb) + - sizeof(struct icmphdr) + inside->ip.ihl*4, - &inner, proto)) - return 0; - - /* Change inner back to look like incoming packet. We do the - opposite manip on this hook to normal, because it might not - pass all hooks (locally-generated ICMP). Consider incoming - packet: PREROUTING (DST manip), routing produces ICMP, goes - through POSTROUTING (which must correct the DST manip). */ - if (!manip_pkt(inside->ip.protocol, pskb, - ip_hdrlen(*pskb) + sizeof(inside->icmp), - &ct->tuplehash[!dir].tuple, - !manip)) - return 0; - - if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { - /* Reloading "inside" here since manip_pkt inner. */ - inside = (void *)(*pskb)->data + ip_hdrlen(*pskb); - inside->icmp.checksum = 0; - inside->icmp.checksum = csum_fold(skb_checksum(*pskb, hdrlen, - (*pskb)->len - hdrlen, - 0)); - } - - /* Change outer to look the reply to an incoming packet - * (proto 0 means don't invert per-proto part). */ - if (manip == IP_NAT_MANIP_SRC) - statusbit = IPS_SRC_NAT; - else - statusbit = IPS_DST_NAT; - - /* Invert if this is reply dir. */ - if (dir == IP_CT_DIR_REPLY) - statusbit ^= IPS_NAT_MASK; - - if (ct->status & statusbit) { - invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); - if (!manip_pkt(0, pskb, 0, &target, manip)) - return 0; - } - - return 1; -} -EXPORT_SYMBOL_GPL(ip_nat_icmp_reply_translation); - -/* Protocol registration. */ -int ip_nat_protocol_register(struct ip_nat_protocol *proto) -{ - int ret = 0; - - write_lock_bh(&ip_nat_lock); - if (ip_nat_protos[proto->protonum] != &ip_nat_unknown_protocol) { - ret = -EBUSY; - goto out; - } - rcu_assign_pointer(ip_nat_protos[proto->protonum], proto); - out: - write_unlock_bh(&ip_nat_lock); - return ret; -} -EXPORT_SYMBOL(ip_nat_protocol_register); - -/* Noone stores the protocol anywhere; simply delete it. */ -void ip_nat_protocol_unregister(struct ip_nat_protocol *proto) -{ - write_lock_bh(&ip_nat_lock); - rcu_assign_pointer(ip_nat_protos[proto->protonum], - &ip_nat_unknown_protocol); - write_unlock_bh(&ip_nat_lock); - synchronize_rcu(); -} -EXPORT_SYMBOL(ip_nat_protocol_unregister); - -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) -int -ip_nat_port_range_to_nfattr(struct sk_buff *skb, - const struct ip_nat_range *range) -{ - NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16), - &range->min.tcp.port); - NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16), - &range->max.tcp.port); - - return 0; - -nfattr_failure: - return -1; -} - -int -ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range) -{ - int ret = 0; - - /* we have to return whether we actually parsed something or not */ - - if (tb[CTA_PROTONAT_PORT_MIN-1]) { - ret = 1; - range->min.tcp.port = - *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]); - } - - if (!tb[CTA_PROTONAT_PORT_MAX-1]) { - if (ret) - range->max.tcp.port = range->min.tcp.port; - } else { - ret = 1; - range->max.tcp.port = - *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]); - } - - return ret; -} -EXPORT_SYMBOL_GPL(ip_nat_port_nfattr_to_range); -EXPORT_SYMBOL_GPL(ip_nat_port_range_to_nfattr); -#endif - -static int __init ip_nat_init(void) -{ - size_t i; - - /* Leave them the same for the moment. 
*/ - ip_nat_htable_size = ip_conntrack_htable_size; - - /* One vmalloc for both hash tables */ - bysource = vmalloc(sizeof(struct list_head) * ip_nat_htable_size); - if (!bysource) - return -ENOMEM; - - /* Sew in builtin protocols. */ - write_lock_bh(&ip_nat_lock); - for (i = 0; i < MAX_IP_NAT_PROTO; i++) - rcu_assign_pointer(ip_nat_protos[i], &ip_nat_unknown_protocol); - rcu_assign_pointer(ip_nat_protos[IPPROTO_TCP], &ip_nat_protocol_tcp); - rcu_assign_pointer(ip_nat_protos[IPPROTO_UDP], &ip_nat_protocol_udp); - rcu_assign_pointer(ip_nat_protos[IPPROTO_ICMP], &ip_nat_protocol_icmp); - write_unlock_bh(&ip_nat_lock); - - for (i = 0; i < ip_nat_htable_size; i++) { - INIT_LIST_HEAD(&bysource[i]); - } - - /* FIXME: Man, this is a hack. */ - IP_NF_ASSERT(rcu_dereference(ip_conntrack_destroyed) == NULL); - rcu_assign_pointer(ip_conntrack_destroyed, ip_nat_cleanup_conntrack); - - /* Initialize fake conntrack so that NAT will skip it */ - ip_conntrack_untracked.status |= IPS_NAT_DONE_MASK; - return 0; -} - -/* Clear NAT section of all conntracks, in case we're loaded again. */ -static int clean_nat(struct ip_conntrack *i, void *data) -{ - memset(&i->nat, 0, sizeof(i->nat)); - i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST); - return 0; -} - -static void __exit ip_nat_cleanup(void) -{ - ip_ct_iterate_cleanup(&clean_nat, NULL); - rcu_assign_pointer(ip_conntrack_destroyed, NULL); - synchronize_rcu(); - vfree(bysource); -} - -MODULE_LICENSE("GPL"); - -module_init(ip_nat_init); -module_exit(ip_nat_cleanup); diff --git a/net/ipv4/netfilter/ip_nat_ftp.c b/net/ipv4/netfilter/ip_nat_ftp.c deleted file mode 100644 index 32e01d8dffcb..000000000000 --- a/net/ipv4/netfilter/ip_nat_ftp.c +++ /dev/null @@ -1,180 +0,0 @@ -/* FTP extension for TCP NAT alteration. */ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Rusty Russell "); -MODULE_DESCRIPTION("ftp NAT helper"); - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -/* FIXME: Time out? 
--RR */ - -static int -mangle_rfc959_packet(struct sk_buff **pskb, - __be32 newip, - u_int16_t port, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - u32 *seq) -{ - char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")]; - - sprintf(buffer, "%u,%u,%u,%u,%u,%u", - NIPQUAD(newip), port>>8, port&0xFF); - - DEBUGP("calling ip_nat_mangle_tcp_packet\n"); - - *seq += strlen(buffer) - matchlen; - return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, - matchlen, buffer, strlen(buffer)); -} - -/* |1|132.235.1.2|6275| */ -static int -mangle_eprt_packet(struct sk_buff **pskb, - __be32 newip, - u_int16_t port, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - u32 *seq) -{ - char buffer[sizeof("|1|255.255.255.255|65535|")]; - - sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port); - - DEBUGP("calling ip_nat_mangle_tcp_packet\n"); - - *seq += strlen(buffer) - matchlen; - return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, - matchlen, buffer, strlen(buffer)); -} - -/* |1|132.235.1.2|6275| */ -static int -mangle_epsv_packet(struct sk_buff **pskb, - __be32 newip, - u_int16_t port, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - u32 *seq) -{ - char buffer[sizeof("|||65535|")]; - - sprintf(buffer, "|||%u|", port); - - DEBUGP("calling ip_nat_mangle_tcp_packet\n"); - - *seq += strlen(buffer) - matchlen; - return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, - matchlen, buffer, strlen(buffer)); -} - -static int (*mangle[])(struct sk_buff **, __be32, u_int16_t, - unsigned int, - unsigned int, - struct ip_conntrack *, - enum ip_conntrack_info, - u32 *seq) -= { [IP_CT_FTP_PORT] = mangle_rfc959_packet, - [IP_CT_FTP_PASV] = mangle_rfc959_packet, - [IP_CT_FTP_EPRT] = mangle_eprt_packet, - [IP_CT_FTP_EPSV] = mangle_epsv_packet -}; - -/* So, this packet has hit the connection tracking matching code. - Mangle it, and change the expectation to match the new version. */ -static unsigned int ip_nat_ftp(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - enum ip_ct_ftp_type type, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack_expect *exp, - u32 *seq) -{ - __be32 newip; - u_int16_t port; - int dir = CTINFO2DIR(ctinfo); - struct ip_conntrack *ct = exp->master; - - DEBUGP("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); - - /* Connection will come from wherever this packet goes, hence !dir */ - newip = ct->tuplehash[!dir].tuple.dst.ip; - exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; - exp->dir = !dir; - - /* When you see the packet, we need to NAT it the same as the - * this one. */ - exp->expectfn = ip_nat_follow_master; - - /* Try to get same port: if not, try to change it. */ - for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { - exp->tuple.dst.u.tcp.port = htons(port); - if (ip_conntrack_expect_related(exp) == 0) - break; - } - - if (port == 0) - return NF_DROP; - - if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo, - seq)) { - ip_conntrack_unexpect_related(exp); - return NF_DROP; - } - return NF_ACCEPT; -} - -static void __exit ip_nat_ftp_fini(void) -{ - rcu_assign_pointer(ip_nat_ftp_hook, NULL); - synchronize_rcu(); -} - -static int __init ip_nat_ftp_init(void) -{ - BUG_ON(rcu_dereference(ip_nat_ftp_hook)); - rcu_assign_pointer(ip_nat_ftp_hook, ip_nat_ftp); - return 0; -} - -/* Prior to 2.6.11, we had a ports param. 
No longer, but don't break users. */ -static int warn_set(const char *val, struct kernel_param *kp) -{ - printk(KERN_INFO KBUILD_MODNAME - ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n"); - return 0; -} -module_param_call(ports, warn_set, NULL, NULL, 0); - -module_init(ip_nat_ftp_init); -module_exit(ip_nat_ftp_fini); diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c deleted file mode 100644 index 4cddc2951744..000000000000 --- a/net/ipv4/netfilter/ip_nat_helper.c +++ /dev/null @@ -1,436 +0,0 @@ -/* ip_nat_helper.c - generic support functions for NAT helpers - * - * (C) 2000-2002 Harald Welte - * (C) 2003-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * 14 Jan 2002 Harald Welte : - * - add support for SACK adjustment - * 14 Mar 2002 Harald Welte : - * - merge SACK support into newnat API - * 16 Aug 2002 Brian J. Murrell : - * - make ip_nat_resize_packet more generic (TCP and UDP) - * - add ip_nat_mangle_udp_packet - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#if 0 -#define DEBUGP printk -#define DUMP_OFFSET(x) printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos); -#else -#define DEBUGP(format, args...) -#define DUMP_OFFSET(x) -#endif - -static DEFINE_SPINLOCK(ip_nat_seqofs_lock); - -/* Setup TCP sequence correction given this change at this sequence */ -static inline void -adjust_tcp_sequence(u32 seq, - int sizediff, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - int dir; - struct ip_nat_seq *this_way, *other_way; - - DEBUGP("ip_nat_resize_packet: old_size = %u, new_size = %u\n", - (*skb)->len, new_size); - - dir = CTINFO2DIR(ctinfo); - - this_way = &ct->nat.info.seq[dir]; - other_way = &ct->nat.info.seq[!dir]; - - DEBUGP("ip_nat_resize_packet: Seq_offset before: "); - DUMP_OFFSET(this_way); - - spin_lock_bh(&ip_nat_seqofs_lock); - - /* SYN adjust. If it's uninitialized, or this is after last - * correction, record it: we don't handle more than one - * adjustment in the window, but do deal with common case of a - * retransmit */ - if (this_way->offset_before == this_way->offset_after - || before(this_way->correction_pos, seq)) { - this_way->correction_pos = seq; - this_way->offset_before = this_way->offset_after; - this_way->offset_after += sizediff; - } - spin_unlock_bh(&ip_nat_seqofs_lock); - - DEBUGP("ip_nat_resize_packet: Seq_offset after: "); - DUMP_OFFSET(this_way); -} - -/* Frobs data inside this packet, which is linear. 
*/ -static void mangle_contents(struct sk_buff *skb, - unsigned int dataoff, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len) -{ - unsigned char *data; - - BUG_ON(skb_is_nonlinear(skb)); - data = skb_network_header(skb) + dataoff; - - /* move post-replacement */ - memmove(data + match_offset + rep_len, - data + match_offset + match_len, - skb->tail - (data + match_offset + match_len)); - - /* insert data from buffer */ - memcpy(data + match_offset, rep_buffer, rep_len); - - /* update skb info */ - if (rep_len > match_len) { - DEBUGP("ip_nat_mangle_packet: Extending packet by " - "%u from %u bytes\n", rep_len - match_len, - skb->len); - skb_put(skb, rep_len - match_len); - } else { - DEBUGP("ip_nat_mangle_packet: Shrinking packet from " - "%u from %u bytes\n", match_len - rep_len, - skb->len); - __skb_trim(skb, skb->len + rep_len - match_len); - } - - /* fix IP hdr checksum information */ - ip_hdr(skb)->tot_len = htons(skb->len); - ip_send_check(ip_hdr(skb)); -} - -/* Unusual, but possible case. */ -static int enlarge_skb(struct sk_buff **pskb, unsigned int extra) -{ - struct sk_buff *nskb; - - if ((*pskb)->len + extra > 65535) - return 0; - - nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC); - if (!nskb) - return 0; - - /* Transfer socket to new skb. */ - if ((*pskb)->sk) - skb_set_owner_w(nskb, (*pskb)->sk); - kfree_skb(*pskb); - *pskb = nskb; - return 1; -} - -/* Generic function for mangling variable-length address changes inside - * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX - * command in FTP). - * - * Takes care about all the nasty sequence number changes, checksumming, - * skb enlargement, ... - * - * */ -int -ip_nat_mangle_tcp_packet(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len) -{ - struct iphdr *iph; - struct tcphdr *tcph; - int oldlen, datalen; - - if (!skb_make_writable(pskb, (*pskb)->len)) - return 0; - - if (rep_len > match_len - && rep_len - match_len > skb_tailroom(*pskb) - && !enlarge_skb(pskb, rep_len - match_len)) - return 0; - - SKB_LINEAR_ASSERT(*pskb); - - iph = ip_hdr(*pskb); - tcph = (void *)iph + iph->ihl*4; - - oldlen = (*pskb)->len - iph->ihl*4; - mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4, - match_offset, match_len, rep_buffer, rep_len); - - datalen = (*pskb)->len - iph->ihl*4; - if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { - tcph->check = 0; - tcph->check = tcp_v4_check(datalen, - iph->saddr, iph->daddr, - csum_partial((char *)tcph, - datalen, 0)); - } else - nf_proto_csum_replace2(&tcph->check, *pskb, - htons(oldlen), htons(datalen), 1); - - if (rep_len != match_len) { - set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); - adjust_tcp_sequence(ntohl(tcph->seq), - (int)rep_len - (int)match_len, - ct, ctinfo); - /* Tell TCP window tracking about seq change */ - ip_conntrack_tcp_update(*pskb, ct, CTINFO2DIR(ctinfo)); - } - return 1; -} -EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); - -/* Generic function for mangling variable-length address changes inside - * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX - * command in the Amanda protocol) - * - * Takes care about all the nasty sequence number changes, checksumming, - * skb enlargement, ... - * - * XXX - This function could be merged with ip_nat_mangle_tcp_packet which - * should be fairly easy to do. 
- */ -int -ip_nat_mangle_udp_packet(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len) -{ - struct iphdr *iph; - struct udphdr *udph; - int datalen, oldlen; - - /* UDP helpers might accidentally mangle the wrong packet */ - iph = ip_hdr(*pskb); - if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + - match_offset + match_len) - return 0; - - if (!skb_make_writable(pskb, (*pskb)->len)) - return 0; - - if (rep_len > match_len - && rep_len - match_len > skb_tailroom(*pskb) - && !enlarge_skb(pskb, rep_len - match_len)) - return 0; - - iph = ip_hdr(*pskb); - udph = (void *)iph + iph->ihl*4; - - oldlen = (*pskb)->len - iph->ihl*4; - mangle_contents(*pskb, iph->ihl*4 + sizeof(*udph), - match_offset, match_len, rep_buffer, rep_len); - - /* update the length of the UDP packet */ - datalen = (*pskb)->len - iph->ihl*4; - udph->len = htons(datalen); - - /* fix udp checksum if udp checksum was previously calculated */ - if (!udph->check && (*pskb)->ip_summed != CHECKSUM_PARTIAL) - return 1; - - if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { - udph->check = 0; - udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, - datalen, IPPROTO_UDP, - csum_partial((char *)udph, - datalen, 0)); - if (!udph->check) - udph->check = CSUM_MANGLED_0; - } else - nf_proto_csum_replace2(&udph->check, *pskb, - htons(oldlen), htons(datalen), 1); - return 1; -} -EXPORT_SYMBOL(ip_nat_mangle_udp_packet); - -/* Adjust one found SACK option including checksum correction */ -static void -sack_adjust(struct sk_buff *skb, - struct tcphdr *tcph, - unsigned int sackoff, - unsigned int sackend, - struct ip_nat_seq *natseq) -{ - while (sackoff < sackend) { - struct tcp_sack_block_wire *sack; - __be32 new_start_seq, new_end_seq; - - sack = (void *)skb->data + sackoff; - if (after(ntohl(sack->start_seq) - natseq->offset_before, - natseq->correction_pos)) - new_start_seq = htonl(ntohl(sack->start_seq) - - natseq->offset_after); - else - new_start_seq = htonl(ntohl(sack->start_seq) - - natseq->offset_before); - - if (after(ntohl(sack->end_seq) - natseq->offset_before, - natseq->correction_pos)) - new_end_seq = htonl(ntohl(sack->end_seq) - - natseq->offset_after); - else - new_end_seq = htonl(ntohl(sack->end_seq) - - natseq->offset_before); - - DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n", - ntohl(sack->start_seq), new_start_seq, - ntohl(sack->end_seq), new_end_seq); - - nf_proto_csum_replace4(&tcph->check, skb, - sack->start_seq, new_start_seq, 0); - nf_proto_csum_replace4(&tcph->check, skb, - sack->end_seq, new_end_seq, 0); - sack->start_seq = new_start_seq; - sack->end_seq = new_end_seq; - sackoff += sizeof(*sack); - } -} - -/* TCP SACK sequence number adjustment */ -static inline unsigned int -ip_nat_sack_adjust(struct sk_buff **pskb, - struct tcphdr *tcph, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - unsigned int dir, optoff, optend; - - optoff = ip_hdrlen(*pskb) + sizeof(struct tcphdr); - optend = ip_hdrlen(*pskb) + tcph->doff * 4; - - if (!skb_make_writable(pskb, optend)) - return 0; - - dir = CTINFO2DIR(ctinfo); - - while (optoff < optend) { - /* Usually: option, length. 
*/ - unsigned char *op = (*pskb)->data + optoff; - - switch (op[0]) { - case TCPOPT_EOL: - return 1; - case TCPOPT_NOP: - optoff++; - continue; - default: - /* no partial options */ - if (optoff + 1 == optend - || optoff + op[1] > optend - || op[1] < 2) - return 0; - if (op[0] == TCPOPT_SACK - && op[1] >= 2+TCPOLEN_SACK_PERBLOCK - && ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0) - sack_adjust(*pskb, tcph, optoff+2, - optoff+op[1], - &ct->nat.info.seq[!dir]); - optoff += op[1]; - } - } - return 1; -} - -/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */ -int -ip_nat_seq_adjust(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - struct tcphdr *tcph; - int dir; - __be32 newseq, newack; - struct ip_nat_seq *this_way, *other_way; - - dir = CTINFO2DIR(ctinfo); - - this_way = &ct->nat.info.seq[dir]; - other_way = &ct->nat.info.seq[!dir]; - - if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) - return 0; - - tcph = (void *)(*pskb)->data + ip_hdrlen(*pskb); - if (after(ntohl(tcph->seq), this_way->correction_pos)) - newseq = htonl(ntohl(tcph->seq) + this_way->offset_after); - else - newseq = htonl(ntohl(tcph->seq) + this_way->offset_before); - - if (after(ntohl(tcph->ack_seq) - other_way->offset_before, - other_way->correction_pos)) - newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after); - else - newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before); - - nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0); - nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0); - - DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n", - ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), - ntohl(newack)); - - tcph->seq = newseq; - tcph->ack_seq = newack; - - if (!ip_nat_sack_adjust(pskb, tcph, ct, ctinfo)) - return 0; - - ip_conntrack_tcp_update(*pskb, ct, dir); - - return 1; -} -EXPORT_SYMBOL(ip_nat_seq_adjust); - -/* Setup NAT on this expected conntrack so it follows master. */ -/* If we fail to get a free NAT slot, we'll get dropped on confirm */ -void ip_nat_follow_master(struct ip_conntrack *ct, - struct ip_conntrack_expect *exp) -{ - struct ip_nat_range range; - - /* This must be a fresh one. */ - BUG_ON(ct->status & IPS_NAT_DONE_MASK); - - /* Change src to where master sends to */ - range.flags = IP_NAT_RANGE_MAP_IPS; - range.min_ip = range.max_ip - = ct->master->tuplehash[!exp->dir].tuple.dst.ip; - /* hook doesn't matter, but it has to do source manip */ - ip_nat_setup_info(ct, &range, NF_IP_POST_ROUTING); - - /* For DST manip, map port here to where it's expected. */ - range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); - range.min = range.max = exp->saved_proto; - range.min_ip = range.max_ip - = ct->master->tuplehash[!exp->dir].tuple.src.ip; - /* hook doesn't matter, but it has to do destination manip */ - ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); -} -EXPORT_SYMBOL(ip_nat_follow_master); diff --git a/net/ipv4/netfilter/ip_nat_helper_h323.c b/net/ipv4/netfilter/ip_nat_helper_h323.c deleted file mode 100644 index 0d9444f9236b..000000000000 --- a/net/ipv4/netfilter/ip_nat_helper_h323.c +++ /dev/null @@ -1,611 +0,0 @@ -/* - * H.323 extension for NAT alteration. - * - * Copyright (c) 2006 Jing Min Zhao - * - * This source code is licensed under General Public License version 2. 
- * - * Based on the 'brute force' H.323 NAT module by - * Jozsef Kadlecsik - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -/****************************************************************************/ -static int set_addr(struct sk_buff **pskb, - unsigned char **data, int dataoff, - unsigned int addroff, __be32 ip, u_int16_t port) -{ - enum ip_conntrack_info ctinfo; - struct ip_conntrack *ct = ip_conntrack_get(*pskb, &ctinfo); - struct { - __be32 ip; - __be16 port; - } __attribute__ ((__packed__)) buf; - struct tcphdr _tcph, *th; - - buf.ip = ip; - buf.port = htons(port); - addroff += dataoff; - - if (ip_hdr(*pskb)->protocol == IPPROTO_TCP) { - if (!ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, - addroff, sizeof(buf), - (char *) &buf, sizeof(buf))) { - if (net_ratelimit()) - printk("ip_nat_h323: ip_nat_mangle_tcp_packet" - " error\n"); - return -1; - } - - /* Relocate data pointer */ - th = skb_header_pointer(*pskb, ip_hdrlen(*pskb), - sizeof(_tcph), &_tcph); - if (th == NULL) - return -1; - *data = (*pskb)->data + ip_hdrlen(*pskb) + - th->doff * 4 + dataoff; - } else { - if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, - addroff, sizeof(buf), - (char *) &buf, sizeof(buf))) { - if (net_ratelimit()) - printk("ip_nat_h323: ip_nat_mangle_udp_packet" - " error\n"); - return -1; - } - /* ip_nat_mangle_udp_packet uses skb_make_writable() to copy - * or pull everything in a linear buffer, so we can safely - * use the skb pointers now */ - *data = ((*pskb)->data + ip_hdrlen(*pskb) + - sizeof(struct udphdr)); - } - - return 0; -} - -/****************************************************************************/ -static int set_h225_addr(struct sk_buff **pskb, - unsigned char **data, int dataoff, - TransportAddress * addr, - __be32 ip, u_int16_t port) -{ - return set_addr(pskb, data, dataoff, addr->ipAddress.ip, ip, port); -} - -/****************************************************************************/ -static int set_h245_addr(struct sk_buff **pskb, - unsigned char **data, int dataoff, - H245_TransportAddress * addr, - __be32 ip, u_int16_t port) -{ - return set_addr(pskb, data, dataoff, - addr->unicastAddress.iPAddress.network, ip, port); -} - -/****************************************************************************/ -static int set_sig_addr(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, - TransportAddress * addr, int count) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int dir = CTINFO2DIR(ctinfo); - int i; - __be32 ip; - u_int16_t port; - - for (i = 0; i < count; i++) { - if (get_h225_addr(*data, &addr[i], &ip, &port)) { - if (ip == ct->tuplehash[dir].tuple.src.ip && - port == info->sig_port[dir]) { - /* GW->GK */ - - /* Fix for Gnomemeeting */ - if (i > 0 && - get_h225_addr(*data, &addr[0], - &ip, &port) && - (ntohl(ip) & 0xff000000) == 0x7f000000) - i = 0; - - DEBUGP - ("ip_nat_ras: set signal address " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(ip), port, - NIPQUAD(ct->tuplehash[!dir].tuple.dst. - ip), info->sig_port[!dir]); - return set_h225_addr(pskb, data, 0, &addr[i], - ct->tuplehash[!dir]. 
- tuple.dst.ip, - info->sig_port[!dir]); - } else if (ip == ct->tuplehash[dir].tuple.dst.ip && - port == info->sig_port[dir]) { - /* GK->GW */ - DEBUGP - ("ip_nat_ras: set signal address " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(ip), port, - NIPQUAD(ct->tuplehash[!dir].tuple.src. - ip), info->sig_port[!dir]); - return set_h225_addr(pskb, data, 0, &addr[i], - ct->tuplehash[!dir]. - tuple.src.ip, - info->sig_port[!dir]); - } - } - } - - return 0; -} - -/****************************************************************************/ -static int set_ras_addr(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, - TransportAddress * addr, int count) -{ - int dir = CTINFO2DIR(ctinfo); - int i; - __be32 ip; - u_int16_t port; - - for (i = 0; i < count; i++) { - if (get_h225_addr(*data, &addr[i], &ip, &port) && - ip == ct->tuplehash[dir].tuple.src.ip && - port == ntohs(ct->tuplehash[dir].tuple.src.u.udp.port)) { - DEBUGP("ip_nat_ras: set rasAddress " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(ip), port, - NIPQUAD(ct->tuplehash[!dir].tuple.dst.ip), - ntohs(ct->tuplehash[!dir].tuple.dst.u.udp. - port)); - return set_h225_addr(pskb, data, 0, &addr[i], - ct->tuplehash[!dir].tuple.dst.ip, - ntohs(ct->tuplehash[!dir].tuple. - dst.u.udp.port)); - } - } - - return 0; -} - -/****************************************************************************/ -static int nat_rtp_rtcp(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - H245_TransportAddress * addr, - u_int16_t port, u_int16_t rtp_port, - struct ip_conntrack_expect *rtp_exp, - struct ip_conntrack_expect *rtcp_exp) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int dir = CTINFO2DIR(ctinfo); - int i; - u_int16_t nated_port; - - /* Set expectations for NAT */ - rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port; - rtp_exp->expectfn = ip_nat_follow_master; - rtp_exp->dir = !dir; - rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port; - rtcp_exp->expectfn = ip_nat_follow_master; - rtcp_exp->dir = !dir; - - /* Lookup existing expects */ - for (i = 0; i < H323_RTP_CHANNEL_MAX; i++) { - if (info->rtp_port[i][dir] == rtp_port) { - /* Expected */ - - /* Use allocated ports first. This will refresh - * the expects */ - rtp_exp->tuple.dst.u.udp.port = - htons(info->rtp_port[i][dir]); - rtcp_exp->tuple.dst.u.udp.port = - htons(info->rtp_port[i][dir] + 1); - break; - } else if (info->rtp_port[i][dir] == 0) { - /* Not expected */ - break; - } - } - - /* Run out of expectations */ - if (i >= H323_RTP_CHANNEL_MAX) { - if (net_ratelimit()) - printk("ip_nat_h323: out of expectations\n"); - return 0; - } - - /* Try to get a pair of ports. */ - for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port); - nated_port != 0; nated_port += 2) { - rtp_exp->tuple.dst.u.udp.port = htons(nated_port); - if (ip_conntrack_expect_related(rtp_exp) == 0) { - rtcp_exp->tuple.dst.u.udp.port = - htons(nated_port + 1); - if (ip_conntrack_expect_related(rtcp_exp) == 0) - break; - ip_conntrack_unexpect_related(rtp_exp); - } - } - - if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - printk("ip_nat_h323: out of RTP ports\n"); - return 0; - } - - /* Modify signal */ - if (set_h245_addr(pskb, data, dataoff, addr, - ct->tuplehash[!dir].tuple.dst.ip, - (port & 1) ? 
nated_port + 1 : nated_port) == 0) { - /* Save ports */ - info->rtp_port[i][dir] = rtp_port; - info->rtp_port[i][!dir] = nated_port; - } else { - ip_conntrack_unexpect_related(rtp_exp); - ip_conntrack_unexpect_related(rtcp_exp); - return -1; - } - - /* Success */ - DEBUGP("ip_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(rtp_exp->tuple.src.ip), - ntohs(rtp_exp->tuple.src.u.udp.port), - NIPQUAD(rtp_exp->tuple.dst.ip), - ntohs(rtp_exp->tuple.dst.u.udp.port)); - DEBUGP("ip_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(rtcp_exp->tuple.src.ip), - ntohs(rtcp_exp->tuple.src.u.udp.port), - NIPQUAD(rtcp_exp->tuple.dst.ip), - ntohs(rtcp_exp->tuple.dst.u.udp.port)); - - return 0; -} - -/****************************************************************************/ -static int nat_t120(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - H245_TransportAddress * addr, u_int16_t port, - struct ip_conntrack_expect *exp) -{ - int dir = CTINFO2DIR(ctinfo); - u_int16_t nated_port = port; - - /* Set expectations for NAT */ - exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; - exp->expectfn = ip_nat_follow_master; - exp->dir = !dir; - - /* Try to get same port: if not, try to change it. */ - for (; nated_port != 0; nated_port++) { - exp->tuple.dst.u.tcp.port = htons(nated_port); - if (ip_conntrack_expect_related(exp) == 0) - break; - } - - if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - printk("ip_nat_h323: out of TCP ports\n"); - return 0; - } - - /* Modify signal */ - if (set_h245_addr(pskb, data, dataoff, addr, - ct->tuplehash[!dir].tuple.dst.ip, nated_port) < 0) { - ip_conntrack_unexpect_related(exp); - return -1; - } - - DEBUGP("ip_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); - - return 0; -} - -/**************************************************************************** - * This conntrack expect function replaces ip_conntrack_h245_expect() - * which was set by ip_conntrack_helper_h323.c. It calls both - * ip_nat_follow_master() and ip_conntrack_h245_expect() - ****************************************************************************/ -static void ip_nat_h245_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this) -{ - ip_nat_follow_master(new, this); - ip_conntrack_h245_expect(new, this); -} - -/****************************************************************************/ -static int nat_h245(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - TransportAddress * addr, u_int16_t port, - struct ip_conntrack_expect *exp) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int dir = CTINFO2DIR(ctinfo); - u_int16_t nated_port = port; - - /* Set expectations for NAT */ - exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; - exp->expectfn = ip_nat_h245_expect; - exp->dir = !dir; - - /* Check existing expects */ - if (info->sig_port[dir] == port) - nated_port = info->sig_port[!dir]; - - /* Try to get same port: if not, try to change it. 
*/ - for (; nated_port != 0; nated_port++) { - exp->tuple.dst.u.tcp.port = htons(nated_port); - if (ip_conntrack_expect_related(exp) == 0) - break; - } - - if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - printk("ip_nat_q931: out of TCP ports\n"); - return 0; - } - - /* Modify signal */ - if (set_h225_addr(pskb, data, dataoff, addr, - ct->tuplehash[!dir].tuple.dst.ip, - nated_port) == 0) { - /* Save ports */ - info->sig_port[dir] = port; - info->sig_port[!dir] = nated_port; - } else { - ip_conntrack_unexpect_related(exp); - return -1; - } - - DEBUGP("ip_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); - - return 0; -} - -/**************************************************************************** - * This conntrack expect function replaces ip_conntrack_q931_expect() - * which was set by ip_conntrack_helper_h323.c. - ****************************************************************************/ -static void ip_nat_q931_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this) -{ - struct ip_nat_range range; - - if (this->tuple.src.ip != 0) { /* Only accept calls from GK */ - ip_nat_follow_master(new, this); - goto out; - } - - /* This must be a fresh one. */ - BUG_ON(new->status & IPS_NAT_DONE_MASK); - - /* Change src to where master sends to */ - range.flags = IP_NAT_RANGE_MAP_IPS; - range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.ip; - - /* hook doesn't matter, but it has to do source manip */ - ip_nat_setup_info(new, &range, NF_IP_POST_ROUTING); - - /* For DST manip, map port here to where it's expected. */ - range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); - range.min = range.max = this->saved_proto; - range.min_ip = range.max_ip = - new->master->tuplehash[!this->dir].tuple.src.ip; - - /* hook doesn't matter, but it has to do destination manip */ - ip_nat_setup_info(new, &range, NF_IP_PRE_ROUTING); - - out: - ip_conntrack_q931_expect(new, this); -} - -/****************************************************************************/ -static int nat_q931(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, TransportAddress * addr, int idx, - u_int16_t port, struct ip_conntrack_expect *exp) -{ - struct ip_ct_h323_master *info = &ct->help.ct_h323_info; - int dir = CTINFO2DIR(ctinfo); - u_int16_t nated_port = port; - __be32 ip; - - /* Set expectations for NAT */ - exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; - exp->expectfn = ip_nat_q931_expect; - exp->dir = !dir; - - /* Check existing expects */ - if (info->sig_port[dir] == port) - nated_port = info->sig_port[!dir]; - - /* Try to get same port: if not, try to change it. 
*/ - for (; nated_port != 0; nated_port++) { - exp->tuple.dst.u.tcp.port = htons(nated_port); - if (ip_conntrack_expect_related(exp) == 0) - break; - } - - if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - printk("ip_nat_ras: out of TCP ports\n"); - return 0; - } - - /* Modify signal */ - if (set_h225_addr(pskb, data, 0, &addr[idx], - ct->tuplehash[!dir].tuple.dst.ip, - nated_port) == 0) { - /* Save ports */ - info->sig_port[dir] = port; - info->sig_port[!dir] = nated_port; - - /* Fix for Gnomemeeting */ - if (idx > 0 && - get_h225_addr(*data, &addr[0], &ip, &port) && - (ntohl(ip) & 0xff000000) == 0x7f000000) { - set_h225_addr_hook(pskb, data, 0, &addr[0], - ct->tuplehash[!dir].tuple.dst.ip, - info->sig_port[!dir]); - } - } else { - ip_conntrack_unexpect_related(exp); - return -1; - } - - /* Success */ - DEBUGP("ip_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); - - return 0; -} - -/****************************************************************************/ -static void ip_nat_callforwarding_expect(struct ip_conntrack *new, - struct ip_conntrack_expect *this) -{ - struct ip_nat_range range; - - /* This must be a fresh one. */ - BUG_ON(new->status & IPS_NAT_DONE_MASK); - - /* Change src to where master sends to */ - range.flags = IP_NAT_RANGE_MAP_IPS; - range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.ip; - - /* hook doesn't matter, but it has to do source manip */ - ip_nat_setup_info(new, &range, NF_IP_POST_ROUTING); - - /* For DST manip, map port here to where it's expected. */ - range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); - range.min = range.max = this->saved_proto; - range.min_ip = range.max_ip = this->saved_ip; - - /* hook doesn't matter, but it has to do destination manip */ - ip_nat_setup_info(new, &range, NF_IP_PRE_ROUTING); - - ip_conntrack_q931_expect(new, this); -} - -/****************************************************************************/ -static int nat_callforwarding(struct sk_buff **pskb, struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned char **data, int dataoff, - TransportAddress * addr, u_int16_t port, - struct ip_conntrack_expect *exp) -{ - int dir = CTINFO2DIR(ctinfo); - u_int16_t nated_port; - - /* Set expectations for NAT */ - exp->saved_ip = exp->tuple.dst.ip; - exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip; - exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; - exp->expectfn = ip_nat_callforwarding_expect; - exp->dir = !dir; - - /* Try to get same port: if not, try to change it. 
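The helpers above share one allocation pattern: keep the caller's port if possible, otherwise walk upward until ip_conntrack_expect_related() accepts the tuple, and treat a wrap of the 16-bit counter back to zero as failure. A minimal standalone sketch of that search, with a caller-supplied in_use() predicate standing in for the conntrack expectation check (not kernel code):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Try to keep the original port; otherwise scan upward until the 16-bit
 * counter wraps to zero, mirroring the "nated_port == 0" failure check
 * in the helpers above.  in_use() stands in for a non-zero return from
 * ip_conntrack_expect_related(). */
static uint16_t pick_nated_port(uint16_t wanted, bool (*in_use)(uint16_t))
{
    uint16_t port;

    for (port = wanted; port != 0; port++)
        if (!in_use(port))
            return port;
    return 0;                       /* no port available */
}

/* toy predicate: pretend everything below 1720 is already taken */
static bool below_1720_taken(uint16_t port) { return port < 1720; }

int main(void)
{
    printf("%u\n", (unsigned)pick_nated_port(1719, below_1720_taken)); /* 1720 */
    return 0;
}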
*/ - for (nated_port = port; nated_port != 0; nated_port++) { - exp->tuple.dst.u.tcp.port = htons(nated_port); - if (ip_conntrack_expect_related(exp) == 0) - break; - } - - if (nated_port == 0) { /* No port available */ - if (net_ratelimit()) - printk("ip_nat_q931: out of TCP ports\n"); - return 0; - } - - /* Modify signal */ - if (!set_h225_addr(pskb, data, dataoff, addr, - ct->tuplehash[!dir].tuple.dst.ip, - nated_port) == 0) { - ip_conntrack_unexpect_related(exp); - return -1; - } - - /* Success */ - DEBUGP("ip_nat_q931: expect Call Forwarding " - "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", - NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), - NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); - - return 0; -} - -/****************************************************************************/ -static int __init init(void) -{ - BUG_ON(rcu_dereference(set_h245_addr_hook) != NULL); - BUG_ON(rcu_dereference(set_h225_addr_hook) != NULL); - BUG_ON(rcu_dereference(set_sig_addr_hook) != NULL); - BUG_ON(rcu_dereference(set_ras_addr_hook) != NULL); - BUG_ON(rcu_dereference(nat_rtp_rtcp_hook) != NULL); - BUG_ON(rcu_dereference(nat_t120_hook) != NULL); - BUG_ON(rcu_dereference(nat_h245_hook) != NULL); - BUG_ON(rcu_dereference(nat_callforwarding_hook) != NULL); - BUG_ON(rcu_dereference(nat_q931_hook) != NULL); - - rcu_assign_pointer(set_h245_addr_hook, set_h245_addr); - rcu_assign_pointer(set_h225_addr_hook, set_h225_addr); - rcu_assign_pointer(set_sig_addr_hook, set_sig_addr); - rcu_assign_pointer(set_ras_addr_hook, set_ras_addr); - rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp); - rcu_assign_pointer(nat_t120_hook, nat_t120); - rcu_assign_pointer(nat_h245_hook, nat_h245); - rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding); - rcu_assign_pointer(nat_q931_hook, nat_q931); - - DEBUGP("ip_nat_h323: init success\n"); - return 0; -} - -/****************************************************************************/ -static void __exit fini(void) -{ - rcu_assign_pointer(set_h245_addr_hook, NULL); - rcu_assign_pointer(set_h225_addr_hook, NULL); - rcu_assign_pointer(set_sig_addr_hook, NULL); - rcu_assign_pointer(set_ras_addr_hook, NULL); - rcu_assign_pointer(nat_rtp_rtcp_hook, NULL); - rcu_assign_pointer(nat_t120_hook, NULL); - rcu_assign_pointer(nat_h245_hook, NULL); - rcu_assign_pointer(nat_callforwarding_hook, NULL); - rcu_assign_pointer(nat_q931_hook, NULL); - synchronize_rcu(); -} - -/****************************************************************************/ -module_init(init); -module_exit(fini); - -MODULE_AUTHOR("Jing Min Zhao "); -MODULE_DESCRIPTION("H.323 NAT helper"); -MODULE_LICENSE("GPL"); diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c deleted file mode 100644 index 24ce4a5023d7..000000000000 --- a/net/ipv4/netfilter/ip_nat_helper_pptp.c +++ /dev/null @@ -1,350 +0,0 @@ -/* - * ip_nat_pptp.c - Version 3.0 - * - * NAT support for PPTP (Point to Point Tunneling Protocol). - * PPTP is a a protocol for creating virtual private networks. - * It is a specification defined by Microsoft and some vendors - * working with Microsoft. PPTP is built on top of a modified - * version of the Internet Generic Routing Encapsulation Protocol. - * GRE is defined in RFC 1701 and RFC 1702. 
Documentation of - * PPTP can be found in RFC 2637 - * - * (C) 2000-2005 by Harald Welte - * - * Development of this code funded by Astaro AG (http://www.astaro.com/) - * - * TODO: - NAT to a unique tuple, not to TCP source port - * (needs netfilter tuple reservation) - * - * Changes: - * 2002-02-10 - Version 1.3 - * - Use ip_nat_mangle_tcp_packet() because of cloned skb's - * in local connections (Philip Craig ) - * - add checks for magicCookie and pptp version - * - make argument list of pptp_{out,in}bound_packet() shorter - * - move to C99 style initializers - * - print version number at module loadtime - * 2003-09-22 - Version 1.5 - * - use SNATed tcp sourceport as callid, since we get called before - * TCP header is mangled (Philip Craig ) - * 2004-10-22 - Version 2.0 - * - kernel 2.6.x version - * 2005-06-10 - Version 3.0 - * - kernel >= 2.6.11 version, - * funded by Oxcoda NetBox Blue (http://www.netboxblue.com/) - * - */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#define IP_NAT_PPTP_VERSION "3.0" - -#define REQ_CID(req, off) (*(__be16 *)((char *)(req) + (off))) - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Harald Welte "); -MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP"); - - -#if 0 -extern const char *pptp_msg_name[]; -#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \ - __FUNCTION__, ## args) -#else -#define DEBUGP(format, args...) -#endif - -static void pptp_nat_expected(struct ip_conntrack *ct, - struct ip_conntrack_expect *exp) -{ - struct ip_conntrack *master = ct->master; - struct ip_conntrack_expect *other_exp; - struct ip_conntrack_tuple t; - struct ip_ct_pptp_master *ct_pptp_info; - struct ip_nat_pptp *nat_pptp_info; - struct ip_nat_range range; - - ct_pptp_info = &master->help.ct_pptp_info; - nat_pptp_info = &master->nat.help.nat_pptp_info; - - /* And here goes the grand finale of corrosion... */ - - if (exp->dir == IP_CT_DIR_ORIGINAL) { - DEBUGP("we are PNS->PAC\n"); - /* therefore, build tuple for PAC->PNS */ - t.src.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip; - t.src.u.gre.key = master->help.ct_pptp_info.pac_call_id; - t.dst.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip; - t.dst.u.gre.key = master->help.ct_pptp_info.pns_call_id; - t.dst.protonum = IPPROTO_GRE; - } else { - DEBUGP("we are PAC->PNS\n"); - /* build tuple for PNS->PAC */ - t.src.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip; - t.src.u.gre.key = master->nat.help.nat_pptp_info.pns_call_id; - t.dst.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip; - t.dst.u.gre.key = master->nat.help.nat_pptp_info.pac_call_id; - t.dst.protonum = IPPROTO_GRE; - } - - DEBUGP("trying to unexpect other dir: "); - DUMP_TUPLE(&t); - other_exp = ip_conntrack_expect_find_get(&t); - if (other_exp) { - ip_conntrack_unexpect_related(other_exp); - ip_conntrack_expect_put(other_exp); - DEBUGP("success\n"); - } else { - DEBUGP("not found!\n"); - } - - /* This must be a fresh one. */ - BUG_ON(ct->status & IPS_NAT_DONE_MASK); - - /* Change src to where master sends to */ - range.flags = IP_NAT_RANGE_MAP_IPS; - range.min_ip = range.max_ip - = ct->master->tuplehash[!exp->dir].tuple.dst.ip; - if (exp->dir == IP_CT_DIR_ORIGINAL) { - range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; - range.min = range.max = exp->saved_proto; - } - /* hook doesn't matter, but it has to do source manip */ - ip_nat_setup_info(ct, &range, NF_IP_POST_ROUTING); - - /* For DST manip, map port here to where it's expected. 
*/ - range.flags = IP_NAT_RANGE_MAP_IPS; - range.min_ip = range.max_ip - = ct->master->tuplehash[!exp->dir].tuple.src.ip; - if (exp->dir == IP_CT_DIR_REPLY) { - range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; - range.min = range.max = exp->saved_proto; - } - /* hook doesn't matter, but it has to do destination manip */ - ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); -} - -/* outbound packets == from PNS to PAC */ -static int -pptp_outbound_pkt(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq) - -{ - struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info; - struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info; - u_int16_t msg; - __be16 new_callid; - unsigned int cid_off; - - new_callid = ct_pptp_info->pns_call_id; - - switch (msg = ntohs(ctlh->messageType)) { - case PPTP_OUT_CALL_REQUEST: - cid_off = offsetof(union pptp_ctrl_union, ocreq.callID); - /* FIXME: ideally we would want to reserve a call ID - * here. current netfilter NAT core is not able to do - * this :( For now we use TCP source port. This breaks - * multiple calls within one control session */ - - /* save original call ID in nat_info */ - nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id; - - /* don't use tcph->source since we are at a DSTmanip - * hook (e.g. PREROUTING) and pkt is not mangled yet */ - new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port; - - /* save new call ID in ct info */ - ct_pptp_info->pns_call_id = new_callid; - break; - case PPTP_IN_CALL_REPLY: - cid_off = offsetof(union pptp_ctrl_union, icack.callID); - break; - case PPTP_CALL_CLEAR_REQUEST: - cid_off = offsetof(union pptp_ctrl_union, clrreq.callID); - break; - default: - DEBUGP("unknown outbound packet 0x%04x:%s\n", msg, - (msg <= PPTP_MSG_MAX)? 
- pptp_msg_name[msg]:pptp_msg_name[0]); - /* fall through */ - - case PPTP_SET_LINK_INFO: - /* only need to NAT in case PAC is behind NAT box */ - case PPTP_START_SESSION_REQUEST: - case PPTP_START_SESSION_REPLY: - case PPTP_STOP_SESSION_REQUEST: - case PPTP_STOP_SESSION_REPLY: - case PPTP_ECHO_REQUEST: - case PPTP_ECHO_REPLY: - /* no need to alter packet */ - return NF_ACCEPT; - } - - /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass - * down to here */ - DEBUGP("altering call id from 0x%04x to 0x%04x\n", - ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); - - /* mangle packet */ - if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, - cid_off + sizeof(struct pptp_pkt_hdr) + - sizeof(struct PptpControlHeader), - sizeof(new_callid), (char *)&new_callid, - sizeof(new_callid)) == 0) - return NF_DROP; - - return NF_ACCEPT; -} - -static void -pptp_exp_gre(struct ip_conntrack_expect *expect_orig, - struct ip_conntrack_expect *expect_reply) -{ - struct ip_conntrack *ct = expect_orig->master; - struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info; - struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info; - - /* save original PAC call ID in nat_info */ - nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id; - - /* alter expectation for PNS->PAC direction */ - expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id; - expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id; - expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id; - expect_orig->dir = IP_CT_DIR_ORIGINAL; - - /* alter expectation for PAC->PNS direction */ - expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id; - expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id; - expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id; - expect_reply->dir = IP_CT_DIR_REPLY; -} - -/* inbound packets == from PAC to PNS */ -static int -pptp_inbound_pkt(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq) -{ - struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info; - u_int16_t msg; - __be16 new_pcid; - unsigned int pcid_off; - - new_pcid = nat_pptp_info->pns_call_id; - - switch (msg = ntohs(ctlh->messageType)) { - case PPTP_OUT_CALL_REPLY: - pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID); - break; - case PPTP_IN_CALL_CONNECT: - pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID); - break; - case PPTP_IN_CALL_REQUEST: - /* only need to nat in case PAC is behind NAT box */ - return NF_ACCEPT; - case PPTP_WAN_ERROR_NOTIFY: - pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID); - break; - case PPTP_CALL_DISCONNECT_NOTIFY: - pcid_off = offsetof(union pptp_ctrl_union, disc.callID); - break; - case PPTP_SET_LINK_INFO: - pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); - break; - - default: - DEBUGP("unknown inbound packet %s\n", (msg <= PPTP_MSG_MAX)? 
- pptp_msg_name[msg]:pptp_msg_name[0]); - /* fall through */ - - case PPTP_START_SESSION_REQUEST: - case PPTP_START_SESSION_REPLY: - case PPTP_STOP_SESSION_REQUEST: - case PPTP_STOP_SESSION_REPLY: - case PPTP_ECHO_REQUEST: - case PPTP_ECHO_REPLY: - /* no need to alter packet */ - return NF_ACCEPT; - } - - /* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST, - * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */ - - /* mangle packet */ - DEBUGP("altering peer call id from 0x%04x to 0x%04x\n", - ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); - - if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, - pcid_off + sizeof(struct pptp_pkt_hdr) + - sizeof(struct PptpControlHeader), - sizeof(new_pcid), (char *)&new_pcid, - sizeof(new_pcid)) == 0) - return NF_DROP; - return NF_ACCEPT; -} - - -extern int __init ip_nat_proto_gre_init(void); -extern void __exit ip_nat_proto_gre_fini(void); - -static int __init ip_nat_helper_pptp_init(void) -{ - int ret; - - DEBUGP("%s: registering NAT helper\n", __FILE__); - - ret = ip_nat_proto_gre_init(); - if (ret < 0) - return ret; - - BUG_ON(rcu_dereference(ip_nat_pptp_hook_outbound)); - rcu_assign_pointer(ip_nat_pptp_hook_outbound, pptp_outbound_pkt); - - BUG_ON(rcu_dereference(ip_nat_pptp_hook_inbound)); - rcu_assign_pointer(ip_nat_pptp_hook_inbound, pptp_inbound_pkt); - - BUG_ON(rcu_dereference(ip_nat_pptp_hook_exp_gre)); - rcu_assign_pointer(ip_nat_pptp_hook_exp_gre, pptp_exp_gre); - - BUG_ON(rcu_dereference(ip_nat_pptp_hook_expectfn)); - rcu_assign_pointer(ip_nat_pptp_hook_expectfn, pptp_nat_expected); - - printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION); - return 0; -} - -static void __exit ip_nat_helper_pptp_fini(void) -{ - DEBUGP("cleanup_module\n" ); - - rcu_assign_pointer(ip_nat_pptp_hook_expectfn, NULL); - rcu_assign_pointer(ip_nat_pptp_hook_exp_gre, NULL); - rcu_assign_pointer(ip_nat_pptp_hook_inbound, NULL); - rcu_assign_pointer(ip_nat_pptp_hook_outbound, NULL); - synchronize_rcu(); - - ip_nat_proto_gre_fini(); - - printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION); -} - -module_init(ip_nat_helper_pptp_init); -module_exit(ip_nat_helper_pptp_fini); diff --git a/net/ipv4/netfilter/ip_nat_irc.c b/net/ipv4/netfilter/ip_nat_irc.c deleted file mode 100644 index cfaeea38314f..000000000000 --- a/net/ipv4/netfilter/ip_nat_irc.c +++ /dev/null @@ -1,122 +0,0 @@ -/* IRC extension for TCP NAT alteration. - * (C) 2000-2001 by Harald Welte - * (C) 2004 Rusty Russell IBM Corporation - * based on a copy of RR's ip_nat_ftp.c - * - * ip_nat_irc.c,v 1.16 2001/12/06 07:42:10 laforge Exp - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) 
-#endif - -MODULE_AUTHOR("Harald Welte "); -MODULE_DESCRIPTION("IRC (DCC) NAT helper"); -MODULE_LICENSE("GPL"); - -static unsigned int help(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - unsigned int matchoff, - unsigned int matchlen, - struct ip_conntrack_expect *exp) -{ - u_int16_t port; - unsigned int ret; - - /* "4294967296 65635 " */ - char buffer[18]; - - DEBUGP("IRC_NAT: info (seq %u + %u) in %u\n", - expect->seq, exp_irc_info->len, - ntohl(tcph->seq)); - - /* Reply comes from server. */ - exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; - exp->dir = IP_CT_DIR_REPLY; - - /* When you see the packet, we need to NAT it the same as the - * this one. */ - exp->expectfn = ip_nat_follow_master; - - /* Try to get same port: if not, try to change it. */ - for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { - exp->tuple.dst.u.tcp.port = htons(port); - if (ip_conntrack_expect_related(exp) == 0) - break; - } - - if (port == 0) - return NF_DROP; - - /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 - * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 - * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26 - * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26 - * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27 - * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits, - * 255.255.255.255==4294967296, 10 digits) - * P: bound port (min 1 d, max 5d (65635)) - * F: filename (min 1 d ) - * S: size (min 1 d ) - * 0x01, \n: terminators - */ - - /* AAA = "us", ie. where server normally talks to. */ - sprintf(buffer, "%u %u", - ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip), - port); - DEBUGP("ip_nat_irc: Inserting '%s' == %u.%u.%u.%u, port %u\n", - buffer, NIPQUAD(exp->tuple.src.ip), port); - - ret = ip_nat_mangle_tcp_packet(pskb, exp->master, ctinfo, - matchoff, matchlen, buffer, - strlen(buffer)); - if (ret != NF_ACCEPT) - ip_conntrack_unexpect_related(exp); - return ret; -} - -static void __exit ip_nat_irc_fini(void) -{ - rcu_assign_pointer(ip_nat_irc_hook, NULL); - synchronize_rcu(); -} - -static int __init ip_nat_irc_init(void) -{ - BUG_ON(rcu_dereference(ip_nat_irc_hook)); - rcu_assign_pointer(ip_nat_irc_hook, help); - return 0; -} - -/* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */ -static int warn_set(const char *val, struct kernel_param *kp) -{ - printk(KERN_INFO KBUILD_MODNAME - ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n"); - return 0; -} -module_param_call(ports, warn_set, NULL, NULL, 0); - -module_init(ip_nat_irc_init); -module_exit(ip_nat_irc_fini); diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c deleted file mode 100644 index 95810202d849..000000000000 --- a/net/ipv4/netfilter/ip_nat_proto_gre.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * ip_nat_proto_gre.c - Version 2.0 - * - * NAT protocol helper module for GRE. - * - * GRE is a generic encapsulation protocol, which is generally not very - * suited for NAT, as it has no protocol-specific part as port numbers. - * - * It has an optional key field, which may help us distinguishing two - * connections between the same two hosts. - * - * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784 - * - * PPTP is built on top of a modified version of GRE, and has a mandatory - * field called "CallID", which serves us for the same purpose as the key - * field in plain GRE. 
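For orientation, a sketch of the enhanced GRE header that PPTP uses, as described in RFC 2637, showing the call ID slot this helper treats as a pseudo-port; the layout below is illustrative and is not the struct gre_hdr_pptp the driver itself defines:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Fixed part of the PPTP "enhanced GRE" header (RFC 2637); the optional
 * 32-bit sequence and acknowledgment numbers that may follow are omitted. */
struct pptp_gre_hdr {
    uint16_t flags_version;   /* K bit set, version 1 */
    uint16_t protocol;        /* 0x880B for PPP payloads */
    uint16_t payload_len;
    uint16_t call_id;         /* plays the role of a port for NAT */
};

/* Rewrite the call ID in a raw header buffer, the operation performed for
 * GRE_VERSION_PPTP packets; new_id_be is expected in network byte order. */
static void rewrite_call_id(unsigned char *greh, uint16_t new_id_be)
{
    memcpy(greh + offsetof(struct pptp_gre_hdr, call_id),
           &new_id_be, sizeof(new_id_be));
}

int main(void)
{
    unsigned char pkt[sizeof(struct pptp_gre_hdr)] = { 0 };
    uint16_t id_be = 0;             /* would be htons(new_call_id) in practice */

    rewrite_call_id(pkt, id_be);
    return 0;
}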
- * - * Documentation about PPTP can be found in RFC 2637 - * - * (C) 2000-2005 by Harald Welte - * - * Development of this code funded by Astaro AG (http://www.astaro.com/) - * - */ - -#include -#include -#include -#include -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Harald Welte "); -MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); - -#if 0 -#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \ - __FUNCTION__, ## args) -#else -#define DEBUGP(x, args...) -#endif - -/* is key in given range between min and max */ -static int -gre_in_range(const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype, - const union ip_conntrack_manip_proto *min, - const union ip_conntrack_manip_proto *max) -{ - __be16 key; - - if (maniptype == IP_NAT_MANIP_SRC) - key = tuple->src.u.gre.key; - else - key = tuple->dst.u.gre.key; - - return ntohs(key) >= ntohs(min->gre.key) - && ntohs(key) <= ntohs(max->gre.key); -} - -/* generate unique tuple ... */ -static int -gre_unique_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_nat_range *range, - enum ip_nat_manip_type maniptype, - const struct ip_conntrack *conntrack) -{ - static u_int16_t key; - __be16 *keyptr; - unsigned int min, i, range_size; - - if (maniptype == IP_NAT_MANIP_SRC) - keyptr = &tuple->src.u.gre.key; - else - keyptr = &tuple->dst.u.gre.key; - - if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { - DEBUGP("%p: NATing GRE PPTP\n", conntrack); - min = 1; - range_size = 0xffff; - } else { - min = ntohs(range->min.gre.key); - range_size = ntohs(range->max.gre.key) - min + 1; - } - - DEBUGP("min = %u, range_size = %u\n", min, range_size); - - for (i = 0; i < range_size; i++, key++) { - *keyptr = htons(min + key % range_size); - if (!ip_nat_used_tuple(tuple, conntrack)) - return 1; - } - - DEBUGP("%p: no NAT mapping\n", conntrack); - - return 0; -} - -/* manipulate a GRE packet according to maniptype */ -static int -gre_manip_pkt(struct sk_buff **pskb, - unsigned int iphdroff, - const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype) -{ - struct gre_hdr *greh; - struct gre_hdr_pptp *pgreh; - struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); - unsigned int hdroff = iphdroff + iph->ihl*4; - - /* pgreh includes two optional 32bit fields which are not required - * to be there. That's where the magic '8' comes from */ - if (!skb_make_writable(pskb, hdroff + sizeof(*pgreh)-8)) - return 0; - - greh = (void *)(*pskb)->data + hdroff; - pgreh = (struct gre_hdr_pptp *) greh; - - /* we only have destination manip of a packet, since 'source key' - * is not present in the packet itself */ - if (maniptype == IP_NAT_MANIP_DST) { - /* key manipulation is always dest */ - switch (greh->version) { - case 0: - if (!greh->key) { - DEBUGP("can't nat GRE w/o key\n"); - break; - } - if (greh->csum) { - /* FIXME: Never tested this code... 
*/ - nf_proto_csum_replace4(gre_csum(greh), *pskb, - *(gre_key(greh)), - tuple->dst.u.gre.key, 0); - } - *(gre_key(greh)) = tuple->dst.u.gre.key; - break; - case GRE_VERSION_PPTP: - DEBUGP("call_id -> 0x%04x\n", - ntohs(tuple->dst.u.gre.key)); - pgreh->call_id = tuple->dst.u.gre.key; - break; - default: - DEBUGP("can't nat unknown GRE version\n"); - return 0; - break; - } - } - return 1; -} - -/* nat helper struct */ -static struct ip_nat_protocol gre = { - .name = "GRE", - .protonum = IPPROTO_GRE, - .manip_pkt = gre_manip_pkt, - .in_range = gre_in_range, - .unique_tuple = gre_unique_tuple, -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) - .range_to_nfattr = ip_nat_port_range_to_nfattr, - .nfattr_to_range = ip_nat_port_nfattr_to_range, -#endif -}; - -int __init ip_nat_proto_gre_init(void) -{ - return ip_nat_protocol_register(&gre); -} - -void __exit ip_nat_proto_gre_fini(void) -{ - ip_nat_protocol_unregister(&gre); -} diff --git a/net/ipv4/netfilter/ip_nat_proto_icmp.c b/net/ipv4/netfilter/ip_nat_proto_icmp.c deleted file mode 100644 index 22a528ae0380..000000000000 --- a/net/ipv4/netfilter/ip_nat_proto_icmp.c +++ /dev/null @@ -1,87 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -static int -icmp_in_range(const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype, - const union ip_conntrack_manip_proto *min, - const union ip_conntrack_manip_proto *max) -{ - return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) && - ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id); -} - -static int -icmp_unique_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_nat_range *range, - enum ip_nat_manip_type maniptype, - const struct ip_conntrack *conntrack) -{ - static u_int16_t id; - unsigned int range_size; - unsigned int i; - - range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1; - /* If no range specified... 
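gre_manip_pkt() above, like the ICMP, TCP and UDP manip routines that follow, fixes checksums with nf_proto_csum_replace2()/replace4() rather than recomputing them from scratch, i.e. the incremental one's-complement update from RFC 1624. A standalone sketch of that update for a single 16-bit field, in plain C only:

#include <stdint.h>
#include <stdio.h>

/* Incrementally update a one's-complement Internet checksum when a
 * 16-bit field changes from old to new (RFC 1624, eqn. 3):
 *     HC' = ~(~HC + ~m + m')
 * Values are kept in host byte order here for simplicity. */
static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new_val)
{
    uint32_t sum = (uint16_t)~check;

    sum += (uint16_t)~old;
    sum += new_val;
    while (sum >> 16)               /* fold carries back into 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    /* toy example: a field covered by checksum 0xbeef changes
     * from 0x1234 to 0xabcd */
    uint16_t updated = csum_update16(0xbeef, 0x1234, 0xabcd);

    printf("0x%04x\n", (unsigned)updated);
    return 0;
}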
*/ - if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) - range_size = 0xFFFF; - - for (i = 0; i < range_size; i++, id++) { - tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + - (id % range_size)); - if (!ip_nat_used_tuple(tuple, conntrack)) - return 1; - } - return 0; -} - -static int -icmp_manip_pkt(struct sk_buff **pskb, - unsigned int iphdroff, - const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype) -{ - struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); - struct icmphdr *hdr; - unsigned int hdroff = iphdroff + iph->ihl*4; - - if (!skb_make_writable(pskb, hdroff + sizeof(*hdr))) - return 0; - - hdr = (struct icmphdr *)((*pskb)->data + hdroff); - nf_proto_csum_replace2(&hdr->checksum, *pskb, - hdr->un.echo.id, tuple->src.u.icmp.id, 0); - hdr->un.echo.id = tuple->src.u.icmp.id; - return 1; -} - -struct ip_nat_protocol ip_nat_protocol_icmp = { - .name = "ICMP", - .protonum = IPPROTO_ICMP, - .me = THIS_MODULE, - .manip_pkt = icmp_manip_pkt, - .in_range = icmp_in_range, - .unique_tuple = icmp_unique_tuple, -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) - .range_to_nfattr = ip_nat_port_range_to_nfattr, - .nfattr_to_range = ip_nat_port_nfattr_to_range, -#endif -}; diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c deleted file mode 100644 index 14ff24f53a7a..000000000000 --- a/net/ipv4/netfilter/ip_nat_proto_tcp.c +++ /dev/null @@ -1,154 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static int -tcp_in_range(const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype, - const union ip_conntrack_manip_proto *min, - const union ip_conntrack_manip_proto *max) -{ - __be16 port; - - if (maniptype == IP_NAT_MANIP_SRC) - port = tuple->src.u.tcp.port; - else - port = tuple->dst.u.tcp.port; - - return ntohs(port) >= ntohs(min->tcp.port) - && ntohs(port) <= ntohs(max->tcp.port); -} - -static int -tcp_unique_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_nat_range *range, - enum ip_nat_manip_type maniptype, - const struct ip_conntrack *conntrack) -{ - static u_int16_t port; - __be16 *portptr; - unsigned int range_size, min, i; - - if (maniptype == IP_NAT_MANIP_SRC) - portptr = &tuple->src.u.tcp.port; - else - portptr = &tuple->dst.u.tcp.port; - - /* If no range specified... */ - if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { - /* If it's dst rewrite, can't change port */ - if (maniptype == IP_NAT_MANIP_DST) - return 0; - - /* Map privileged onto privileged. 
*/ - if (ntohs(*portptr) < 1024) { - /* Loose convention: >> 512 is credential passing */ - if (ntohs(*portptr)<512) { - min = 1; - range_size = 511 - min + 1; - } else { - min = 600; - range_size = 1023 - min + 1; - } - } else { - min = 1024; - range_size = 65535 - 1024 + 1; - } - } else { - min = ntohs(range->min.tcp.port); - range_size = ntohs(range->max.tcp.port) - min + 1; - } - - /* Start from random port to avoid prediction */ - if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) - port = net_random(); - - for (i = 0; i < range_size; i++, port++) { - *portptr = htons(min + port % range_size); - if (!ip_nat_used_tuple(tuple, conntrack)) { - return 1; - } - } - return 0; -} - -static int -tcp_manip_pkt(struct sk_buff **pskb, - unsigned int iphdroff, - const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype) -{ - struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); - struct tcphdr *hdr; - unsigned int hdroff = iphdroff + iph->ihl*4; - __be32 oldip, newip; - __be16 *portptr, newport, oldport; - int hdrsize = 8; /* TCP connection tracking guarantees this much */ - - /* this could be a inner header returned in icmp packet; in such - cases we cannot update the checksum field since it is outside of - the 8 bytes of transport layer headers we are guaranteed */ - if ((*pskb)->len >= hdroff + sizeof(struct tcphdr)) - hdrsize = sizeof(struct tcphdr); - - if (!skb_make_writable(pskb, hdroff + hdrsize)) - return 0; - - iph = (struct iphdr *)((*pskb)->data + iphdroff); - hdr = (struct tcphdr *)((*pskb)->data + hdroff); - - if (maniptype == IP_NAT_MANIP_SRC) { - /* Get rid of src ip and src pt */ - oldip = iph->saddr; - newip = tuple->src.ip; - newport = tuple->src.u.tcp.port; - portptr = &hdr->source; - } else { - /* Get rid of dst ip and dst pt */ - oldip = iph->daddr; - newip = tuple->dst.ip; - newport = tuple->dst.u.tcp.port; - portptr = &hdr->dest; - } - - oldport = *portptr; - *portptr = newport; - - if (hdrsize < sizeof(*hdr)) - return 1; - - nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1); - nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0); - return 1; -} - -struct ip_nat_protocol ip_nat_protocol_tcp = { - .name = "TCP", - .protonum = IPPROTO_TCP, - .me = THIS_MODULE, - .manip_pkt = tcp_manip_pkt, - .in_range = tcp_in_range, - .unique_tuple = tcp_unique_tuple, -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) - .range_to_nfattr = ip_nat_port_range_to_nfattr, - .nfattr_to_range = ip_nat_port_nfattr_to_range, -#endif -}; diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c deleted file mode 100644 index dfd521672891..000000000000 --- a/net/ipv4/netfilter/ip_nat_proto_udp.c +++ /dev/null @@ -1,144 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -static int -udp_in_range(const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype, - const union ip_conntrack_manip_proto *min, - const union ip_conntrack_manip_proto *max) -{ - __be16 port; - - if (maniptype == IP_NAT_MANIP_SRC) - port = tuple->src.u.udp.port; - else - port = tuple->dst.u.udp.port; - - return ntohs(port) >= ntohs(min->udp.port) - && ntohs(port) <= ntohs(max->udp.port); -} - -static int -udp_unique_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_nat_range *range, - enum ip_nat_manip_type maniptype, - const struct ip_conntrack *conntrack) -{ - static u_int16_t port; - __be16 *portptr; - unsigned int range_size, min, i; - - if (maniptype == IP_NAT_MANIP_SRC) - portptr = &tuple->src.u.udp.port; - else - portptr = &tuple->dst.u.udp.port; - - /* If no range specified... */ - if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { - /* If it's dst rewrite, can't change port */ - if (maniptype == IP_NAT_MANIP_DST) - return 0; - - if (ntohs(*portptr) < 1024) { - /* Loose convention: >> 512 is credential passing */ - if (ntohs(*portptr)<512) { - min = 1; - range_size = 511 - min + 1; - } else { - min = 600; - range_size = 1023 - min + 1; - } - } else { - min = 1024; - range_size = 65535 - 1024 + 1; - } - } else { - min = ntohs(range->min.udp.port); - range_size = ntohs(range->max.udp.port) - min + 1; - } - - /* Start from random port to avoid prediction */ - if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) - port = net_random(); - - for (i = 0; i < range_size; i++, port++) { - *portptr = htons(min + port % range_size); - if (!ip_nat_used_tuple(tuple, conntrack)) - return 1; - } - return 0; -} - -static int -udp_manip_pkt(struct sk_buff **pskb, - unsigned int iphdroff, - const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype) -{ - struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); - struct udphdr *hdr; - unsigned int hdroff = iphdroff + iph->ihl*4; - __be32 oldip, newip; - __be16 *portptr, newport; - - if (!skb_make_writable(pskb, hdroff + sizeof(*hdr))) - return 0; - - iph = (struct iphdr *)((*pskb)->data + iphdroff); - hdr = (struct udphdr *)((*pskb)->data + hdroff); - - if (maniptype == IP_NAT_MANIP_SRC) { - /* Get rid of src ip and src pt */ - oldip = iph->saddr; - newip = tuple->src.ip; - newport = tuple->src.u.udp.port; - portptr = &hdr->source; - } else { - /* Get rid of dst ip and dst pt */ - oldip = iph->daddr; - newip = tuple->dst.ip; - newport = tuple->dst.u.udp.port; - portptr = &hdr->dest; - } - - if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) { - nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1); - nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport, 0); - if (!hdr->check) - hdr->check = CSUM_MANGLED_0; - } - *portptr = newport; - return 1; -} - -struct ip_nat_protocol ip_nat_protocol_udp = { - .name = "UDP", - .protonum = IPPROTO_UDP, - .me = THIS_MODULE, - .manip_pkt = udp_manip_pkt, - .in_range = udp_in_range, - .unique_tuple = udp_unique_tuple, -#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ - defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) - .range_to_nfattr = ip_nat_port_range_to_nfattr, - .nfattr_to_range = ip_nat_port_nfattr_to_range, -#endif -}; diff --git a/net/ipv4/netfilter/ip_nat_proto_unknown.c b/net/ipv4/netfilter/ip_nat_proto_unknown.c deleted file mode 100644 index 3bf049517246..000000000000 --- a/net/ipv4/netfilter/ip_nat_proto_unknown.c +++ 
/dev/null @@ -1,55 +0,0 @@ -/* The "unknown" protocol. This is what is used for protocols we - * don't understand. It's returned by ip_ct_find_proto(). - */ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include - -#include -#include -#include - -static int unknown_in_range(const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type manip_type, - const union ip_conntrack_manip_proto *min, - const union ip_conntrack_manip_proto *max) -{ - return 1; -} - -static int unknown_unique_tuple(struct ip_conntrack_tuple *tuple, - const struct ip_nat_range *range, - enum ip_nat_manip_type maniptype, - const struct ip_conntrack *conntrack) -{ - /* Sorry: we can't help you; if it's not unique, we can't frob - anything. */ - return 0; -} - -static int -unknown_manip_pkt(struct sk_buff **pskb, - unsigned int iphdroff, - const struct ip_conntrack_tuple *tuple, - enum ip_nat_manip_type maniptype) -{ - return 1; -} - -struct ip_nat_protocol ip_nat_unknown_protocol = { - .name = "unknown", - /* .me isn't set: getting a ref to this cannot fail. */ - .manip_pkt = unknown_manip_pkt, - .in_range = unknown_in_range, - .unique_tuple = unknown_unique_tuple, -}; diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c deleted file mode 100644 index 25415a91e023..000000000000 --- a/net/ipv4/netfilter/ip_nat_rule.c +++ /dev/null @@ -1,314 +0,0 @@ -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* Everything about the rules for NAT. */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) -#endif - -#define NAT_VALID_HOOKS ((1<range[0], hooknum); -} - -/* Before 2.6.11 we did implicit source NAT if required. Warn about change. */ -static void warn_if_extra_mangle(__be32 dstip, __be32 srcip) -{ - static int warned = 0; - struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dstip } } }; - struct rtable *rt; - - if (ip_route_output_key(&rt, &fl) != 0) - return; - - if (rt->rt_src != srcip && !warned) { - printk("NAT: no longer support implicit source local NAT\n"); - printk("NAT: packet src %u.%u.%u.%u -> dst %u.%u.%u.%u\n", - NIPQUAD(srcip), NIPQUAD(dstip)); - warned = 1; - } - ip_rt_put(rt); -} - -static unsigned int ipt_dnat_target(struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const struct xt_target *target, - const void *targinfo) -{ - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - const struct ip_nat_multi_range_compat *mr = targinfo; - - IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING - || hooknum == NF_IP_LOCAL_OUT); - - ct = ip_conntrack_get(*pskb, &ctinfo); - - /* Connection must be valid and new. 
*/ - IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); - - if (hooknum == NF_IP_LOCAL_OUT - && mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) - warn_if_extra_mangle(ip_hdr(*pskb)->daddr, - mr->range[0].min_ip); - - return ip_nat_setup_info(ct, &mr->range[0], hooknum); -} - -static int ipt_snat_checkentry(const char *tablename, - const void *entry, - const struct xt_target *target, - void *targinfo, - unsigned int hook_mask) -{ - struct ip_nat_multi_range_compat *mr = targinfo; - - /* Must be a valid range */ - if (mr->rangesize != 1) { - printk("SNAT: multiple ranges no longer supported\n"); - return 0; - } - return 1; -} - -static int ipt_dnat_checkentry(const char *tablename, - const void *entry, - const struct xt_target *target, - void *targinfo, - unsigned int hook_mask) -{ - struct ip_nat_multi_range_compat *mr = targinfo; - - /* Must be a valid range */ - if (mr->rangesize != 1) { - printk("DNAT: multiple ranges no longer supported\n"); - return 0; - } - if (mr->range[0].flags & IP_NAT_RANGE_PROTO_RANDOM) { - printk("DNAT: port randomization not supported\n"); - return 0; - } - return 1; -} - -inline unsigned int -alloc_null_binding(struct ip_conntrack *conntrack, - struct ip_nat_info *info, - unsigned int hooknum) -{ - /* Force range to this IP; let proto decide mapping for - per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). - Use reply in case it's already been mangled (eg local packet). - */ - __be32 ip - = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC - ? conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip - : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip); - struct ip_nat_range range - = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } }; - - DEBUGP("Allocating NULL binding for %p (%u.%u.%u.%u)\n", conntrack, - NIPQUAD(ip)); - return ip_nat_setup_info(conntrack, &range, hooknum); -} - -unsigned int -alloc_null_binding_confirmed(struct ip_conntrack *conntrack, - struct ip_nat_info *info, - unsigned int hooknum) -{ - __be32 ip - = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC - ? conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip - : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip); - u_int16_t all - = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC - ? 
conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all - : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all); - struct ip_nat_range range - = { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } }; - - DEBUGP("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n", - conntrack, NIPQUAD(ip)); - return ip_nat_setup_info(conntrack, &range, hooknum); -} - -int ip_nat_rule_find(struct sk_buff **pskb, - unsigned int hooknum, - const struct net_device *in, - const struct net_device *out, - struct ip_conntrack *ct, - struct ip_nat_info *info) -{ - int ret; - - ret = ipt_do_table(pskb, hooknum, in, out, &nat_table); - - if (ret == NF_ACCEPT) { - if (!ip_nat_initialized(ct, HOOK2MANIP(hooknum))) - /* NUL mapping */ - ret = alloc_null_binding(ct, info, hooknum); - } - return ret; -} - -static struct xt_target ipt_snat_reg = { - .name = "SNAT", - .family = AF_INET, - .target = ipt_snat_target, - .targetsize = sizeof(struct ip_nat_multi_range_compat), - .table = "nat", - .hooks = 1 << NF_IP_POST_ROUTING, - .checkentry = ipt_snat_checkentry, -}; - -static struct xt_target ipt_dnat_reg = { - .name = "DNAT", - .family = AF_INET, - .target = ipt_dnat_target, - .targetsize = sizeof(struct ip_nat_multi_range_compat), - .table = "nat", - .hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT), - .checkentry = ipt_dnat_checkentry, -}; - -int __init ip_nat_rule_init(void) -{ - int ret; - - ret = ipt_register_table(&nat_table, &nat_initial_table.repl); - if (ret != 0) - return ret; - ret = xt_register_target(&ipt_snat_reg); - if (ret != 0) - goto unregister_table; - - ret = xt_register_target(&ipt_dnat_reg); - if (ret != 0) - goto unregister_snat; - - return ret; - - unregister_snat: - xt_unregister_target(&ipt_snat_reg); - unregister_table: - xt_unregister_table(&nat_table); - - return ret; -} - -void ip_nat_rule_cleanup(void) -{ - xt_unregister_target(&ipt_dnat_reg); - xt_unregister_target(&ipt_snat_reg); - ipt_unregister_table(&nat_table); -} diff --git a/net/ipv4/netfilter/ip_nat_sip.c b/net/ipv4/netfilter/ip_nat_sip.c deleted file mode 100644 index 84953601762d..000000000000 --- a/net/ipv4/netfilter/ip_nat_sip.c +++ /dev/null @@ -1,282 +0,0 @@ -/* SIP extension for UDP NAT alteration. - * - * (C) 2005 by Christian Hentschel - * based on RR's ip_nat_ftp.c and other modules. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Hentschel "); -MODULE_DESCRIPTION("SIP NAT helper"); - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) 
-#endif - -struct addr_map { - struct { - char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; - char dst[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; - unsigned int srclen, srciplen; - unsigned int dstlen, dstiplen; - } addr[IP_CT_DIR_MAX]; -}; - -static void addr_map_init(struct ip_conntrack *ct, struct addr_map *map) -{ - struct ip_conntrack_tuple *t; - enum ip_conntrack_dir dir; - unsigned int n; - - for (dir = 0; dir < IP_CT_DIR_MAX; dir++) { - t = &ct->tuplehash[dir].tuple; - - n = sprintf(map->addr[dir].src, "%u.%u.%u.%u", - NIPQUAD(t->src.ip)); - map->addr[dir].srciplen = n; - n += sprintf(map->addr[dir].src + n, ":%u", - ntohs(t->src.u.udp.port)); - map->addr[dir].srclen = n; - - n = sprintf(map->addr[dir].dst, "%u.%u.%u.%u", - NIPQUAD(t->dst.ip)); - map->addr[dir].dstiplen = n; - n += sprintf(map->addr[dir].dst + n, ":%u", - ntohs(t->dst.u.udp.port)); - map->addr[dir].dstlen = n; - } -} - -static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, - struct ip_conntrack *ct, const char **dptr, size_t dlen, - enum sip_header_pos pos, struct addr_map *map) -{ - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - unsigned int matchlen, matchoff, addrlen; - char *addr; - - if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, pos) <= 0) - return 1; - - if ((matchlen == map->addr[dir].srciplen || - matchlen == map->addr[dir].srclen) && - memcmp(*dptr + matchoff, map->addr[dir].src, matchlen) == 0) { - addr = map->addr[!dir].dst; - addrlen = map->addr[!dir].dstlen; - } else if ((matchlen == map->addr[dir].dstiplen || - matchlen == map->addr[dir].dstlen) && - memcmp(*dptr + matchoff, map->addr[dir].dst, matchlen) == 0) { - addr = map->addr[!dir].src; - addrlen = map->addr[!dir].srclen; - } else - return 1; - - if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, - matchoff, matchlen, addr, addrlen)) - return 0; - *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr); - return 1; - -} - -static unsigned int ip_nat_sip(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack *ct, - const char **dptr) -{ - enum sip_header_pos pos; - struct addr_map map; - int dataoff, datalen; - - dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); - datalen = (*pskb)->len - dataoff; - if (datalen < sizeof("SIP/2.0") - 1) - return NF_DROP; - - addr_map_init(ct, &map); - - /* Basic rules: requests and responses. */ - if (strncmp(*dptr, "SIP/2.0", sizeof("SIP/2.0") - 1) != 0) { - /* 10.2: Constructing the REGISTER Request: - * - * The "userinfo" and "@" components of the SIP URI MUST NOT - * be present. 
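addr_map_init() above pre-renders, per direction, the textual address forms a SIP header may contain, recording both the "a.b.c.d" length and the "a.b.c.d:port" length so map_sip_addr() can match and substitute either form byte for byte. A standalone sketch of that pre-rendering, using a hypothetical helper and host-order inputs:

#include <stdint.h>
#include <stdio.h>

struct sip_addr_text {
    char buf[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
    unsigned int iplen;     /* length of the "a.b.c.d" part only */
    unsigned int len;       /* length including ":port" */
};

/* Render ip:port the way addr_map_init() does, remembering both lengths
 * so either the address-only or the address:port form can be matched. */
static void sip_addr_text_init(struct sip_addr_text *t,
                               uint32_t ip, uint16_t port)
{
    t->iplen = (unsigned int)snprintf(t->buf, sizeof(t->buf),
                                      "%u.%u.%u.%u",
                                      (ip >> 24) & 0xff, (ip >> 16) & 0xff,
                                      (ip >> 8) & 0xff, ip & 0xff);
    t->len = t->iplen +
             (unsigned int)snprintf(t->buf + t->iplen,
                                    sizeof(t->buf) - t->iplen,
                                    ":%u", port);
}

int main(void)
{
    struct sip_addr_text t;

    sip_addr_text_init(&t, 0xc0a80001u, 5060);      /* 192.168.0.1:5060 */
    printf("%s (%u/%u)\n", t.buf, t.iplen, t.len);  /* prints 11/16 */
    return 0;
}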
- */ - if (datalen >= sizeof("REGISTER") - 1 && - strncmp(*dptr, "REGISTER", sizeof("REGISTER") - 1) == 0) - pos = POS_REG_REQ_URI; - else - pos = POS_REQ_URI; - - if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, pos, &map)) - return NF_DROP; - } - - if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_FROM, &map) || - !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_TO, &map) || - !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_VIA, &map) || - !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_CONTACT, &map)) - return NF_DROP; - return NF_ACCEPT; -} - -static unsigned int mangle_sip_packet(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack *ct, - const char **dptr, size_t dlen, - char *buffer, int bufflen, - enum sip_header_pos pos) -{ - unsigned int matchlen, matchoff; - - if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, pos) <= 0) - return 0; - - if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, - matchoff, matchlen, buffer, bufflen)) - return 0; - - /* We need to reload this. Thanks Patrick. */ - *dptr = (*pskb)->data + ip_hdrlen(*pskb) + sizeof(struct udphdr); - return 1; -} - -static int mangle_content_len(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack *ct, - const char *dptr) -{ - unsigned int dataoff, matchoff, matchlen; - char buffer[sizeof("65536")]; - int bufflen; - - dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); - - /* Get actual SDP lenght */ - if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, - &matchlen, POS_SDP_HEADER) > 0) { - - /* since ct_sip_get_info() give us a pointer passing 'v=' - we need to add 2 bytes in this count. */ - int c_len = (*pskb)->len - dataoff - matchoff + 2; - - /* Now, update SDP lenght */ - if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, - &matchlen, POS_CONTENT) > 0) { - - bufflen = sprintf(buffer, "%u", c_len); - - return ip_nat_mangle_udp_packet(pskb, ct, ctinfo, - matchoff, matchlen, - buffer, bufflen); - } - } - return 0; -} - -static unsigned int mangle_sdp(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack *ct, - __be32 newip, u_int16_t port, - const char *dptr) -{ - char buffer[sizeof("nnn.nnn.nnn.nnn")]; - unsigned int dataoff, bufflen; - - dataoff = ip_hdrlen(*pskb) + sizeof(struct udphdr); - - /* Mangle owner and contact info. */ - bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); - if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, - buffer, bufflen, POS_OWNER)) - return 0; - - if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, - buffer, bufflen, POS_CONNECTION)) - return 0; - - /* Mangle media port. */ - bufflen = sprintf(buffer, "%u", port); - if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, - buffer, bufflen, POS_MEDIA)) - return 0; - - return mangle_content_len(pskb, ctinfo, ct, dptr); -} - -/* So, this packet has hit the connection tracking matching code. - Mangle it, and change the expectation to match the new version. 
*/ -static unsigned int ip_nat_sdp(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack_expect *exp, - const char *dptr) -{ - struct ip_conntrack *ct = exp->master; - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - __be32 newip; - u_int16_t port; - - DEBUGP("ip_nat_sdp():\n"); - - /* Connection will come from reply */ - newip = ct->tuplehash[!dir].tuple.dst.ip; - - exp->tuple.dst.ip = newip; - exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port; - exp->dir = !dir; - - /* When you see the packet, we need to NAT it the same as the - this one. */ - exp->expectfn = ip_nat_follow_master; - - /* Try to get same port: if not, try to change it. */ - for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) { - exp->tuple.dst.u.udp.port = htons(port); - if (ip_conntrack_expect_related(exp) == 0) - break; - } - - if (port == 0) - return NF_DROP; - - if (!mangle_sdp(pskb, ctinfo, ct, newip, port, dptr)) { - ip_conntrack_unexpect_related(exp); - return NF_DROP; - } - return NF_ACCEPT; -} - -static void __exit fini(void) -{ - rcu_assign_pointer(ip_nat_sip_hook, NULL); - rcu_assign_pointer(ip_nat_sdp_hook, NULL); - synchronize_rcu(); -} - -static int __init init(void) -{ - BUG_ON(rcu_dereference(ip_nat_sip_hook)); - BUG_ON(rcu_dereference(ip_nat_sdp_hook)); - rcu_assign_pointer(ip_nat_sip_hook, ip_nat_sip); - rcu_assign_pointer(ip_nat_sdp_hook, ip_nat_sdp); - return 0; -} - -module_init(init); -module_exit(fini); diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c deleted file mode 100644 index 025e04587789..000000000000 --- a/net/ipv4/netfilter/ip_nat_snmp_basic.c +++ /dev/null @@ -1,1333 +0,0 @@ -/* - * ip_nat_snmp_basic.c - * - * Basic SNMP Application Layer Gateway - * - * This IP NAT module is intended for use with SNMP network - * discovery and monitoring applications where target networks use - * conflicting private address realms. - * - * Static NAT is used to remap the networks from the view of the network - * management system at the IP layer, and this module remaps some application - * layer addresses to match. - * - * The simplest form of ALG is performed, where only tagged IP addresses - * are modified. The module does not need to be MIB aware and only scans - * messages at the ASN.1/BER level. - * - * Currently, only SNMPv1 and SNMPv2 are supported. - * - * More information on ALG and associated issues can be found in - * RFC 2962 - * - * The ASB.1/BER parsing code is derived from the gxsnmp package by Gregory - * McLean & Jochen Friedrich, stripped down for use in the kernel. - * - * Copyright (c) 2000 RP Internet (www.rpi.net.au). - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * Author: James Morris - * - * Updates: - * 2000-08-06: Convert to new helper API (Harald Welte). 
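The header above describes the whole trick: the NAT on the wire is static, so the ALG only needs to rewrite the first octet of addresses it finds inside SNMP payloads, which is what the oct1_map below captures. A standalone sketch of that first-octet remapping, with toy prefixes and no kernel code:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the module's oct1_map idea: only the leading octet of an
 * IPv4 address is rewritten, mimicking a static NAT that maps, say,
 * 10.x.x.x to 192.x.x.x at the IP layer. */
struct oct1_map {
    uint8_t from;
    uint8_t to;
};

static uint32_t remap_first_octet(uint32_t addr_host_order,
                                  const struct oct1_map *m)
{
    if ((addr_host_order >> 24) != m->from)
        return addr_host_order;
    return ((uint32_t)m->to << 24) | (addr_host_order & 0x00ffffffu);
}

int main(void)
{
    struct oct1_map m = { .from = 10, .to = 192 };
    uint32_t mapped = remap_first_octet(0x0a000001u, &m);  /* 10.0.0.1 */

    printf("%u.%u.%u.%u\n", mapped >> 24, (mapped >> 16) & 0xff,
           (mapped >> 8) & 0xff, mapped & 0xff);            /* 192.0.0.1 */
    return 0;
}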
- * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("James Morris "); -MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway"); - -#define SNMP_PORT 161 -#define SNMP_TRAP_PORT 162 -#define NOCT1(n) (*(u8 *)n) - -static int debug; -static DEFINE_SPINLOCK(snmp_lock); - -/* - * Application layer address mapping mimics the NAT mapping, but - * only for the first octet in this case (a more flexible system - * can be implemented if needed). - */ -struct oct1_map -{ - u_int8_t from; - u_int8_t to; -}; - - -/***************************************************************************** - * - * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse) - * - *****************************************************************************/ - -/* Class */ -#define ASN1_UNI 0 /* Universal */ -#define ASN1_APL 1 /* Application */ -#define ASN1_CTX 2 /* Context */ -#define ASN1_PRV 3 /* Private */ - -/* Tag */ -#define ASN1_EOC 0 /* End Of Contents */ -#define ASN1_BOL 1 /* Boolean */ -#define ASN1_INT 2 /* Integer */ -#define ASN1_BTS 3 /* Bit String */ -#define ASN1_OTS 4 /* Octet String */ -#define ASN1_NUL 5 /* Null */ -#define ASN1_OJI 6 /* Object Identifier */ -#define ASN1_OJD 7 /* Object Description */ -#define ASN1_EXT 8 /* External */ -#define ASN1_SEQ 16 /* Sequence */ -#define ASN1_SET 17 /* Set */ -#define ASN1_NUMSTR 18 /* Numerical String */ -#define ASN1_PRNSTR 19 /* Printable String */ -#define ASN1_TEXSTR 20 /* Teletext String */ -#define ASN1_VIDSTR 21 /* Video String */ -#define ASN1_IA5STR 22 /* IA5 String */ -#define ASN1_UNITIM 23 /* Universal Time */ -#define ASN1_GENTIM 24 /* General Time */ -#define ASN1_GRASTR 25 /* Graphical String */ -#define ASN1_VISSTR 26 /* Visible String */ -#define ASN1_GENSTR 27 /* General String */ - -/* Primitive / Constructed methods*/ -#define ASN1_PRI 0 /* Primitive */ -#define ASN1_CON 1 /* Constructed */ - -/* - * Error codes. - */ -#define ASN1_ERR_NOERROR 0 -#define ASN1_ERR_DEC_EMPTY 2 -#define ASN1_ERR_DEC_EOC_MISMATCH 3 -#define ASN1_ERR_DEC_LENGTH_MISMATCH 4 -#define ASN1_ERR_DEC_BADVALUE 5 - -/* - * ASN.1 context. 
- */ -struct asn1_ctx -{ - int error; /* Error condition */ - unsigned char *pointer; /* Octet just to be decoded */ - unsigned char *begin; /* First octet */ - unsigned char *end; /* Octet after last octet */ -}; - -/* - * Octet string (not null terminated) - */ -struct asn1_octstr -{ - unsigned char *data; - unsigned int len; -}; - -static void asn1_open(struct asn1_ctx *ctx, - unsigned char *buf, - unsigned int len) -{ - ctx->begin = buf; - ctx->end = buf + len; - ctx->pointer = buf; - ctx->error = ASN1_ERR_NOERROR; -} - -static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) -{ - if (ctx->pointer >= ctx->end) { - ctx->error = ASN1_ERR_DEC_EMPTY; - return 0; - } - *ch = *(ctx->pointer)++; - return 1; -} - -static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) -{ - unsigned char ch; - - *tag = 0; - - do - { - if (!asn1_octet_decode(ctx, &ch)) - return 0; - *tag <<= 7; - *tag |= ch & 0x7F; - } while ((ch & 0x80) == 0x80); - return 1; -} - -static unsigned char asn1_id_decode(struct asn1_ctx *ctx, - unsigned int *cls, - unsigned int *con, - unsigned int *tag) -{ - unsigned char ch; - - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - *cls = (ch & 0xC0) >> 6; - *con = (ch & 0x20) >> 5; - *tag = (ch & 0x1F); - - if (*tag == 0x1F) { - if (!asn1_tag_decode(ctx, tag)) - return 0; - } - return 1; -} - -static unsigned char asn1_length_decode(struct asn1_ctx *ctx, - unsigned int *def, - unsigned int *len) -{ - unsigned char ch, cnt; - - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - if (ch == 0x80) - *def = 0; - else { - *def = 1; - - if (ch < 0x80) - *len = ch; - else { - cnt = (unsigned char) (ch & 0x7F); - *len = 0; - - while (cnt > 0) { - if (!asn1_octet_decode(ctx, &ch)) - return 0; - *len <<= 8; - *len |= ch; - cnt--; - } - } - } - return 1; -} - -static unsigned char asn1_header_decode(struct asn1_ctx *ctx, - unsigned char **eoc, - unsigned int *cls, - unsigned int *con, - unsigned int *tag) -{ - unsigned int def, len; - - if (!asn1_id_decode(ctx, cls, con, tag)) - return 0; - - def = len = 0; - if (!asn1_length_decode(ctx, &def, &len)) - return 0; - - if (def) - *eoc = ctx->pointer + len; - else - *eoc = NULL; - return 1; -} - -static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc) -{ - unsigned char ch; - - if (eoc == 0) { - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - if (ch != 0x00) { - ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; - return 0; - } - - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - if (ch != 0x00) { - ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; - return 0; - } - return 1; - } else { - if (ctx->pointer != eoc) { - ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH; - return 0; - } - return 1; - } -} - -static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc) -{ - ctx->pointer = eoc; - return 1; -} - -static unsigned char asn1_long_decode(struct asn1_ctx *ctx, - unsigned char *eoc, - long *integer) -{ - unsigned char ch; - unsigned int len; - - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - *integer = (signed char) ch; - len = 1; - - while (ctx->pointer < eoc) { - if (++len > sizeof (long)) { - ctx->error = ASN1_ERR_DEC_BADVALUE; - return 0; - } - - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - *integer <<= 8; - *integer |= ch; - } - return 1; -} - -static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, - unsigned char *eoc, - unsigned int *integer) -{ - unsigned char ch; - unsigned int len; - - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - *integer = ch; - if (ch == 
0) len = 0; - else len = 1; - - while (ctx->pointer < eoc) { - if (++len > sizeof (unsigned int)) { - ctx->error = ASN1_ERR_DEC_BADVALUE; - return 0; - } - - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - *integer <<= 8; - *integer |= ch; - } - return 1; -} - -static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, - unsigned char *eoc, - unsigned long *integer) -{ - unsigned char ch; - unsigned int len; - - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - *integer = ch; - if (ch == 0) len = 0; - else len = 1; - - while (ctx->pointer < eoc) { - if (++len > sizeof (unsigned long)) { - ctx->error = ASN1_ERR_DEC_BADVALUE; - return 0; - } - - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - *integer <<= 8; - *integer |= ch; - } - return 1; -} - -static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, - unsigned char *eoc, - unsigned char **octets, - unsigned int *len) -{ - unsigned char *ptr; - - *len = 0; - - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); - if (*octets == NULL) { - if (net_ratelimit()) - printk("OOM in bsalg (%d)\n", __LINE__); - return 0; - } - - ptr = *octets; - while (ctx->pointer < eoc) { - if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) { - kfree(*octets); - *octets = NULL; - return 0; - } - (*len)++; - } - return 1; -} - -static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, - unsigned long *subid) -{ - unsigned char ch; - - *subid = 0; - - do { - if (!asn1_octet_decode(ctx, &ch)) - return 0; - - *subid <<= 7; - *subid |= ch & 0x7F; - } while ((ch & 0x80) == 0x80); - return 1; -} - -static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, - unsigned char *eoc, - unsigned long **oid, - unsigned int *len) -{ - unsigned long subid; - unsigned int size; - unsigned long *optr; - - size = eoc - ctx->pointer + 1; - *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); - if (*oid == NULL) { - if (net_ratelimit()) - printk("OOM in bsalg (%d)\n", __LINE__); - return 0; - } - - optr = *oid; - - if (!asn1_subid_decode(ctx, &subid)) { - kfree(*oid); - *oid = NULL; - return 0; - } - - if (subid < 40) { - optr [0] = 0; - optr [1] = subid; - } else if (subid < 80) { - optr [0] = 1; - optr [1] = subid - 40; - } else { - optr [0] = 2; - optr [1] = subid - 80; - } - - *len = 2; - optr += 2; - - while (ctx->pointer < eoc) { - if (++(*len) > size) { - ctx->error = ASN1_ERR_DEC_BADVALUE; - kfree(*oid); - *oid = NULL; - return 0; - } - - if (!asn1_subid_decode(ctx, optr++)) { - kfree(*oid); - *oid = NULL; - return 0; - } - } - return 1; -} - -/***************************************************************************** - * - * SNMP decoding routines (gxsnmp author Dirk Wisse) - * - *****************************************************************************/ - -/* SNMP Versions */ -#define SNMP_V1 0 -#define SNMP_V2C 1 -#define SNMP_V2 2 -#define SNMP_V3 3 - -/* Default Sizes */ -#define SNMP_SIZE_COMM 256 -#define SNMP_SIZE_OBJECTID 128 -#define SNMP_SIZE_BUFCHR 256 -#define SNMP_SIZE_BUFINT 128 -#define SNMP_SIZE_SMALLOBJECTID 16 - -/* Requests */ -#define SNMP_PDU_GET 0 -#define SNMP_PDU_NEXT 1 -#define SNMP_PDU_RESPONSE 2 -#define SNMP_PDU_SET 3 -#define SNMP_PDU_TRAP1 4 -#define SNMP_PDU_BULK 5 -#define SNMP_PDU_INFORM 6 -#define SNMP_PDU_TRAP2 7 - -/* Errors */ -#define SNMP_NOERROR 0 -#define SNMP_TOOBIG 1 -#define SNMP_NOSUCHNAME 2 -#define SNMP_BADVALUE 3 -#define SNMP_READONLY 4 -#define SNMP_GENERROR 5 -#define SNMP_NOACCESS 6 -#define SNMP_WRONGTYPE 7 -#define SNMP_WRONGLENGTH 8 -#define SNMP_WRONGENCODING 9 -#define SNMP_WRONGVALUE 10 
-#define SNMP_NOCREATION 11 -#define SNMP_INCONSISTENTVALUE 12 -#define SNMP_RESOURCEUNAVAILABLE 13 -#define SNMP_COMMITFAILED 14 -#define SNMP_UNDOFAILED 15 -#define SNMP_AUTHORIZATIONERROR 16 -#define SNMP_NOTWRITABLE 17 -#define SNMP_INCONSISTENTNAME 18 - -/* General SNMP V1 Traps */ -#define SNMP_TRAP_COLDSTART 0 -#define SNMP_TRAP_WARMSTART 1 -#define SNMP_TRAP_LINKDOWN 2 -#define SNMP_TRAP_LINKUP 3 -#define SNMP_TRAP_AUTFAILURE 4 -#define SNMP_TRAP_EQPNEIGHBORLOSS 5 -#define SNMP_TRAP_ENTSPECIFIC 6 - -/* SNMPv1 Types */ -#define SNMP_NULL 0 -#define SNMP_INTEGER 1 /* l */ -#define SNMP_OCTETSTR 2 /* c */ -#define SNMP_DISPLAYSTR 2 /* c */ -#define SNMP_OBJECTID 3 /* ul */ -#define SNMP_IPADDR 4 /* uc */ -#define SNMP_COUNTER 5 /* ul */ -#define SNMP_GAUGE 6 /* ul */ -#define SNMP_TIMETICKS 7 /* ul */ -#define SNMP_OPAQUE 8 /* c */ - -/* Additional SNMPv2 Types */ -#define SNMP_UINTEGER 5 /* ul */ -#define SNMP_BITSTR 9 /* uc */ -#define SNMP_NSAP 10 /* uc */ -#define SNMP_COUNTER64 11 /* ul */ -#define SNMP_NOSUCHOBJECT 12 -#define SNMP_NOSUCHINSTANCE 13 -#define SNMP_ENDOFMIBVIEW 14 - -union snmp_syntax -{ - unsigned char uc[0]; /* 8 bit unsigned */ - char c[0]; /* 8 bit signed */ - unsigned long ul[0]; /* 32 bit unsigned */ - long l[0]; /* 32 bit signed */ -}; - -struct snmp_object -{ - unsigned long *id; - unsigned int id_len; - unsigned short type; - unsigned int syntax_len; - union snmp_syntax syntax; -}; - -struct snmp_request -{ - unsigned long id; - unsigned int error_status; - unsigned int error_index; -}; - -struct snmp_v1_trap -{ - unsigned long *id; - unsigned int id_len; - unsigned long ip_address; /* pointer */ - unsigned int general; - unsigned int specific; - unsigned long time; -}; - -/* SNMP types */ -#define SNMP_IPA 0 -#define SNMP_CNT 1 -#define SNMP_GGE 2 -#define SNMP_TIT 3 -#define SNMP_OPQ 4 -#define SNMP_C64 6 - -/* SNMP errors */ -#define SERR_NSO 0 -#define SERR_NSI 1 -#define SERR_EOM 2 - -static inline void mangle_address(unsigned char *begin, - unsigned char *addr, - const struct oct1_map *map, - __sum16 *check); -struct snmp_cnv -{ - unsigned int class; - unsigned int tag; - int syntax; -}; - -static struct snmp_cnv snmp_conv [] = -{ - {ASN1_UNI, ASN1_NUL, SNMP_NULL}, - {ASN1_UNI, ASN1_INT, SNMP_INTEGER}, - {ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR}, - {ASN1_UNI, ASN1_OTS, SNMP_DISPLAYSTR}, - {ASN1_UNI, ASN1_OJI, SNMP_OBJECTID}, - {ASN1_APL, SNMP_IPA, SNMP_IPADDR}, - {ASN1_APL, SNMP_CNT, SNMP_COUNTER}, /* Counter32 */ - {ASN1_APL, SNMP_GGE, SNMP_GAUGE}, /* Gauge32 == Unsigned32 */ - {ASN1_APL, SNMP_TIT, SNMP_TIMETICKS}, - {ASN1_APL, SNMP_OPQ, SNMP_OPAQUE}, - - /* SNMPv2 data types and errors */ - {ASN1_UNI, ASN1_BTS, SNMP_BITSTR}, - {ASN1_APL, SNMP_C64, SNMP_COUNTER64}, - {ASN1_CTX, SERR_NSO, SNMP_NOSUCHOBJECT}, - {ASN1_CTX, SERR_NSI, SNMP_NOSUCHINSTANCE}, - {ASN1_CTX, SERR_EOM, SNMP_ENDOFMIBVIEW}, - {0, 0, -1} -}; - -static unsigned char snmp_tag_cls2syntax(unsigned int tag, - unsigned int cls, - unsigned short *syntax) -{ - struct snmp_cnv *cnv; - - cnv = snmp_conv; - - while (cnv->syntax != -1) { - if (cnv->tag == tag && cnv->class == cls) { - *syntax = cnv->syntax; - return 1; - } - cnv++; - } - return 0; -} - -static unsigned char snmp_object_decode(struct asn1_ctx *ctx, - struct snmp_object **obj) -{ - unsigned int cls, con, tag, len, idlen; - unsigned short type; - unsigned char *eoc, *end, *p; - unsigned long *lp, *id; - unsigned long ul; - long l; - - *obj = NULL; - id = NULL; - - if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag)) - return 0; - - 
if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) - return 0; - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) - return 0; - - if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) - return 0; - - if (!asn1_oid_decode(ctx, end, &id, &idlen)) - return 0; - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) { - kfree(id); - return 0; - } - - if (con != ASN1_PRI) { - kfree(id); - return 0; - } - - type = 0; - if (!snmp_tag_cls2syntax(tag, cls, &type)) { - kfree(id); - return 0; - } - - l = 0; - switch (type) { - case SNMP_INTEGER: - len = sizeof(long); - if (!asn1_long_decode(ctx, end, &l)) { - kfree(id); - return 0; - } - *obj = kmalloc(sizeof(struct snmp_object) + len, - GFP_ATOMIC); - if (*obj == NULL) { - kfree(id); - if (net_ratelimit()) - printk("OOM in bsalg (%d)\n", __LINE__); - return 0; - } - (*obj)->syntax.l[0] = l; - break; - case SNMP_OCTETSTR: - case SNMP_OPAQUE: - if (!asn1_octets_decode(ctx, end, &p, &len)) { - kfree(id); - return 0; - } - *obj = kmalloc(sizeof(struct snmp_object) + len, - GFP_ATOMIC); - if (*obj == NULL) { - kfree(id); - if (net_ratelimit()) - printk("OOM in bsalg (%d)\n", __LINE__); - return 0; - } - memcpy((*obj)->syntax.c, p, len); - kfree(p); - break; - case SNMP_NULL: - case SNMP_NOSUCHOBJECT: - case SNMP_NOSUCHINSTANCE: - case SNMP_ENDOFMIBVIEW: - len = 0; - *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); - if (*obj == NULL) { - kfree(id); - if (net_ratelimit()) - printk("OOM in bsalg (%d)\n", __LINE__); - return 0; - } - if (!asn1_null_decode(ctx, end)) { - kfree(id); - kfree(*obj); - *obj = NULL; - return 0; - } - break; - case SNMP_OBJECTID: - if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) { - kfree(id); - return 0; - } - len *= sizeof(unsigned long); - *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); - if (*obj == NULL) { - kfree(lp); - kfree(id); - if (net_ratelimit()) - printk("OOM in bsalg (%d)\n", __LINE__); - return 0; - } - memcpy((*obj)->syntax.ul, lp, len); - kfree(lp); - break; - case SNMP_IPADDR: - if (!asn1_octets_decode(ctx, end, &p, &len)) { - kfree(id); - return 0; - } - if (len != 4) { - kfree(p); - kfree(id); - return 0; - } - *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); - if (*obj == NULL) { - kfree(p); - kfree(id); - if (net_ratelimit()) - printk("OOM in bsalg (%d)\n", __LINE__); - return 0; - } - memcpy((*obj)->syntax.uc, p, len); - kfree(p); - break; - case SNMP_COUNTER: - case SNMP_GAUGE: - case SNMP_TIMETICKS: - len = sizeof(unsigned long); - if (!asn1_ulong_decode(ctx, end, &ul)) { - kfree(id); - return 0; - } - *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); - if (*obj == NULL) { - kfree(id); - if (net_ratelimit()) - printk("OOM in bsalg (%d)\n", __LINE__); - return 0; - } - (*obj)->syntax.ul[0] = ul; - break; - default: - kfree(id); - return 0; - } - - (*obj)->syntax_len = len; - (*obj)->type = type; - (*obj)->id = id; - (*obj)->id_len = idlen; - - if (!asn1_eoc_decode(ctx, eoc)) { - kfree(id); - kfree(*obj); - *obj = NULL; - return 0; - } - return 1; -} - -static unsigned char snmp_request_decode(struct asn1_ctx *ctx, - struct snmp_request *request) -{ - unsigned int cls, con, tag; - unsigned char *end; - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) - return 0; - - if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) - return 0; - - if (!asn1_ulong_decode(ctx, end, &request->id)) - return 0; - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) - return 0; - - if (cls != ASN1_UNI || con != ASN1_PRI || tag != 
ASN1_INT) - return 0; - - if (!asn1_uint_decode(ctx, end, &request->error_status)) - return 0; - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) - return 0; - - if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) - return 0; - - if (!asn1_uint_decode(ctx, end, &request->error_index)) - return 0; - - return 1; -} - -/* - * Fast checksum update for possibly oddly-aligned UDP byte, from the - * code example in the draft. - */ -static void fast_csum(__sum16 *csum, - const unsigned char *optr, - const unsigned char *nptr, - int offset) -{ - unsigned char s[4]; - - if (offset & 1) { - s[0] = s[2] = 0; - s[1] = ~*optr; - s[3] = *nptr; - } else { - s[1] = s[3] = 0; - s[0] = ~*optr; - s[2] = *nptr; - } - - *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); -} - -/* - * Mangle IP address. - * - begin points to the start of the snmp messgae - * - addr points to the start of the address - */ -static inline void mangle_address(unsigned char *begin, - unsigned char *addr, - const struct oct1_map *map, - __sum16 *check) -{ - if (map->from == NOCT1(addr)) { - u_int32_t old; - - if (debug) - memcpy(&old, (unsigned char *)addr, sizeof(old)); - - *addr = map->to; - - /* Update UDP checksum if being used */ - if (*check) { - fast_csum(check, - &map->from, &map->to, addr - begin); - } - - if (debug) - printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to " - "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr)); - } -} - -static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, - struct snmp_v1_trap *trap, - const struct oct1_map *map, - __sum16 *check) -{ - unsigned int cls, con, tag, len; - unsigned char *end; - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) - return 0; - - if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) - return 0; - - if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len)) - return 0; - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) - goto err_id_free; - - if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) || - (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS))) - goto err_id_free; - - if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len)) - goto err_id_free; - - /* IPv4 only */ - if (len != 4) - goto err_addr_free; - - mangle_address(ctx->begin, ctx->pointer - 4, map, check); - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) - goto err_addr_free; - - if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) - goto err_addr_free; - - if (!asn1_uint_decode(ctx, end, &trap->general)) - goto err_addr_free; - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) - goto err_addr_free; - - if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) - goto err_addr_free; - - if (!asn1_uint_decode(ctx, end, &trap->specific)) - goto err_addr_free; - - if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) - goto err_addr_free; - - if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) || - (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT))) - goto err_addr_free; - - if (!asn1_ulong_decode(ctx, end, &trap->time)) - goto err_addr_free; - - return 1; - -err_addr_free: - kfree((unsigned long *)trap->ip_address); - -err_id_free: - kfree(trap->id); - - return 0; -} - -/***************************************************************************** - * - * Misc. 
routines - * - *****************************************************************************/ - -static void hex_dump(unsigned char *buf, size_t len) -{ - size_t i; - - for (i = 0; i < len; i++) { - if (i && !(i % 16)) - printk("\n"); - printk("%02x ", *(buf + i)); - } - printk("\n"); -} - -/* - * Parse and mangle SNMP message according to mapping. - * (And this is the fucking 'basic' method). - */ -static int snmp_parse_mangle(unsigned char *msg, - u_int16_t len, - const struct oct1_map *map, - __sum16 *check) -{ - unsigned char *eoc, *end; - unsigned int cls, con, tag, vers, pdutype; - struct asn1_ctx ctx; - struct asn1_octstr comm; - struct snmp_object **obj; - - if (debug > 1) - hex_dump(msg, len); - - asn1_open(&ctx, msg, len); - - /* - * Start of SNMP message. - */ - if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) - return 0; - if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) - return 0; - - /* - * Version 1 or 2 handled. - */ - if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag)) - return 0; - if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) - return 0; - if (!asn1_uint_decode (&ctx, end, &vers)) - return 0; - if (debug > 1) - printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1); - if (vers > 1) - return 1; - - /* - * Community. - */ - if (!asn1_header_decode (&ctx, &end, &cls, &con, &tag)) - return 0; - if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OTS) - return 0; - if (!asn1_octets_decode(&ctx, end, &comm.data, &comm.len)) - return 0; - if (debug > 1) { - unsigned int i; - - printk(KERN_DEBUG "bsalg: community: "); - for (i = 0; i < comm.len; i++) - printk("%c", comm.data[i]); - printk("\n"); - } - kfree(comm.data); - - /* - * PDU type - */ - if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &pdutype)) - return 0; - if (cls != ASN1_CTX || con != ASN1_CON) - return 0; - if (debug > 1) { - unsigned char *pdus[] = { - [SNMP_PDU_GET] = "get", - [SNMP_PDU_NEXT] = "get-next", - [SNMP_PDU_RESPONSE] = "response", - [SNMP_PDU_SET] = "set", - [SNMP_PDU_TRAP1] = "trapv1", - [SNMP_PDU_BULK] = "bulk", - [SNMP_PDU_INFORM] = "inform", - [SNMP_PDU_TRAP2] = "trapv2" - }; - - if (pdutype > SNMP_PDU_TRAP2) - printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype); - else - printk(KERN_DEBUG "bsalg: pdu: %s\n", pdus[pdutype]); - } - if (pdutype != SNMP_PDU_RESPONSE && - pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2) - return 1; - - /* - * Request header or v1 trap - */ - if (pdutype == SNMP_PDU_TRAP1) { - struct snmp_v1_trap trap; - unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); - - if (ret) { - kfree(trap.id); - kfree((unsigned long *)trap.ip_address); - } else - return ret; - - } else { - struct snmp_request req; - - if (!snmp_request_decode(&ctx, &req)) - return 0; - - if (debug > 1) - printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u " - "error_index=%u\n", req.id, req.error_status, - req.error_index); - } - - /* - * Loop through objects, look for IP addresses to mangle. 
- */ - if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) - return 0; - - if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) - return 0; - - obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); - if (obj == NULL) { - if (net_ratelimit()) - printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__); - return 0; - } - - while (!asn1_eoc_decode(&ctx, eoc)) { - unsigned int i; - - if (!snmp_object_decode(&ctx, obj)) { - if (*obj) { - kfree((*obj)->id); - kfree(*obj); - } - kfree(obj); - return 0; - } - - if (debug > 1) { - printk(KERN_DEBUG "bsalg: object: "); - for (i = 0; i < (*obj)->id_len; i++) { - if (i > 0) - printk("."); - printk("%lu", (*obj)->id[i]); - } - printk(": type=%u\n", (*obj)->type); - - } - - if ((*obj)->type == SNMP_IPADDR) - mangle_address(ctx.begin, ctx.pointer - 4 , map, check); - - kfree((*obj)->id); - kfree(*obj); - } - kfree(obj); - - if (!asn1_eoc_decode(&ctx, eoc)) - return 0; - - return 1; -} - -/***************************************************************************** - * - * NAT routines. - * - *****************************************************************************/ - -/* - * SNMP translation routine. - */ -static int snmp_translate(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - struct sk_buff **pskb) -{ - struct iphdr *iph = ip_hdr(*pskb); - struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); - u_int16_t udplen = ntohs(udph->len); - u_int16_t paylen = udplen - sizeof(struct udphdr); - int dir = CTINFO2DIR(ctinfo); - struct oct1_map map; - - /* - * Determine mappping for application layer addresses based - * on NAT manipulations for the packet. - */ - if (dir == IP_CT_DIR_ORIGINAL) { - /* SNAT traps */ - map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip); - map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip); - } else { - /* DNAT replies */ - map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip); - map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip); - } - - if (map.from == map.to) - return NF_ACCEPT; - - if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), - paylen, &map, &udph->check)) { - if (net_ratelimit()) - printk(KERN_WARNING "bsalg: parser failed\n"); - return NF_DROP; - } - return NF_ACCEPT; -} - -/* We don't actually set up expectations, just adjust internal IP - * addresses if this is being NATted */ -static int help(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo) -{ - int dir = CTINFO2DIR(ctinfo); - unsigned int ret; - struct iphdr *iph = ip_hdr(*pskb); - struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); - - /* SNMP replies and originating SNMP traps get mangled */ - if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY) - return NF_ACCEPT; - if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) - return NF_ACCEPT; - - /* No NAT? */ - if (!(ct->status & IPS_NAT_MASK)) - return NF_ACCEPT; - - /* - * Make sure the packet length is ok. So far, we were only guaranteed - * to have a valid length IP header plus 8 bytes, which means we have - * enough room for a UDP header. Just verify the UDP length field so we - * can mess around with the payload. 
- */ - if (ntohs(udph->len) != (*pskb)->len - (iph->ihl << 2)) { - if (net_ratelimit()) - printk(KERN_WARNING "SNMP: dropping malformed packet " - "src=%u.%u.%u.%u dst=%u.%u.%u.%u\n", - NIPQUAD(iph->saddr), NIPQUAD(iph->daddr)); - return NF_DROP; - } - - if (!skb_make_writable(pskb, (*pskb)->len)) - return NF_DROP; - - spin_lock_bh(&snmp_lock); - ret = snmp_translate(ct, ctinfo, pskb); - spin_unlock_bh(&snmp_lock); - return ret; -} - -static struct ip_conntrack_helper snmp_helper = { - .max_expected = 0, - .timeout = 180, - .me = THIS_MODULE, - .help = help, - .name = "snmp", - - .tuple = {.src = {.u = {.udp = {.port = __constant_htons(SNMP_PORT)}}}, - .dst = {.protonum = IPPROTO_UDP}, - }, - .mask = {.src = {.u = {0xFFFF}}, - .dst = {.protonum = 0xFF}, - }, -}; - -static struct ip_conntrack_helper snmp_trap_helper = { - .max_expected = 0, - .timeout = 180, - .me = THIS_MODULE, - .help = help, - .name = "snmp_trap", - - .tuple = {.src = {.u = {.udp = {.port = __constant_htons(SNMP_TRAP_PORT)}}}, - .dst = {.protonum = IPPROTO_UDP}, - }, - .mask = {.src = {.u = {0xFFFF}}, - .dst = {.protonum = 0xFF}, - }, -}; - -/***************************************************************************** - * - * Module stuff. - * - *****************************************************************************/ - -static int __init ip_nat_snmp_basic_init(void) -{ - int ret = 0; - - ret = ip_conntrack_helper_register(&snmp_helper); - if (ret < 0) - return ret; - ret = ip_conntrack_helper_register(&snmp_trap_helper); - if (ret < 0) { - ip_conntrack_helper_unregister(&snmp_helper); - return ret; - } - return ret; -} - -static void __exit ip_nat_snmp_basic_fini(void) -{ - ip_conntrack_helper_unregister(&snmp_helper); - ip_conntrack_helper_unregister(&snmp_trap_helper); -} - -module_init(ip_nat_snmp_basic_init); -module_exit(ip_nat_snmp_basic_fini); - -module_param(debug, int, 0600); diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c deleted file mode 100644 index 32f7bf661fc8..000000000000 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ /dev/null @@ -1,387 +0,0 @@ -/* This file contains all the functions required for the standalone - ip_nat module. - - These are not required by the compatibility layer. -*/ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* - * 23 Apr 2001: Harald Welte - * - new API and handling of conntrack/nat helpers - * - now capable of multiple expectations for one master - * */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(format, args...) 
-#endif - -#ifdef CONFIG_XFRM -static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) -{ - struct ip_conntrack *ct; - struct ip_conntrack_tuple *t; - enum ip_conntrack_info ctinfo; - enum ip_conntrack_dir dir; - unsigned long statusbit; - - ct = ip_conntrack_get(skb, &ctinfo); - if (ct == NULL) - return; - dir = CTINFO2DIR(ctinfo); - t = &ct->tuplehash[dir].tuple; - - if (dir == IP_CT_DIR_ORIGINAL) - statusbit = IPS_DST_NAT; - else - statusbit = IPS_SRC_NAT; - - if (ct->status & statusbit) { - fl->fl4_dst = t->dst.ip; - if (t->dst.protonum == IPPROTO_TCP || - t->dst.protonum == IPPROTO_UDP) - fl->fl_ip_dport = t->dst.u.tcp.port; - } - - statusbit ^= IPS_NAT_MASK; - - if (ct->status & statusbit) { - fl->fl4_src = t->src.ip; - if (t->dst.protonum == IPPROTO_TCP || - t->dst.protonum == IPPROTO_UDP) - fl->fl_ip_sport = t->src.u.tcp.port; - } -} -#endif - -static unsigned int -ip_nat_fn(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - struct ip_nat_info *info; - /* maniptype == SRC for postrouting. */ - enum ip_nat_manip_type maniptype = HOOK2MANIP(hooknum); - - /* We never see fragments: conntrack defrags on pre-routing - and local-out, and ip_nat_out protects post-routing. */ - IP_NF_ASSERT(!(ip_hdr(*pskb)->frag_off - & htons(IP_MF|IP_OFFSET))); - - ct = ip_conntrack_get(*pskb, &ctinfo); - /* Can't track? It's not due to stress, or conntrack would - have dropped it. Hence it's the user's responsibilty to - packet filter it out, or implement conntrack/NAT for that - protocol. 8) --RR */ - if (!ct) { - /* Exception: ICMP redirect to new connection (not in - hash table yet). We must not let this through, in - case we're doing NAT to the same network. */ - if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) { - struct icmphdr _hdr, *hp; - - hp = skb_header_pointer(*pskb, ip_hdrlen(*pskb), - sizeof(_hdr), &_hdr); - if (hp != NULL && - hp->type == ICMP_REDIRECT) - return NF_DROP; - } - return NF_ACCEPT; - } - - /* Don't try to NAT if this packet is not conntracked */ - if (ct == &ip_conntrack_untracked) - return NF_ACCEPT; - - switch (ctinfo) { - case IP_CT_RELATED: - case IP_CT_RELATED+IP_CT_IS_REPLY: - if (ip_hdr(*pskb)->protocol == IPPROTO_ICMP) { - if (!ip_nat_icmp_reply_translation(ct, ctinfo, - hooknum, pskb)) - return NF_DROP; - else - return NF_ACCEPT; - } - /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ - case IP_CT_NEW: - info = &ct->nat.info; - - /* Seen it before? This can happen for loopback, retrans, - or local packets.. */ - if (!ip_nat_initialized(ct, maniptype)) { - unsigned int ret; - - if (unlikely(is_confirmed(ct))) - /* NAT module was loaded late */ - ret = alloc_null_binding_confirmed(ct, info, - hooknum); - else if (hooknum == NF_IP_LOCAL_IN) - /* LOCAL_IN hook doesn't have a chain! */ - ret = alloc_null_binding(ct, info, hooknum); - else - ret = ip_nat_rule_find(pskb, hooknum, - in, out, ct, - info); - - if (ret != NF_ACCEPT) { - return ret; - } - } else - DEBUGP("Already setup manip %s for ct %p\n", - maniptype == IP_NAT_MANIP_SRC ? 
"SRC" : "DST", - ct); - break; - - default: - /* ESTABLISHED */ - IP_NF_ASSERT(ctinfo == IP_CT_ESTABLISHED - || ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY)); - info = &ct->nat.info; - } - - IP_NF_ASSERT(info); - return ip_nat_packet(ct, ctinfo, hooknum, pskb); -} - -static unsigned int -ip_nat_in(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - unsigned int ret; - __be32 daddr = ip_hdr(*pskb)->daddr; - - ret = ip_nat_fn(hooknum, pskb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN - && daddr != ip_hdr(*pskb)->daddr) { - dst_release((*pskb)->dst); - (*pskb)->dst = NULL; - } - return ret; -} - -static unsigned int -ip_nat_out(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ -#ifdef CONFIG_XFRM - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; -#endif - unsigned int ret; - - /* root is playing with raw sockets. */ - if ((*pskb)->len < sizeof(struct iphdr) - || ip_hdrlen(*pskb) < sizeof(struct iphdr)) - return NF_ACCEPT; - - ret = ip_nat_fn(hooknum, pskb, in, out, okfn); -#ifdef CONFIG_XFRM - if (ret != NF_DROP && ret != NF_STOLEN - && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (ct->tuplehash[dir].tuple.src.ip != - ct->tuplehash[!dir].tuple.dst.ip - || ct->tuplehash[dir].tuple.src.u.all != - ct->tuplehash[!dir].tuple.dst.u.all - ) - return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP; - } -#endif - return ret; -} - -static unsigned int -ip_nat_local_fn(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - unsigned int ret; - - /* root is playing with raw sockets. */ - if ((*pskb)->len < sizeof(struct iphdr) - || ip_hdrlen(*pskb) < sizeof(struct iphdr)) - return NF_ACCEPT; - - ret = ip_nat_fn(hooknum, pskb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN - && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (ct->tuplehash[dir].tuple.dst.ip != - ct->tuplehash[!dir].tuple.src.ip) { - if (ip_route_me_harder(pskb, RTN_UNSPEC)) - ret = NF_DROP; - } -#ifdef CONFIG_XFRM - else if (ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[!dir].tuple.src.u.all) - if (ip_xfrm_me_harder(pskb)) - ret = NF_DROP; -#endif - - } - return ret; -} - -static unsigned int -ip_nat_adjust(unsigned int hooknum, - struct sk_buff **pskb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) -{ - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - - ct = ip_conntrack_get(*pskb, &ctinfo); - if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { - DEBUGP("ip_nat_standalone: adjusting sequence number\n"); - if (!ip_nat_seq_adjust(pskb, ct, ctinfo)) - return NF_DROP; - } - return NF_ACCEPT; -} - -/* We must be after connection tracking and before packet filtering. 
*/ - -static struct nf_hook_ops ip_nat_ops[] = { - /* Before packet filtering, change destination */ - { - .hook = ip_nat_in, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_PRE_ROUTING, - .priority = NF_IP_PRI_NAT_DST, - }, - /* After packet filtering, change source */ - { - .hook = ip_nat_out, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_POST_ROUTING, - .priority = NF_IP_PRI_NAT_SRC, - }, - /* After conntrack, adjust sequence number */ - { - .hook = ip_nat_adjust, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_POST_ROUTING, - .priority = NF_IP_PRI_NAT_SEQ_ADJUST, - }, - /* Before packet filtering, change destination */ - { - .hook = ip_nat_local_fn, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_LOCAL_OUT, - .priority = NF_IP_PRI_NAT_DST, - }, - /* After packet filtering, change source */ - { - .hook = ip_nat_fn, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_LOCAL_IN, - .priority = NF_IP_PRI_NAT_SRC, - }, - /* After conntrack, adjust sequence number */ - { - .hook = ip_nat_adjust, - .owner = THIS_MODULE, - .pf = PF_INET, - .hooknum = NF_IP_LOCAL_IN, - .priority = NF_IP_PRI_NAT_SEQ_ADJUST, - }, -}; - -static int __init ip_nat_standalone_init(void) -{ - int ret = 0; - - need_conntrack(); - -#ifdef CONFIG_XFRM - BUG_ON(ip_nat_decode_session != NULL); - ip_nat_decode_session = nat_decode_session; -#endif - ret = ip_nat_rule_init(); - if (ret < 0) { - printk("ip_nat_init: can't setup rules.\n"); - goto cleanup_decode_session; - } - ret = nf_register_hooks(ip_nat_ops, ARRAY_SIZE(ip_nat_ops)); - if (ret < 0) { - printk("ip_nat_init: can't register hooks.\n"); - goto cleanup_rule_init; - } - return ret; - - cleanup_rule_init: - ip_nat_rule_cleanup(); - cleanup_decode_session: -#ifdef CONFIG_XFRM - ip_nat_decode_session = NULL; - synchronize_net(); -#endif - return ret; -} - -static void __exit ip_nat_standalone_fini(void) -{ - nf_unregister_hooks(ip_nat_ops, ARRAY_SIZE(ip_nat_ops)); - ip_nat_rule_cleanup(); -#ifdef CONFIG_XFRM - ip_nat_decode_session = NULL; - synchronize_net(); -#endif -} - -module_init(ip_nat_standalone_init); -module_exit(ip_nat_standalone_fini); - -MODULE_LICENSE("GPL"); diff --git a/net/ipv4/netfilter/ip_nat_tftp.c b/net/ipv4/netfilter/ip_nat_tftp.c deleted file mode 100644 index 604793536fc1..000000000000 --- a/net/ipv4/netfilter/ip_nat_tftp.c +++ /dev/null @@ -1,70 +0,0 @@ -/* (C) 2001-2002 Magnus Boden - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Version: 0.0.7 - * - * Thu 21 Mar 2002 Harald Welte - * - Port to newnat API - * - * This module currently supports DNAT: - * iptables -t nat -A PREROUTING -d x.x.x.x -j DNAT --to-dest x.x.x.y - * - * and SNAT: - * iptables -t nat -A POSTROUTING { -j MASQUERADE , -j SNAT --to-source x.x.x.x } - * - * It has not been tested with - * -j SNAT --to-source x.x.x.x-x.x.x.y since I only have one external ip - * If you do test this please let me know if it works or not. 
- * - */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -MODULE_AUTHOR("Magnus Boden "); -MODULE_DESCRIPTION("tftp NAT helper"); -MODULE_LICENSE("GPL"); - -static unsigned int help(struct sk_buff **pskb, - enum ip_conntrack_info ctinfo, - struct ip_conntrack_expect *exp) -{ - struct ip_conntrack *ct = exp->master; - - exp->saved_proto.udp.port - = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; - exp->dir = IP_CT_DIR_REPLY; - exp->expectfn = ip_nat_follow_master; - if (ip_conntrack_expect_related(exp) != 0) - return NF_DROP; - return NF_ACCEPT; -} - -static void __exit ip_nat_tftp_fini(void) -{ - rcu_assign_pointer(ip_nat_tftp_hook, NULL); - synchronize_rcu(); -} - -static int __init ip_nat_tftp_init(void) -{ - BUG_ON(rcu_dereference(ip_nat_tftp_hook)); - rcu_assign_pointer(ip_nat_tftp_hook, help); - return 0; -} - -module_init(ip_nat_tftp_init); -module_exit(ip_nat_tftp_fini); diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index d3b16817a991..40e273421398 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -21,15 +21,12 @@ #include #include #include - -#include - #include - #include #include #include -#include +#include +#include #define CLUSTERIP_VERSION "0.8" @@ -310,15 +307,16 @@ target(struct sk_buff **pskb, const void *targinfo) { const struct ipt_clusterip_tgt_info *cipinfo = targinfo; + struct nf_conn *ct; enum ip_conntrack_info ctinfo; - u_int32_t *mark, hash; + u_int32_t hash; /* don't need to clusterip_config_get() here, since refcount * is only decremented by destroy() - and ip_tables guarantees * that the ->target() function isn't called after ->destroy() */ - mark = nf_ct_get_mark((*pskb), &ctinfo); - if (mark == NULL) { + ct = nf_ct_get(*pskb, &ctinfo); + if (ct == NULL) { printk(KERN_ERR "CLUSTERIP: no conntrack!\n"); /* FIXME: need to drop invalid ones, since replies * to outgoing connections of other nodes will be @@ -341,7 +339,7 @@ target(struct sk_buff **pskb, switch (ctinfo) { case IP_CT_NEW: - *mark = hash; + ct->mark = hash; break; case IP_CT_RELATED: case IP_CT_RELATED+IP_CT_IS_REPLY: @@ -358,7 +356,7 @@ target(struct sk_buff **pskb, #ifdef DEBUG_CLUSTERP DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); #endif - DEBUGP("hash=%u ct_hash=%u ", hash, *mark); + DEBUGP("hash=%u ct_hash=%u ", hash, ct->mark); if (!clusterip_responsible(cipinfo->config, hash)) { DEBUGP("not responsible\n"); return NF_DROP; diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index b5955f3a3f8f..d4f2d7775330 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -19,12 +19,8 @@ #include #include #include -#include -#ifdef CONFIG_NF_NAT_NEEDED #include -#else -#include -#endif +#include #include MODULE_LICENSE("GPL"); @@ -48,7 +44,7 @@ masquerade_check(const char *tablename, void *targinfo, unsigned int hook_mask) { - const struct ip_nat_multi_range_compat *mr = targinfo; + const struct nf_nat_multi_range_compat *mr = targinfo; if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { DEBUGP("masquerade_check: bad MAP_IPS.\n"); @@ -69,33 +65,26 @@ masquerade_target(struct sk_buff **pskb, const struct xt_target *target, const void *targinfo) { -#ifdef CONFIG_NF_NAT_NEEDED + struct nf_conn *ct; struct nf_conn_nat *nat; -#endif - struct ip_conntrack *ct; enum ip_conntrack_info ctinfo; - struct ip_nat_range newrange; - const struct ip_nat_multi_range_compat *mr; + struct 
nf_nat_range newrange; + const struct nf_nat_multi_range_compat *mr; struct rtable *rt; __be32 newsrc; - IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING); + NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING); - ct = ip_conntrack_get(*pskb, &ctinfo); -#ifdef CONFIG_NF_NAT_NEEDED + ct = nf_ct_get(*pskb, &ctinfo); nat = nfct_nat(ct); -#endif - IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED + + NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); /* Source address is 0.0.0.0 - locally generated packet that is * probably not supposed to be masqueraded. */ -#ifdef CONFIG_NF_NAT_NEEDED if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) -#else - if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip == 0) -#endif return NF_ACCEPT; mr = targinfo; @@ -107,40 +96,30 @@ masquerade_target(struct sk_buff **pskb, } write_lock_bh(&masq_lock); -#ifdef CONFIG_NF_NAT_NEEDED nat->masq_index = out->ifindex; -#else - ct->nat.masq_index = out->ifindex; -#endif write_unlock_bh(&masq_lock); /* Transfer from original range. */ - newrange = ((struct ip_nat_range) + newrange = ((struct nf_nat_range) { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, newsrc, newsrc, mr->range[0].min, mr->range[0].max }); /* Hand modified range to generic setup. */ - return ip_nat_setup_info(ct, &newrange, hooknum); + return nf_nat_setup_info(ct, &newrange, hooknum); } static inline int -device_cmp(struct ip_conntrack *i, void *ifindex) +device_cmp(struct nf_conn *i, void *ifindex) { - int ret; -#ifdef CONFIG_NF_NAT_NEEDED struct nf_conn_nat *nat = nfct_nat(i); + int ret; if (!nat) return 0; -#endif read_lock_bh(&masq_lock); -#ifdef CONFIG_NF_NAT_NEEDED ret = (nat->masq_index == (int)(long)ifindex); -#else - ret = (i->nat.masq_index == (int)(long)ifindex); -#endif read_unlock_bh(&masq_lock); return ret; @@ -156,9 +135,9 @@ static int masq_device_event(struct notifier_block *this, /* Device was downed. Search entire table for conntracks which were associated with that device, and forget them. */ - IP_NF_ASSERT(dev->ifindex != 0); + NF_CT_ASSERT(dev->ifindex != 0); - ip_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex); + nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex); } return NOTIFY_DONE; @@ -174,9 +153,9 @@ static int masq_inet_event(struct notifier_block *this, /* IP address was deleted. Search entire table for conntracks which were associated with that device, and forget them. 
*/ - IP_NF_ASSERT(dev->ifindex != 0); + NF_CT_ASSERT(dev->ifindex != 0); - ip_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex); + nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex); } return NOTIFY_DONE; @@ -194,7 +173,7 @@ static struct xt_target masquerade = { .name = "MASQUERADE", .family = AF_INET, .target = masquerade_target, - .targetsize = sizeof(struct ip_nat_multi_range_compat), + .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = 1 << NF_IP_POST_ROUTING, .checkentry = masquerade_check, diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c index d03f165722da..068c69bce30e 100644 --- a/net/ipv4/netfilter/ipt_NETMAP.c +++ b/net/ipv4/netfilter/ipt_NETMAP.c @@ -16,11 +16,7 @@ #include #include #include -#ifdef CONFIG_NF_NAT_NEEDED #include -#else -#include -#endif #define MODULENAME "NETMAP" MODULE_LICENSE("GPL"); @@ -40,7 +36,7 @@ check(const char *tablename, void *targinfo, unsigned int hook_mask) { - const struct ip_nat_multi_range_compat *mr = targinfo; + const struct nf_nat_multi_range_compat *mr = targinfo; if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) { DEBUGP(MODULENAME":check: bad MAP_IPS.\n"); @@ -61,16 +57,16 @@ target(struct sk_buff **pskb, const struct xt_target *target, const void *targinfo) { - struct ip_conntrack *ct; + struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 new_ip, netmask; - const struct ip_nat_multi_range_compat *mr = targinfo; - struct ip_nat_range newrange; + const struct nf_nat_multi_range_compat *mr = targinfo; + struct nf_nat_range newrange; - IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING + NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_POST_ROUTING || hooknum == NF_IP_LOCAL_OUT); - ct = ip_conntrack_get(*pskb, &ctinfo); + ct = nf_ct_get(*pskb, &ctinfo); netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); @@ -80,20 +76,20 @@ target(struct sk_buff **pskb, new_ip = ip_hdr(*pskb)->saddr & ~netmask; new_ip |= mr->range[0].min_ip & netmask; - newrange = ((struct ip_nat_range) + newrange = ((struct nf_nat_range) { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, new_ip, new_ip, mr->range[0].min, mr->range[0].max }); /* Hand modified range to generic setup. 
*/ - return ip_nat_setup_info(ct, &newrange, hooknum); + return nf_nat_setup_info(ct, &newrange, hooknum); } static struct xt_target target_module = { .name = MODULENAME, .family = AF_INET, .target = target, - .targetsize = sizeof(struct ip_nat_multi_range_compat), + .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_POST_ROUTING) | (1 << NF_IP_LOCAL_OUT), diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c index c2b6b80670f8..68cc76a198eb 100644 --- a/net/ipv4/netfilter/ipt_REDIRECT.c +++ b/net/ipv4/netfilter/ipt_REDIRECT.c @@ -19,11 +19,7 @@ #include #include #include -#ifdef CONFIG_NF_NAT_NEEDED #include -#else -#include -#endif MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team "); @@ -43,7 +39,7 @@ redirect_check(const char *tablename, void *targinfo, unsigned int hook_mask) { - const struct ip_nat_multi_range_compat *mr = targinfo; + const struct nf_nat_multi_range_compat *mr = targinfo; if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { DEBUGP("redirect_check: bad MAP_IPS.\n"); @@ -64,17 +60,17 @@ redirect_target(struct sk_buff **pskb, const struct xt_target *target, const void *targinfo) { - struct ip_conntrack *ct; + struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 newdst; - const struct ip_nat_multi_range_compat *mr = targinfo; - struct ip_nat_range newrange; + const struct nf_nat_multi_range_compat *mr = targinfo; + struct nf_nat_range newrange; - IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING + NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT); - ct = ip_conntrack_get(*pskb, &ctinfo); - IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); + ct = nf_ct_get(*pskb, &ctinfo); + NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); /* Local packets: make them go to loopback */ if (hooknum == NF_IP_LOCAL_OUT) @@ -96,20 +92,20 @@ redirect_target(struct sk_buff **pskb, } /* Transfer from original range. */ - newrange = ((struct ip_nat_range) + newrange = ((struct nf_nat_range) { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, newdst, newdst, mr->range[0].min, mr->range[0].max }); /* Hand modified range to generic setup. 
*/ - return ip_nat_setup_info(ct, &newrange, hooknum); + return nf_nat_setup_info(ct, &newrange, hooknum); } static struct xt_target redirect_reg = { .name = "REDIRECT", .family = AF_INET, .target = redirect_target, - .targetsize = sizeof(struct ip_nat_multi_range_compat), + .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT), .checkentry = redirect_check, diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c index bd4404e5c688..fe76ffc0caed 100644 --- a/net/ipv4/netfilter/ipt_SAME.c +++ b/net/ipv4/netfilter/ipt_SAME.c @@ -35,11 +35,7 @@ #include #include #include -#ifdef CONFIG_NF_NAT_NEEDED #include -#else -#include -#endif #include MODULE_LICENSE("GPL"); @@ -138,17 +134,17 @@ same_target(struct sk_buff **pskb, const struct xt_target *target, const void *targinfo) { - struct ip_conntrack *ct; + struct nf_conn *ct; enum ip_conntrack_info ctinfo; u_int32_t tmpip, aindex; __be32 new_ip; const struct ipt_same_info *same = targinfo; - struct ip_nat_range newrange; - const struct ip_conntrack_tuple *t; + struct nf_nat_range newrange; + const struct nf_conntrack_tuple *t; - IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING || + NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_POST_ROUTING); - ct = ip_conntrack_get(*pskb, &ctinfo); + ct = nf_ct_get(*pskb, &ctinfo); t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; @@ -157,17 +153,10 @@ same_target(struct sk_buff **pskb, Here we calculate the index in same->iparray which holds the ipaddress we should use */ -#ifdef CONFIG_NF_NAT_NEEDED tmpip = ntohl(t->src.u3.ip); if (!(same->info & IPT_SAME_NODST)) tmpip += ntohl(t->dst.u3.ip); -#else - tmpip = ntohl(t->src.ip); - - if (!(same->info & IPT_SAME_NODST)) - tmpip += ntohl(t->dst.ip); -#endif aindex = tmpip % same->ipnum; new_ip = htonl(same->iparray[aindex]); @@ -178,13 +167,13 @@ same_target(struct sk_buff **pskb, NIPQUAD(new_ip)); /* Transfer from original range. */ - newrange = ((struct ip_nat_range) + newrange = ((struct nf_nat_range) { same->range[0].flags, new_ip, new_ip, /* FIXME: Use ports from correct range! */ same->range[0].min, same->range[0].max }); /* Hand modified range to generic setup. 
*/ - return ip_nat_setup_info(ct, &newrange, hooknum); + return nf_nat_setup_info(ct, &newrange, hooknum); } static struct xt_target same_reg = { diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index 3c58fea0d391..fcebc968d37f 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -33,7 +33,7 @@ static int set_addr(struct sk_buff **pskb, unsigned int addroff, __be32 ip, __be16 port) { enum ip_conntrack_info ctinfo; - struct nf_conn *ct = ip_conntrack_get(*pskb, &ctinfo); + struct nf_conn *ct = nf_ct_get(*pskb, &ctinfo); struct { __be32 ip; __be16 port; @@ -383,7 +383,7 @@ static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct, static void ip_nat_q931_expect(struct nf_conn *new, struct nf_conntrack_expect *this) { - struct ip_nat_range range; + struct nf_nat_range range; if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */ nf_nat_follow_master(new, this); diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index 7ba341c22eaa..a66888749ceb 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c @@ -53,7 +53,7 @@ static void pptp_nat_expected(struct nf_conn *ct, struct nf_conntrack_tuple t; struct nf_ct_pptp_master *ct_pptp_info; struct nf_nat_pptp *nat_pptp_info; - struct ip_nat_range range; + struct nf_nat_range range; ct_pptp_info = &nfct_help(master)->help.ct_pptp_info; nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info; diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 54698af6d0af..c558f3214255 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -25,6 +25,7 @@ config NETFILTER_NETLINK_LOG and is also scheduled to replace the old syslog-based ipt_LOG and ip6t_LOG modules. +# Rename this to NF_CONNTRACK in a 2.6.25 config NF_CONNTRACK_ENABLED tristate "Netfilter connection tracking support" help @@ -39,42 +40,9 @@ config NF_CONNTRACK_ENABLED To compile it as a module, choose M here. If unsure, say N. -choice - prompt "Netfilter connection tracking support" - depends on NF_CONNTRACK_ENABLED - -config NF_CONNTRACK_SUPPORT - bool "Layer 3 Independent Connection tracking" - help - Layer 3 independent connection tracking is experimental scheme - which generalize ip_conntrack to support other layer 3 protocols. - - This is required to do Masquerading or other kinds of Network - Address Translation (except for Fast NAT). It can also be used to - enhance packet filtering (see `Connection state match support' - below). - -config IP_NF_CONNTRACK_SUPPORT - bool "Layer 3 Dependent Connection tracking (OBSOLETE)" - help - The old, Layer 3 dependent ip_conntrack subsystem of netfilter. - - This is required to do Masquerading or other kinds of Network - Address Translation (except for Fast NAT). It can also be used to - enhance packet filtering (see `Connection state match support' - below). 
- -endchoice - config NF_CONNTRACK tristate - default m if NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=m - default y if NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=y - -config IP_NF_CONNTRACK - tristate - default m if IP_NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=m - default y if IP_NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=y + default NF_CONNTRACK_ENABLED config NF_CT_ACCT bool "Connection tracking flow accounting" @@ -303,9 +271,8 @@ config NETFILTER_XT_TARGET_CONNMARK tristate '"CONNMARK" target support' depends on NETFILTER_XTABLES depends on IP_NF_MANGLE || IP6_NF_MANGLE - depends on IP_NF_CONNTRACK || NF_CONNTRACK - select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK - select NF_CONNTRACK_MARK if NF_CONNTRACK + depends on NF_CONNTRACK + select NF_CONNTRACK_MARK help This option adds a `CONNMARK' target, which allows one to manipulate the connection mark value. Similar to the MARK target, but @@ -366,7 +333,7 @@ config NETFILTER_XT_TARGET_NOTRACK tristate '"NOTRACK" target support' depends on NETFILTER_XTABLES depends on IP_NF_RAW || IP6_NF_RAW - depends on IP_NF_CONNTRACK || NF_CONNTRACK + depends on NF_CONNTRACK help The NOTRACK target allows a select rule to specify which packets *not* to enter the conntrack/NAT @@ -387,9 +354,7 @@ config NETFILTER_XT_TARGET_SECMARK config NETFILTER_XT_TARGET_CONNSECMARK tristate '"CONNSECMARK" target support' - depends on NETFILTER_XTABLES && \ - ((NF_CONNTRACK && NF_CONNTRACK_SECMARK) || \ - (IP_NF_CONNTRACK && IP_NF_CONNTRACK_SECMARK)) + depends on NETFILTER_XTABLES && NF_CONNTRACK && NF_CONNTRACK_SECMARK help The CONNSECMARK target copies security markings from packets to connections, and restores security markings from connections @@ -437,9 +402,8 @@ config NETFILTER_XT_MATCH_COMMENT config NETFILTER_XT_MATCH_CONNBYTES tristate '"connbytes" per-connection counter match support' depends on NETFILTER_XTABLES - depends on IP_NF_CONNTRACK || NF_CONNTRACK - select IP_NF_CT_ACCT if IP_NF_CONNTRACK - select NF_CT_ACCT if NF_CONNTRACK + depends on NF_CONNTRACK + select NF_CT_ACCT help This option adds a `connbytes' match, which allows you to match the number of bytes and/or packets for each direction within a connection. @@ -450,9 +414,8 @@ config NETFILTER_XT_MATCH_CONNBYTES config NETFILTER_XT_MATCH_CONNMARK tristate '"connmark" connection mark match support' depends on NETFILTER_XTABLES - depends on IP_NF_CONNTRACK || NF_CONNTRACK - select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK - select NF_CONNTRACK_MARK if NF_CONNTRACK + depends on NF_CONNTRACK + select NF_CONNTRACK_MARK help This option adds a `connmark' match, which allows you to match the connection mark value previously set for the session by `CONNMARK'. @@ -464,7 +427,7 @@ config NETFILTER_XT_MATCH_CONNMARK config NETFILTER_XT_MATCH_CONNTRACK tristate '"conntrack" connection tracking match support' depends on NETFILTER_XTABLES - depends on IP_NF_CONNTRACK || NF_CONNTRACK + depends on NF_CONNTRACK help This is a general conntrack match module, a superset of the state match. @@ -508,7 +471,7 @@ config NETFILTER_XT_MATCH_ESP config NETFILTER_XT_MATCH_HELPER tristate '"helper" match support' depends on NETFILTER_XTABLES - depends on IP_NF_CONNTRACK || NF_CONNTRACK + depends on NF_CONNTRACK help Helper matching allows you to match packets in dynamic connections tracked by a conntrack-helper, ie. 
ip_conntrack_ftp @@ -632,7 +595,7 @@ config NETFILTER_XT_MATCH_SCTP config NETFILTER_XT_MATCH_STATE tristate '"state" match support' depends on NETFILTER_XTABLES - depends on IP_NF_CONNTRACK || NF_CONNTRACK + depends on NF_CONNTRACK help Connection state matching allows you to match packets based on their relationship to a tracked connection (ie. previous packets). This diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c index 795c058b16a5..b03ce009d0bf 100644 --- a/net/netfilter/xt_CONNMARK.c +++ b/net/netfilter/xt_CONNMARK.c @@ -30,10 +30,7 @@ MODULE_ALIAS("ipt_CONNMARK"); #include #include -#include -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include -#endif static unsigned int target(struct sk_buff **pskb, @@ -44,40 +41,33 @@ target(struct sk_buff **pskb, const void *targinfo) { const struct xt_connmark_target_info *markinfo = targinfo; + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; u_int32_t diff; u_int32_t mark; u_int32_t newmark; - u_int32_t ctinfo; - u_int32_t *ctmark = nf_ct_get_mark(*pskb, &ctinfo); - if (ctmark) { + ct = nf_ct_get(*pskb, &ctinfo); + if (ct) { switch(markinfo->mode) { case XT_CONNMARK_SET: - newmark = (*ctmark & ~markinfo->mask) | markinfo->mark; - if (newmark != *ctmark) { - *ctmark = newmark; -#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE) - ip_conntrack_event_cache(IPCT_MARK, *pskb); -#else + newmark = (ct->mark & ~markinfo->mask) | markinfo->mark; + if (newmark != ct->mark) { + ct->mark = newmark; nf_conntrack_event_cache(IPCT_MARK, *pskb); -#endif } break; case XT_CONNMARK_SAVE: - newmark = (*ctmark & ~markinfo->mask) | + newmark = (ct->mark & ~markinfo->mask) | ((*pskb)->mark & markinfo->mask); - if (*ctmark != newmark) { - *ctmark = newmark; -#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE) - ip_conntrack_event_cache(IPCT_MARK, *pskb); -#else + if (ct->mark != newmark) { + ct->mark = newmark; nf_conntrack_event_cache(IPCT_MARK, *pskb); -#endif } break; case XT_CONNMARK_RESTORE: mark = (*pskb)->mark; - diff = (*ctmark ^ mark) & markinfo->mask; + diff = (ct->mark ^ mark) & markinfo->mask; (*pskb)->mark = mark ^ diff; break; } diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c index 1ab0db641f96..81c0c58bab47 100644 --- a/net/netfilter/xt_CONNSECMARK.c +++ b/net/netfilter/xt_CONNSECMARK.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #define PFX "CONNSECMARK: " @@ -36,12 +36,12 @@ MODULE_ALIAS("ip6t_CONNSECMARK"); static void secmark_save(struct sk_buff *skb) { if (skb->secmark) { - u32 *connsecmark; + struct nf_conn *ct; enum ip_conntrack_info ctinfo; - connsecmark = nf_ct_get_secmark(skb, &ctinfo); - if (connsecmark && !*connsecmark) - *connsecmark = skb->secmark; + ct = nf_ct_get(skb, &ctinfo); + if (ct && !ct->secmark) + ct->secmark = skb->secmark; } } @@ -52,12 +52,12 @@ static void secmark_save(struct sk_buff *skb) static void secmark_restore(struct sk_buff *skb) { if (!skb->secmark) { - u32 *connsecmark; + struct nf_conn *ct; enum ip_conntrack_info ctinfo; - connsecmark = nf_ct_get_secmark(skb, &ctinfo); - if (connsecmark && *connsecmark) - skb->secmark = *connsecmark; + ct = nf_ct_get(skb, &ctinfo); + if (ct && ct->secmark) + skb->secmark = ct->secmark; } } diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c index b874a2008b2b..5085fb3d1e2d 100644 --- a/net/netfilter/xt_NOTRACK.c +++ b/net/netfilter/xt_NOTRACK.c @@ -5,7 +5,7 @@ #include #include -#include +#include 
MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_NOTRACK"); @@ -26,7 +26,7 @@ target(struct sk_buff **pskb, If there is a real ct entry correspondig to this packet, it'll hang aroun till timing out. We don't deal with it for performance reasons. JK */ - nf_ct_untrack(*pskb); + (*pskb)->nfct = &nf_conntrack_untracked.ct_general; (*pskb)->nfctinfo = IP_CT_NEW; nf_conntrack_get((*pskb)->nfct); diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index 302043bc41b2..fec9316a1e10 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c @@ -12,9 +12,9 @@ */ #include #include -#include #include #include +#include #include #include @@ -35,13 +35,17 @@ match(const struct sk_buff *skb, int *hotdrop) { const struct xt_connbytes_info *sinfo = matchinfo; + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; u_int64_t what = 0; /* initialize to make gcc happy */ u_int64_t bytes = 0; u_int64_t pkts = 0; const struct ip_conntrack_counter *counters; - if (!(counters = nf_ct_get_counters(skb))) - return 0; /* no match */ + ct = nf_ct_get(skb, &ctinfo); + if (!ct) + return 0; + counters = ct->counters; switch (sinfo->what) { case XT_CONNBYTES_PKTS: diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 36c2defff238..e1803256c792 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -21,16 +21,15 @@ #include #include +#include +#include +#include MODULE_AUTHOR("Henrik Nordstrom "); MODULE_DESCRIPTION("IP tables connmark match module"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_connmark"); -#include -#include -#include - static int match(const struct sk_buff *skb, const struct net_device *in, @@ -42,12 +41,14 @@ match(const struct sk_buff *skb, int *hotdrop) { const struct xt_connmark_info *info = matchinfo; - u_int32_t ctinfo; - const u_int32_t *ctmark = nf_ct_get_mark(skb, &ctinfo); - if (!ctmark) + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + + ct = nf_ct_get(skb, &ctinfo); + if (!ct) return 0; - return (((*ctmark) & info->mask) == info->mark) ^ info->invert; + return (((ct->mark) & info->mask) == info->mark) ^ info->invert; } static int diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 2885c378288e..f4ea8fe07a53 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c @@ -10,121 +10,15 @@ #include #include - -#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE) -#include -#include -#else -#include -#endif - #include #include -#include +#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marc Boucher "); MODULE_DESCRIPTION("iptables connection tracking match module"); MODULE_ALIAS("ipt_conntrack"); -#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE) - -static int -match(const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const struct xt_match *match, - const void *matchinfo, - int offset, - unsigned int protoff, - int *hotdrop) -{ - const struct xt_conntrack_info *sinfo = matchinfo; - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - unsigned int statebit; - - ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo); - -#define FWINV(bool, invflg) ((bool) ^ !!(sinfo->invflags & invflg)) - - if (ct == &ip_conntrack_untracked) - statebit = XT_CONNTRACK_STATE_UNTRACKED; - else if (ct) - statebit = XT_CONNTRACK_STATE_BIT(ctinfo); - else - statebit = XT_CONNTRACK_STATE_INVALID; - - if (sinfo->flags & XT_CONNTRACK_STATE) { - if (ct) { - if (test_bit(IPS_SRC_NAT_BIT, &ct->status)) - 
statebit |= XT_CONNTRACK_STATE_SNAT; - if (test_bit(IPS_DST_NAT_BIT, &ct->status)) - statebit |= XT_CONNTRACK_STATE_DNAT; - } - if (FWINV((statebit & sinfo->statemask) == 0, - XT_CONNTRACK_STATE)) - return 0; - } - - if (ct == NULL) { - if (sinfo->flags & ~XT_CONNTRACK_STATE) - return 0; - return 1; - } - - if (sinfo->flags & XT_CONNTRACK_PROTO && - FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != - sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, - XT_CONNTRACK_PROTO)) - return 0; - - if (sinfo->flags & XT_CONNTRACK_ORIGSRC && - FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip & - sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != - sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, - XT_CONNTRACK_ORIGSRC)) - return 0; - - if (sinfo->flags & XT_CONNTRACK_ORIGDST && - FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip & - sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != - sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, - XT_CONNTRACK_ORIGDST)) - return 0; - - if (sinfo->flags & XT_CONNTRACK_REPLSRC && - FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip & - sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != - sinfo->tuple[IP_CT_DIR_REPLY].src.ip, - XT_CONNTRACK_REPLSRC)) - return 0; - - if (sinfo->flags & XT_CONNTRACK_REPLDST && - FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip & - sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != - sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, - XT_CONNTRACK_REPLDST)) - return 0; - - if (sinfo->flags & XT_CONNTRACK_STATUS && - FWINV((ct->status & sinfo->statusmask) == 0, - XT_CONNTRACK_STATUS)) - return 0; - - if (sinfo->flags & XT_CONNTRACK_EXPIRES) { - unsigned long expires = timer_pending(&ct->timeout) ? - (ct->timeout.expires - jiffies)/HZ : 0; - - if (FWINV(!(expires >= sinfo->expires_min && - expires <= sinfo->expires_max), - XT_CONNTRACK_EXPIRES)) - return 0; - } - return 1; -} - -#else /* CONFIG_IP_NF_CONNTRACK */ static int match(const struct sk_buff *skb, const struct net_device *in, @@ -220,8 +114,6 @@ match(const struct sk_buff *skb, return 1; } -#endif /* CONFIG_NF_IP_CONNTRACK */ - static int checkentry(const char *tablename, const void *ip, diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c index 407d1d5da8a1..bc70b26ba5b4 100644 --- a/net/netfilter/xt_helper.c +++ b/net/netfilter/xt_helper.c @@ -13,18 +13,11 @@ #include #include #include -#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE) -#include -#include -#include -#else #include #include #include -#endif #include #include -#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Josefsson "); @@ -38,55 +31,6 @@ MODULE_ALIAS("ip6t_helper"); #define DEBUGP(format, args...) #endif -#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE) -static int -match(const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const struct xt_match *match, - const void *matchinfo, - int offset, - unsigned int protoff, - int *hotdrop) -{ - const struct xt_helper_info *info = matchinfo; - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; - int ret = info->invert; - - ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo); - if (!ct) { - DEBUGP("xt_helper: Eek! 
invalid conntrack?\n"); - return ret; - } - - if (!ct->master) { - DEBUGP("xt_helper: conntrack %p has no master\n", ct); - return ret; - } - - read_lock_bh(&ip_conntrack_lock); - if (!ct->master->helper) { - DEBUGP("xt_helper: master ct %p has no helper\n", - exp->expectant); - goto out_unlock; - } - - DEBUGP("master's name = %s , info->name = %s\n", - ct->master->helper->name, info->name); - - if (info->name[0] == '\0') - ret ^= 1; - else - ret ^= !strncmp(ct->master->helper->name, info->name, - strlen(ct->master->helper->name)); -out_unlock: - read_unlock_bh(&ip_conntrack_lock); - return ret; -} - -#else /* CONFIG_IP_NF_CONNTRACK */ - static int match(const struct sk_buff *skb, const struct net_device *in, @@ -134,7 +78,6 @@ out_unlock: read_unlock_bh(&nf_conntrack_lock); return ret; } -#endif static int check(const char *tablename, const void *inf, diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c index df37b912163a..149294f7df71 100644 --- a/net/netfilter/xt_state.c +++ b/net/netfilter/xt_state.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include @@ -36,7 +36,7 @@ match(const struct sk_buff *skb, if (nf_ct_is_untracked(skb)) statebit = XT_STATE_UNTRACKED; - else if (!nf_ct_get_ctinfo(skb, &ctinfo)) + else if (!nf_ct_get(skb, &ctinfo)) statebit = XT_STATE_INVALID; else statebit = XT_STATE_BIT(ctinfo); -- cgit v1.2.3-59-g8ed1b From ac5357ebac43e191003c2cd0722377dccfa01a84 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 14 Mar 2007 16:38:25 -0700 Subject: [NETFILTER]: nf_conntrack: remove ugly hack in l4proto registration Remove ugly special-casing of nf_conntrack_l4proto_generic, all it wants is its sysctl tables registered, so do that explicitly in an init function and move the remaining protocol initialization and cleanup code to nf_conntrack_proto.c as well. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/netfilter/nf_conntrack_core.h | 3 +++ net/netfilter/nf_conntrack_core.c | 18 ++-------------- net/netfilter/nf_conntrack_proto.c | 34 +++++++++++++++++++++++-------- 3 files changed, 31 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 85634e1865c3..9fb906688ffa 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -27,6 +27,9 @@ extern unsigned int nf_conntrack_in(int pf, extern int nf_conntrack_init(void); extern void nf_conntrack_cleanup(void); +extern int nf_conntrack_proto_init(void); +extern void nf_conntrack_proto_fini(void); + struct nf_conntrack_l3proto; extern struct nf_conntrack_l3proto *nf_ct_find_l3proto(u_int16_t pf); /* Like above, but you already have conntrack read lock. 
*/ diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 7694c51f1251..9858bcb29aa0 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1152,14 +1152,7 @@ void nf_conntrack_cleanup(void) free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc, nf_conntrack_htable_size); - nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_generic); - - /* free l3proto protocol tables */ - for (i = 0; i < PF_MAX; i++) - if (nf_ct_protos[i]) { - kfree(nf_ct_protos[i]); - nf_ct_protos[i] = NULL; - } + nf_conntrack_proto_fini(); } static struct list_head *alloc_hashtable(int size, int *vmalloced) @@ -1237,7 +1230,6 @@ module_param_call(hashsize, set_hashsize, param_get_uint, int __init nf_conntrack_init(void) { - unsigned int i; int ret; /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB @@ -1279,16 +1271,10 @@ int __init nf_conntrack_init(void) goto err_free_conntrack_slab; } - ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_generic); + ret = nf_conntrack_proto_init(); if (ret < 0) goto out_free_expect_slab; - /* Don't NEED lock here, but good form anyway. */ - write_lock_bh(&nf_conntrack_lock); - for (i = 0; i < AF_MAX; i++) - nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic; - write_unlock_bh(&nf_conntrack_lock); - /* For use by REJECT target */ rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach); diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index e2c4a58603a8..0ca2f0ba7c76 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -294,9 +294,6 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) goto out; } - if (l4proto == &nf_conntrack_l4proto_generic) - return nf_ct_l4proto_register_sysctl(l4proto); - mutex_lock(&nf_ct_proto_mutex); retry: if (nf_ct_protos[l4proto->l3proto]) { @@ -353,11 +350,6 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) { BUG_ON(l4proto->l3proto >= PF_MAX); - if (l4proto == &nf_conntrack_l4proto_generic) { - nf_ct_l4proto_unregister_sysctl(l4proto); - return; - } - mutex_lock(&nf_ct_proto_mutex); BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto); rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], @@ -371,3 +363,29 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) nf_ct_iterate_cleanup(kill_l4proto, l4proto); } EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); + +int nf_conntrack_proto_init(void) +{ + unsigned int i; + int err; + + err = nf_ct_l4proto_register_sysctl(&nf_conntrack_l4proto_generic); + if (err < 0) + return err; + + for (i = 0; i < AF_MAX; i++) + rcu_assign_pointer(nf_ct_l3protos[i], + &nf_conntrack_l3proto_generic); + return 0; +} + +void nf_conntrack_proto_fini(void) +{ + unsigned int i; + + nf_ct_l4proto_unregister_sysctl(&nf_conntrack_l4proto_generic); + + /* free l3proto protocol tables */ + for (i = 0; i < PF_MAX; i++) + kfree(nf_ct_protos[i]); +} -- cgit v1.2.3-59-g8ed1b From a3c5029cf7a96da3acdf6884a21581b5bef310c3 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 14 Mar 2007 16:39:25 -0700 Subject: [NETFILTER]: nfnetlink: use mutex instead of semaphore Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/netfilter/nfnetlink.h | 13 ----------- include/net/netfilter/nf_conntrack_l3proto.h | 5 +--- net/netfilter/nfnetlink.c | 35 +++++++++++++++++++--------- 3 files changed, 25 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 1e9c821f152d..6179648a014e 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -129,19 +129,6 @@ extern void __nfa_fill(struct sk_buff *skb, int attrtype, ({ if (skb_tailroom(skb) < (int)NFA_SPACE(attrlen)) goto nfattr_failure; \ __nfa_fill(skb, attrtype, attrlen, data); }) -extern struct semaphore nfnl_sem; - -#define nfnl_shlock() down(&nfnl_sem) -#define nfnl_shlock_nowait() down_trylock(&nfnl_sem) - -#define nfnl_shunlock() do { up(&nfnl_sem); \ - if(nfnl && nfnl->sk_receive_queue.qlen) \ - nfnl->sk_data_ready(nfnl, 0); \ - } while(0) - -extern void nfnl_lock(void); -extern void nfnl_unlock(void); - extern int nfnetlink_subsys_register(struct nfnetlink_subsystem *n); extern int nfnetlink_subsys_unregister(struct nfnetlink_subsystem *n); diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index eb575cbd4c95..f32f714e5d92 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -90,10 +90,7 @@ extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX]; /* Protocol registration. */ extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto); extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto); - -extern struct nf_conntrack_l3proto * -nf_ct_l3proto_find_get(u_int16_t l3proto); - +extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto); extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p); /* Existing built-in protocols */ diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index bf23e489e4cd..7865a47c981e 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -51,16 +52,28 @@ static char __initdata nfversion[] = "0.30"; static struct sock *nfnl = NULL; static struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; -DECLARE_MUTEX(nfnl_sem); +static DEFINE_MUTEX(nfnl_mutex); -void nfnl_lock(void) +static void nfnl_lock(void) { - nfnl_shlock(); + mutex_lock(&nfnl_mutex); } -void nfnl_unlock(void) +static int nfnl_trylock(void) { - nfnl_shunlock(); + return !mutex_trylock(&nfnl_mutex); +} + +static void __nfnl_unlock(void) +{ + mutex_unlock(&nfnl_mutex); +} + +static void nfnl_unlock(void) +{ + mutex_unlock(&nfnl_mutex); + if (nfnl->sk_receive_queue.qlen) + nfnl->sk_data_ready(nfnl, 0); } int nfnetlink_subsys_register(struct nfnetlink_subsystem *n) @@ -248,11 +261,11 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, ss = nfnetlink_get_subsys(type); if (!ss) { #ifdef CONFIG_KMOD - /* don't call nfnl_shunlock, since it would reenter + /* don't call nfnl_unlock, since it would reenter * with further packet processing */ - up(&nfnl_sem); + __nfnl_unlock(); request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type)); - nfnl_shlock(); + nfnl_lock(); ss = nfnetlink_get_subsys(type); if (!ss) #endif @@ -322,7 +335,7 @@ static void nfnetlink_rcv(struct sock *sk, int len) do { struct sk_buff *skb; - if (nfnl_shlock_nowait()) + if (nfnl_trylock()) return; while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { @@ -337,9 +350,9 
@@ static void nfnetlink_rcv(struct sock *sk, int len) kfree_skb(skb); } - /* don't call nfnl_shunlock, since it would reenter + /* don't call nfnl_unlock, since it would reenter * with further packet processing */ - up(&nfnl_sem); + __nfnl_unlock(); } while(nfnl && nfnl->sk_receive_queue.qlen); } -- cgit v1.2.3-59-g8ed1b From 010c7d6f867e98c86723f420d485583464fbab45 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 14 Mar 2007 16:40:10 -0700 Subject: [NETFILTER]: nf_conntrack: uninline notifier registration functions Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/netfilter/nf_conntrack_ecache.h | 30 ++++++----------------------- net/netfilter/nf_conntrack_ecache.c | 23 ++++++++++++++++++++++ 2 files changed, 29 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index b62a8a9ec9d8..811c9073c532 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -20,30 +20,8 @@ DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); #define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x) extern struct atomic_notifier_head nf_conntrack_chain; -extern struct atomic_notifier_head nf_conntrack_expect_chain; - -static inline int nf_conntrack_register_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&nf_conntrack_chain, nb); -} - -static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb); -} - -static inline int -nf_conntrack_expect_register_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb); -} - -static inline int -nf_conntrack_expect_unregister_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain, - nb); -} +extern int nf_conntrack_register_notifier(struct notifier_block *nb); +extern int nf_conntrack_unregister_notifier(struct notifier_block *nb); extern void nf_ct_deliver_cached_events(const struct nf_conn *ct); extern void __nf_ct_event_cache_init(struct nf_conn *ct); @@ -71,6 +49,10 @@ static inline void nf_conntrack_event(enum ip_conntrack_events event, atomic_notifier_call_chain(&nf_conntrack_chain, event, ct); } +extern struct atomic_notifier_head nf_conntrack_expect_chain; +extern int nf_conntrack_expect_register_notifier(struct notifier_block *nb); +extern int nf_conntrack_expect_unregister_notifier(struct notifier_block *nb); + static inline void nf_conntrack_expect_event(enum ip_conntrack_expect_events event, struct nf_conntrack_expect *exp) diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 1a223e0c0856..6bd421df2dbc 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -91,3 +91,26 @@ void nf_ct_event_cache_flush(void) } } +int nf_conntrack_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nf_conntrack_chain, nb); +} +EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier); + +int nf_conntrack_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb); +} +EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); + +int nf_conntrack_expect_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb); +} 
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_register_notifier); + +int nf_conntrack_expect_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain, nb); +} +EXPORT_SYMBOL_GPL(nf_conntrack_expect_unregister_notifier); -- cgit v1.2.3-59-g8ed1b From 8e87e014ec881ce353e1f43340157f519b5d9f30 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Wed, 14 Mar 2007 16:42:29 -0700 Subject: [JHASH]: Use const in jhash2 Use const to avoid forcing users to cast const data. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/jhash.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 82c7ae412eec..2a2f99fbcb16 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -84,7 +84,7 @@ static inline u32 jhash(const void *key, u32 length, u32 initval) /* A special optimized version that handles 1 or more of u32s. * The length parameter here is the number of u32s in the key. */ -static inline u32 jhash2(u32 *k, u32 length, u32 initval) +static inline u32 jhash2(const u32 *k, u32 length, u32 initval) { u32 a, b, c, len; -- cgit v1.2.3-59-g8ed1b From edda553c324bdc5bb5c2d553b524cab37058a855 Mon Sep 17 00:00:00 2001 From: Yasuyuki Kozakai Date: Wed, 14 Mar 2007 16:43:37 -0700 Subject: [NETFILTER]: nf_conntrack: add __nf_copy() to copy members in skb This unifies the codes to copy netfilter related datas. Note that __nf_copy() assumes destination skb doesn't have any netfilter related members. Signed-off-by: Yasuyuki Kozakai Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/skbuff.h | 17 +++++++++++++++++ net/core/skbuff.c | 28 ++-------------------------- 2 files changed, 19 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 39a6da243b24..62ab1ab07028 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1498,8 +1498,25 @@ static inline void nf_reset(struct sk_buff *skb) #endif } +/* Note: This doesn't put any conntrack and bridge info in dst. 
*/ +static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) +{ + dst->nfct = src->nfct; + nf_conntrack_get(src->nfct); + dst->nfctinfo = src->nfctinfo; +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + dst->nfct_reasm = src->nfct_reasm; + nf_conntrack_get_reasm(src->nfct_reasm); +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + dst->nf_bridge = src->nf_bridge; + nf_bridge_get(src->nf_bridge); +#endif +} + #else /* CONFIG_NETFILTER */ static inline void nf_reset(struct sk_buff *skb) {} +static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) {} #endif /* CONFIG_NETFILTER */ #ifdef CONFIG_NETWORK_SECMARK diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b242020c02f7..408cc99af6b3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -422,19 +422,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) C(protocol); n->destructor = NULL; C(mark); -#ifdef CONFIG_NETFILTER - C(nfct); - nf_conntrack_get(skb->nfct); - C(nfctinfo); -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - C(nfct_reasm); - nf_conntrack_get_reasm(skb->nfct_reasm); -#endif -#ifdef CONFIG_BRIDGE_NETFILTER - C(nf_bridge); - nf_bridge_get(skb->nf_bridge); -#endif -#endif /*CONFIG_NETFILTER*/ + __nf_copy(n, skb); #ifdef CONFIG_NET_SCHED C(tc_index); #ifdef CONFIG_NET_CLS_ACT @@ -483,22 +471,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->tstamp = old->tstamp; new->destructor = NULL; new->mark = old->mark; -#ifdef CONFIG_NETFILTER - new->nfct = old->nfct; - nf_conntrack_get(old->nfct); - new->nfctinfo = old->nfctinfo; -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - new->nfct_reasm = old->nfct_reasm; - nf_conntrack_get_reasm(old->nfct_reasm); -#endif + __nf_copy(new, old); #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) new->ipvs_property = old->ipvs_property; #endif -#ifdef CONFIG_BRIDGE_NETFILTER - new->nf_bridge = old->nf_bridge; - nf_bridge_get(old->nf_bridge); -#endif -#endif #ifdef CONFIG_NET_SCHED #ifdef CONFIG_NET_CLS_ACT new->tc_verd = old->tc_verd; -- cgit v1.2.3-59-g8ed1b From e7ac05f3407a3fb5a1b2ff5d5554899eaa0a10a3 Mon Sep 17 00:00:00 2001 From: Yasuyuki Kozakai Date: Wed, 14 Mar 2007 16:44:01 -0700 Subject: [NETFILTER]: nf_conntrack: add nf_copy() to safely copy members in skb This unifies the codes to copy netfilter related datas. Before copying, nf_copy() puts original members in destination skb. Signed-off-by: Yasuyuki Kozakai Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 13 +++++++++++++ net/ipv4/ip_output.c | 13 +------------ net/ipv6/ip6_output.c | 18 +----------------- 3 files changed, 15 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 62ab1ab07028..47c57be97d43 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1514,9 +1514,22 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) #endif } +static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) +{ + nf_conntrack_put(dst->nfct); +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put_reasm(dst->nfct_reasm); +#endif +#ifdef CONFIG_BRIDGE_NETFILTER + nf_bridge_put(dst->nf_bridge); +#endif + __nf_copy(dst, src); +} + #else /* CONFIG_NETFILTER */ static inline void nf_reset(struct sk_buff *skb) {} static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) {} +static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) {} #endif /* CONFIG_NETFILTER */ #ifdef CONFIG_NETWORK_SECMARK diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 11029b9d4cf7..11ab100d6c6c 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -384,20 +384,9 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif -#ifdef CONFIG_NETFILTER - /* Connection association is same as pre-frag packet */ - nf_conntrack_put(to->nfct); - to->nfct = from->nfct; - nf_conntrack_get(to->nfct); - to->nfctinfo = from->nfctinfo; + nf_copy(to, from); #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) to->ipvs_property = from->ipvs_property; -#endif -#ifdef CONFIG_BRIDGE_NETFILTER - nf_bridge_put(to->nf_bridge); - to->nf_bridge = from->nf_bridge; - nf_bridge_get(to->nf_bridge); -#endif #endif skb_copy_secmark(to, from); } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 0f4434eff66a..49523c2a9f10 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -504,23 +504,7 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif -#ifdef CONFIG_NETFILTER - /* Connection association is same as pre-frag packet */ - nf_conntrack_put(to->nfct); - to->nfct = from->nfct; - nf_conntrack_get(to->nfct); - to->nfctinfo = from->nfctinfo; -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - nf_conntrack_put_reasm(to->nfct_reasm); - to->nfct_reasm = from->nfct_reasm; - nf_conntrack_get_reasm(to->nfct_reasm); -#endif -#ifdef CONFIG_BRIDGE_NETFILTER - nf_bridge_put(to->nf_bridge); - to->nf_bridge = from->nf_bridge; - nf_bridge_get(to->nf_bridge); -#endif -#endif + nf_copy(to, from); skb_copy_secmark(to, from); } -- cgit v1.2.3-59-g8ed1b From c8e2078cfe414a99cf6f2f2f1d78c7e75392e9d4 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 14 Mar 2007 16:45:19 -0700 Subject: [NETFILTER]: ctnetlink: add support for internal tcp connection tracking flags handling This patch let userspace programs set the IP_CT_TCP_BE_LIBERAL flag to force the pickup of established connections. Signed-off-by: Pablo Neira Ayuso Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/netfilter/nf_conntrack_tcp.h | 5 +++ include/linux/netfilter/nfnetlink_conntrack.h | 4 +++ net/netfilter/nf_conntrack_proto_tcp.c | 45 ++++++++++++++++++++++++++- 3 files changed, 53 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h index 007af4c2770b..22ce29995f13 100644 --- a/include/linux/netfilter/nf_conntrack_tcp.h +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -30,6 +30,11 @@ enum tcp_conntrack { /* Be liberal in window checking */ #define IP_CT_TCP_FLAG_BE_LIBERAL 0x08 +struct nf_ct_tcp_flags { + u_int8_t flags; + u_int8_t mask; +}; + #ifdef __KERNEL__ struct ip_ct_tcp_state { diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index b5883ccee295..d7c35039721e 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h @@ -83,6 +83,10 @@ enum ctattr_protoinfo { enum ctattr_protoinfo_tcp { CTA_PROTOINFO_TCP_UNSPEC, CTA_PROTOINFO_TCP_STATE, + CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, + CTA_PROTOINFO_TCP_WSCALE_REPLY, + CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, + CTA_PROTOINFO_TCP_FLAGS_REPLY, __CTA_PROTOINFO_TCP_MAX }; #define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1) diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index a1363626bccc..8439768f9d1c 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -1101,11 +1101,26 @@ static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa, const struct nf_conn *ct) { struct nfattr *nest_parms; + struct nf_ct_tcp_flags tmp = {}; read_lock_bh(&tcp_lock); nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), &ct->proto.tcp.state); + + NFA_PUT(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, sizeof(u_int8_t), + &ct->proto.tcp.seen[0].td_scale); + + NFA_PUT(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, sizeof(u_int8_t), + &ct->proto.tcp.seen[1].td_scale); + + tmp.flags = ct->proto.tcp.seen[0].flags; + NFA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, + sizeof(struct nf_ct_tcp_flags), &tmp); + + tmp.flags = ct->proto.tcp.seen[1].flags; + NFA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, + sizeof(struct nf_ct_tcp_flags), &tmp); read_unlock_bh(&tcp_lock); NFA_NEST_END(skb, nest_parms); @@ -1118,7 +1133,11 @@ nfattr_failure: } static const size_t cta_min_tcp[CTA_PROTOINFO_TCP_MAX] = { - [CTA_PROTOINFO_TCP_STATE-1] = sizeof(u_int8_t), + [CTA_PROTOINFO_TCP_STATE-1] = sizeof(u_int8_t), + [CTA_PROTOINFO_TCP_WSCALE_ORIGINAL-1] = sizeof(u_int8_t), + [CTA_PROTOINFO_TCP_WSCALE_REPLY-1] = sizeof(u_int8_t), + [CTA_PROTOINFO_TCP_FLAGS_ORIGINAL-1] = sizeof(struct nf_ct_tcp_flags), + [CTA_PROTOINFO_TCP_FLAGS_REPLY-1] = sizeof(struct nf_ct_tcp_flags) }; static int nfattr_to_tcp(struct nfattr *cda[], struct nf_conn *ct) @@ -1142,6 +1161,30 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct nf_conn *ct) write_lock_bh(&tcp_lock); ct->proto.tcp.state = *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); + + if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL-1]) { + struct nf_ct_tcp_flags *attr = + NFA_DATA(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL-1]); + ct->proto.tcp.seen[0].flags &= ~attr->mask; + ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask; + } + + if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY-1]) { + struct nf_ct_tcp_flags *attr = + NFA_DATA(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY-1]); + ct->proto.tcp.seen[1].flags &= ~attr->mask; + 
ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask; + } + + if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL-1] && + tb[CTA_PROTOINFO_TCP_WSCALE_REPLY-1] && + ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE && + ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) { + ct->proto.tcp.seen[0].td_scale = *(u_int8_t *) + NFA_DATA(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL-1]); + ct->proto.tcp.seen[1].td_scale = *(u_int8_t *) + NFA_DATA(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY-1]); + } write_unlock_bh(&tcp_lock); return 0; -- cgit v1.2.3-59-g8ed1b From 0a6114d94b6d6f82e81cb8e0d8b0d4cf50739fec Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 15 Mar 2007 21:08:55 -0300 Subject: [KBUILD]: Unifdef headers changed by the skb layer header refactorings Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/Kbuild | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/Kbuild b/include/linux/Kbuild index e81e301a4d71..ea86f2e02716 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -69,9 +69,7 @@ header-y += hdsmart.h header-y += hysdn_if.h header-y += i2c-dev.h header-y += i8k.h -header-y += icmp.h header-y += if_arcnet.h -header-y += if_arp.h header-y += if_bonding.h header-y += if_cablemodem.h header-y += if_fc.h @@ -88,7 +86,6 @@ header-y += if_tunnel.h header-y += in6.h header-y += in_route.h header-y += ioctl.h -header-y += ip.h header-y += ipmi_msgdefs.h header-y += ip_mp_alg.h header-y += ipsec.h @@ -210,8 +207,10 @@ unifdef-y += hiddev.h unifdef-y += hpet.h unifdef-y += i2c.h unifdef-y += i2o-dev.h +unifdef-y += icmp.h unifdef-y += icmpv6.h unifdef-y += if_addr.h +unifdef-y += if_arp.h unifdef-y += if_bridge.h unifdef-y += if_ec.h unifdef-y += if_eql.h @@ -231,6 +230,7 @@ unifdef-y += inet_diag.h unifdef-y += in.h unifdef-y += inotify.h unifdef-y += input.h +unifdef-y += ip.h unifdef-y += ipc.h unifdef-y += ipmi.h unifdef-y += ipv6.h -- cgit v1.2.3-59-g8ed1b From 641b9e0e8b7f96425da6ce98f3361e3af0baee29 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 16 Mar 2007 01:18:42 -0700 Subject: [NET_SCHED]: Use ktime as clocksource Get rid of the manual clock source selection mess and use ktime. Also use a scalar representation, which allows to clean up pkt_sched.h a bit more and results in less ktime_to_ns() calls in most cases. The PSCHED_US2JIFFIE/PSCHED_JIFFIE2US macros are implemented quite inefficient by this patch, following patches will convert all qdiscs to hrtimers and get rid of them entirely. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 169 ++++-------------------------------------------- kernel/hrtimer.c | 1 + net/sched/Kconfig | 56 ---------------- net/sched/sch_api.c | 77 +--------------------- net/sched/sch_hfsc.c | 31 +-------- 5 files changed, 19 insertions(+), 315 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index f6afee73235d..1c12afd113d6 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -2,6 +2,7 @@ #define __NET_PKT_SCHED_H #include +#include #include struct qdisc_walker @@ -37,176 +38,32 @@ static inline void *qdisc_priv(struct Qdisc *q) The things are not so bad, because we may use artifical clock evaluated by integration of network data flow in the most critical places. - - Note: we do not use fastgettimeofday. 
- The reason is that, when it is not the same thing as - gettimeofday, it returns invalid timestamp, which is - not updated, when net_bh is active. */ -/* General note about internal clock. - - Any clock source returns time intervals, measured in units - close to 1usec. With source CONFIG_NET_SCH_CLK_GETTIMEOFDAY it is precisely - microseconds, otherwise something close but different chosen to minimize - arithmetic cost. Ratio usec/internal untis in form nominator/denominator - may be read from /proc/net/psched. - */ - - -#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY - -typedef struct timeval psched_time_t; -typedef long psched_tdiff_t; - -#define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp)) -#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(usecs) -#define PSCHED_JIFFIE2US(delay) jiffies_to_usecs(delay) - -#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ - typedef u64 psched_time_t; typedef long psched_tdiff_t; -#ifdef CONFIG_NET_SCH_CLK_JIFFIES - -#if HZ < 96 -#define PSCHED_JSCALE 14 -#elif HZ >= 96 && HZ < 192 -#define PSCHED_JSCALE 13 -#elif HZ >= 192 && HZ < 384 -#define PSCHED_JSCALE 12 -#elif HZ >= 384 && HZ < 768 -#define PSCHED_JSCALE 11 -#elif HZ >= 768 -#define PSCHED_JSCALE 10 -#endif - -#define PSCHED_GET_TIME(stamp) ((stamp) = (get_jiffies_64()<>PSCHED_JSCALE) -#define PSCHED_JIFFIE2US(delay) ((delay)< - -extern psched_tdiff_t psched_clock_per_hz; -extern int psched_clock_scale; -extern psched_time_t psched_time_base; -extern cycles_t psched_time_mark; - -#define PSCHED_GET_TIME(stamp) \ -do { \ - cycles_t cur = get_cycles(); \ - if (sizeof(cycles_t) == sizeof(u32)) { \ - if (cur <= psched_time_mark) \ - psched_time_base += 0x100000000ULL; \ - psched_time_mark = cur; \ - (stamp) = (psched_time_base + cur)>>psched_clock_scale; \ - } else { \ - (stamp) = cur>>psched_clock_scale; \ - } \ -} while (0) -#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz) -#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz) - -#endif /* CONFIG_NET_SCH_CLK_CPU */ - -#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ - -#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY -#define PSCHED_TDIFF(tv1, tv2) \ -({ \ - int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \ - int __delta = (tv1).tv_usec - (tv2).tv_usec; \ - if (__delta_sec) { \ - switch (__delta_sec) { \ - default: \ - __delta = 0; \ - case 2: \ - __delta += USEC_PER_SEC; \ - case 1: \ - __delta += USEC_PER_SEC; \ - } \ - } \ - __delta; \ -}) - -static inline int -psched_tod_diff(int delta_sec, int bound) -{ - int delta; - - if (bound <= USEC_PER_SEC || delta_sec > (0x7FFFFFFF/USEC_PER_SEC)-1) - return bound; - delta = delta_sec * USEC_PER_SEC; - if (delta > bound || delta < 0) - delta = bound; - return delta; -} - -#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \ -({ \ - int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \ - int __delta = (tv1).tv_usec - (tv2).tv_usec; \ - switch (__delta_sec) { \ - default: \ - __delta = psched_tod_diff(__delta_sec, bound); break; \ - case 2: \ - __delta += USEC_PER_SEC; \ - case 1: \ - __delta += USEC_PER_SEC; \ - case 0: \ - if (__delta > bound || __delta < 0) \ - __delta = bound; \ - } \ - __delta; \ -}) - -#define PSCHED_TLESS(tv1, tv2) (((tv1).tv_usec < (tv2).tv_usec && \ - (tv1).tv_sec <= (tv2).tv_sec) || \ - (tv1).tv_sec < (tv2).tv_sec) - -#define PSCHED_TADD2(tv, delta, tv_res) \ -({ \ - int __delta = (tv).tv_usec + (delta); \ - (tv_res).tv_sec = (tv).tv_sec; \ - while (__delta >= USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \ - (tv_res).tv_usec = __delta; \ -}) - -#define 
PSCHED_TADD(tv, delta) \ -({ \ - (tv).tv_usec += (delta); \ - while ((tv).tv_usec >= USEC_PER_SEC) { (tv).tv_sec++; \ - (tv).tv_usec -= USEC_PER_SEC; } \ -}) - -/* Set/check that time is in the "past perfect"; - it depends on concrete representation of system time - */ - -#define PSCHED_SET_PASTPERFECT(t) ((t).tv_sec = 0) -#define PSCHED_IS_PASTPERFECT(t) ((t).tv_sec == 0) +/* Avoid doing 64 bit divide by 1000 */ +#define PSCHED_US2NS(x) ((s64)(x) << 10) +#define PSCHED_NS2US(x) ((x) >> 10) -#define PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; }) +#define PSCHED_TICKS_PER_SEC PSCHED_NS2US(NSEC_PER_SEC) +#define PSCHED_GET_TIME(stamp) \ + ((stamp) = PSCHED_NS2US(ktime_to_ns(ktime_get()))) -#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ +#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(PSCHED_US2NS((usecs)) / NSEC_PER_USEC) +#define PSCHED_JIFFIE2US(delay) PSCHED_NS2US(jiffies_to_usecs((delay)) * NSEC_PER_USEC) -#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2)) +#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2)) #define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \ - min_t(long long, (tv1) - (tv2), bound) - - -#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2)) + min_t(long long, (tv1) - (tv2), bound) +#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2)) #define PSCHED_TADD2(tv, delta, tv_res) ((tv_res) = (tv) + (delta)) -#define PSCHED_TADD(tv, delta) ((tv) += (delta)) +#define PSCHED_TADD(tv, delta) ((tv) += (delta)) #define PSCHED_SET_PASTPERFECT(t) ((t) = 0) #define PSCHED_IS_PASTPERFECT(t) ((t) == 0) #define PSCHED_AUDIT_TDIFF(t) -#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ - extern struct Qdisc_ops pfifo_qdisc_ops; extern struct Qdisc_ops bfifo_qdisc_ops; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index b74860aaf5f1..f5cfde8c9025 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -59,6 +59,7 @@ ktime_t ktime_get(void) return timespec_to_ktime(now); } +EXPORT_SYMBOL_GPL(ktime_get); /** * ktime_get_real - get the real (wall-) time in ktime_t format diff --git a/net/sched/Kconfig b/net/sched/Kconfig index f4544dd86476..475df8449be9 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -46,62 +46,6 @@ config NET_SCH_FIFO if NET_SCHED -choice - prompt "Packet scheduler clock source" - default NET_SCH_CLK_GETTIMEOFDAY - ---help--- - Packet schedulers need a monotonic clock that increments at a static - rate. The kernel provides several suitable interfaces, each with - different properties: - - - high resolution (us or better) - - fast to read (minimal locking, no i/o access) - - synchronized on all processors - - handles cpu clock frequency changes - - but nothing provides all of the above. - -config NET_SCH_CLK_JIFFIES - bool "Timer interrupt" - ---help--- - Say Y here if you want to use the timer interrupt (jiffies) as clock - source. This clock source is fast, synchronized on all processors and - handles cpu clock frequency changes, but its resolution is too low - for accurate shaping except at very low speed. - -config NET_SCH_CLK_GETTIMEOFDAY - bool "gettimeofday" - ---help--- - Say Y here if you want to use gettimeofday as clock source. This clock - source has high resolution, is synchronized on all processors and - handles cpu clock frequency changes, but it is slow. - - Choose this if you need a high resolution clock source but can't use - the CPU's cycle counter. - -# don't allow on SMP x86 because they can have unsynchronized TSCs. 
-# gettimeofday is a good alternative -config NET_SCH_CLK_CPU - bool "CPU cycle counter" - depends on ((X86_TSC || X86_64) && !SMP) || ALPHA || SPARC64 || PPC64 || IA64 - ---help--- - Say Y here if you want to use the CPU's cycle counter as clock source. - This is a cheap and high resolution clock source, but on some - architectures it is not synchronized on all processors and doesn't - handle cpu clock frequency changes. - - The useable cycle counters are: - - x86/x86_64 - Timestamp Counter - alpha - Cycle Counter - sparc64 - %ticks register - ppc64 - Time base - ia64 - Interval Time Counter - - Choose this if your CPU's cycle counter is working properly. - -endchoice - comment "Queueing/Scheduling" config NET_SCH_CBQ diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 4a927a5e1fa6..d71bf79eb80b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1175,15 +1175,12 @@ reclassify: return -1; } -static int psched_us_per_tick = 1; -static int psched_tick_per_us = 1; - #ifdef CONFIG_PROC_FS static int psched_show(struct seq_file *seq, void *v) { seq_printf(seq, "%08x %08x %08x %08x\n", - psched_tick_per_us, psched_us_per_tick, - 1000000, HZ); + (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1), + 1000000, HZ); return 0; } @@ -1202,80 +1199,10 @@ static const struct file_operations psched_fops = { }; #endif -#ifdef CONFIG_NET_SCH_CLK_CPU -psched_tdiff_t psched_clock_per_hz; -int psched_clock_scale; -EXPORT_SYMBOL(psched_clock_per_hz); -EXPORT_SYMBOL(psched_clock_scale); - -psched_time_t psched_time_base; -cycles_t psched_time_mark; -EXPORT_SYMBOL(psched_time_mark); -EXPORT_SYMBOL(psched_time_base); - -/* - * Periodically adjust psched_time_base to avoid overflow - * with 32-bit get_cycles(). Safe up to 4GHz CPU. - */ -static void psched_tick(unsigned long); -static DEFINE_TIMER(psched_timer, psched_tick, 0, 0); - -static void psched_tick(unsigned long dummy) -{ - if (sizeof(cycles_t) == sizeof(u32)) { - psched_time_t dummy_stamp; - PSCHED_GET_TIME(dummy_stamp); - psched_timer.expires = jiffies + 1*HZ; - add_timer(&psched_timer); - } -} - -int __init psched_calibrate_clock(void) -{ - psched_time_t stamp, stamp1; - struct timeval tv, tv1; - psched_tdiff_t delay; - long rdelay; - unsigned long stop; - - psched_tick(0); - stop = jiffies + HZ/10; - PSCHED_GET_TIME(stamp); - do_gettimeofday(&tv); - while (time_before(jiffies, stop)) { - barrier(); - cpu_relax(); - } - PSCHED_GET_TIME(stamp1); - do_gettimeofday(&tv1); - - delay = PSCHED_TDIFF(stamp1, stamp); - rdelay = tv1.tv_usec - tv.tv_usec; - rdelay += (tv1.tv_sec - tv.tv_sec)*1000000; - if (rdelay > delay) - return -1; - delay /= rdelay; - psched_tick_per_us = delay; - while ((delay>>=1) != 0) - psched_clock_scale++; - psched_us_per_tick = 1<>psched_clock_scale; - return 0; -} -#endif - static int __init pktsched_init(void) { struct rtnetlink_link *link_p; -#ifdef CONFIG_NET_SCH_CLK_CPU - if (psched_calibrate_clock() < 0) - return -1; -#elif defined(CONFIG_NET_SCH_CLK_JIFFIES) - psched_tick_per_us = HZ< -#undef PSCHED_GET_TIME -#define PSCHED_GET_TIME(stamp) \ -do { \ - struct timeval tv; \ - do_gettimeofday(&tv); \ - (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec; \ -} while (0) -#endif - #define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */ @@ -394,28 +380,17 @@ cftree_update(struct hfsc_class *cl) * ism: (psched_us/byte) << ISM_SHIFT * dx: psched_us * - * Clock source resolution (CONFIG_NET_SCH_CLK_*) - * JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us. 
- * CPU: resolution is between 0.5us and 1us. - * GETTIMEOFDAY: resolution is exactly 1us. + * The clock source resolution with ktime is 1.024us. * * sm and ism are scaled in order to keep effective digits. * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective * digits in decimal using the following table. * - * Note: We can afford the additional accuracy (altq hfsc keeps at most - * 3 effective digits) thanks to the fact that linux clock is bounded - * much more tightly. - * * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps * ------------+------------------------------------------------------- - * bytes/0.5us 6.25e-3 62.5e-3 625e-3 6250e-e 62500e-3 - * bytes/us 12.5e-3 125e-3 1250e-3 12500e-3 125000e-3 - * bytes/1.27us 15.875e-3 158.75e-3 1587.5e-3 15875e-3 158750e-3 + * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3 * - * 0.5us/byte 160 16 1.6 0.16 0.016 - * us/byte 80 8 0.8 0.08 0.008 - * 1.27us/byte 63 6.3 0.63 0.063 0.0063 + * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125 */ #define SM_SHIFT 20 #define ISM_SHIFT 18 -- cgit v1.2.3-59-g8ed1b From 4179477f637caa730626bd597fdf28c5bad73565 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 16 Mar 2007 01:19:15 -0700 Subject: [NET_SCHED]: Add hrtimer based qdisc watchdog Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 10 ++++++++++ net/sched/sch_api.c | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 1c12afd113d6..b090d55d5eb8 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -64,6 +64,16 @@ typedef long psched_tdiff_t; #define PSCHED_IS_PASTPERFECT(t) ((t) == 0) #define PSCHED_AUDIT_TDIFF(t) +struct qdisc_watchdog { + struct hrtimer timer; + struct Qdisc *qdisc; +}; + +extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); +extern void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, + psched_time_t expires); +extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); + extern struct Qdisc_ops pfifo_qdisc_ops; extern struct Qdisc_ops bfifo_qdisc_ops; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index d71bf79eb80b..6bc395cb235b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -291,6 +292,41 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab) } } +static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) +{ + struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, + timer); + + wd->qdisc->flags &= ~TCQ_F_THROTTLED; + netif_schedule(wd->qdisc->dev); + return HRTIMER_NORESTART; +} + +void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) +{ + hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + wd->timer.function = qdisc_watchdog; + wd->qdisc = qdisc; +} +EXPORT_SYMBOL(qdisc_watchdog_init); + +void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) +{ + ktime_t time; + + wd->qdisc->flags |= TCQ_F_THROTTLED; + time = ktime_set(0, 0); + time = ktime_add_ns(time, PSCHED_US2NS(expires)); + hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); +} +EXPORT_SYMBOL(qdisc_watchdog_schedule); + +void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) +{ + hrtimer_cancel(&wd->timer); + wd->qdisc->flags &= ~TCQ_F_THROTTLED; +} +EXPORT_SYMBOL(qdisc_watchdog_cancel); /* Allocate an unique handle from space managed by kernel */ -- cgit v1.2.3-59-g8ed1b 
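
The watchdog helpers introduced above (qdisc_watchdog_init, qdisc_watchdog_schedule, qdisc_watchdog_cancel) replace the per-qdisc timer_list machinery that previously rescheduled the device queue. As a rough, illustrative sketch only -- not taken from any patch in this series -- a queueing discipline built on this API would typically arm the watchdog when the next packet is not yet eligible to leave. The private data layout, the "next_send" field and the example_* function names are invented for the example; locking, statistics and error handling are omitted.

	#include <linux/skbuff.h>
	#include <net/pkt_sched.h>

	/* Hypothetical qdisc private data; "next_send" is invented for the example. */
	struct example_sched_data {
		struct qdisc_watchdog	watchdog;
		psched_time_t		next_send;	/* earliest time a packet may leave */
	};

	static int example_init(struct Qdisc *sch, struct rtattr *opt)
	{
		struct example_sched_data *q = qdisc_priv(sch);

		qdisc_watchdog_init(&q->watchdog, sch);
		return 0;
	}

	static struct sk_buff *example_dequeue(struct Qdisc *sch)
	{
		struct example_sched_data *q = qdisc_priv(sch);
		psched_time_t now;

		PSCHED_GET_TIME(now);
		if (PSCHED_TLESS(now, q->next_send)) {
			/* Too early: arm the hrtimer and mark the qdisc throttled.
			 * The watchdog callback clears TCQ_F_THROTTLED and calls
			 * netif_schedule() on the device when it fires. */
			qdisc_watchdog_schedule(&q->watchdog, q->next_send);
			return NULL;
		}
		return __skb_dequeue(&sch->q);
	}

	static void example_reset(struct Qdisc *sch)
	{
		struct example_sched_data *q = qdisc_priv(sch);

		qdisc_watchdog_cancel(&q->watchdog);
		skb_queue_purge(&sch->q);
	}

Compared with the old scheme of PSCHED_US2JIFFIE() plus a private timer, the expiry is delivered at hrtimer resolution rather than jiffy resolution, which is what allows the jiffie conversion macros to be removed in the following patch.
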
From 00c04af9df3d26e5a8093da850e982a7b6aeada7 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 16 Mar 2007 01:23:02 -0700 Subject: [NET_SCHED]: kill jiffie conversion macros Now that all packet schedulers have been converted to hrtimers most users of PSCHED_JIFFIE2US and PSCHED_US2JIFFIE are gone. The remaining users use it to convert external time units to packet scheduler clock ticks, so use PSCHED_TICKS_PER_SEC instead. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 3 --- net/sched/sch_hfsc.c | 12 ++++++------ net/sched/sch_htb.c | 2 +- 3 files changed, 7 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index b090d55d5eb8..6555e57ff6c9 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -51,9 +51,6 @@ typedef long psched_tdiff_t; #define PSCHED_GET_TIME(stamp) \ ((stamp) = PSCHED_NS2US(ktime_to_ns(ktime_get()))) -#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(PSCHED_US2NS((usecs)) / NSEC_PER_USEC) -#define PSCHED_JIFFIE2US(delay) PSCHED_NS2US(jiffies_to_usecs((delay)) * NSEC_PER_USEC) - #define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2)) #define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \ min_t(long long, (tv1) - (tv2), bound) diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 3cc2714fd5ae..5197b6caaf2d 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -434,8 +434,8 @@ m2sm(u32 m) u64 sm; sm = ((u64)m << SM_SHIFT); - sm += PSCHED_JIFFIE2US(HZ) - 1; - do_div(sm, PSCHED_JIFFIE2US(HZ)); + sm += PSCHED_TICKS_PER_SEC - 1; + do_div(sm, PSCHED_TICKS_PER_SEC); return sm; } @@ -448,7 +448,7 @@ m2ism(u32 m) if (m == 0) ism = HT_INFINITY; else { - ism = ((u64)PSCHED_JIFFIE2US(HZ) << ISM_SHIFT); + ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT); ism += m - 1; do_div(ism, m); } @@ -461,7 +461,7 @@ d2dx(u32 d) { u64 dx; - dx = ((u64)d * PSCHED_JIFFIE2US(HZ)); + dx = ((u64)d * PSCHED_TICKS_PER_SEC); dx += USEC_PER_SEC - 1; do_div(dx, USEC_PER_SEC); return dx; @@ -473,7 +473,7 @@ sm2m(u64 sm) { u64 m; - m = (sm * PSCHED_JIFFIE2US(HZ)) >> SM_SHIFT; + m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT; return (u32)m; } @@ -484,7 +484,7 @@ dx2d(u64 dx) u64 d; d = dx * USEC_PER_SEC; - do_div(d, PSCHED_JIFFIE2US(HZ)); + do_div(d, PSCHED_TICKS_PER_SEC); return (u32)d; } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 4d84200f097b..f76c20c0a109 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1469,7 +1469,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, /* set class to be in HTB_CAN_SEND state */ cl->tokens = hopt->buffer; cl->ctokens = hopt->cbuffer; - cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60); /* 1min */ + cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */ PSCHED_GET_TIME(cl->t_c); cl->cmode = HTB_CAN_SEND; -- cgit v1.2.3-59-g8ed1b From cfe1fc7759fdacb0c650b575daed1692bf3eaece Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 16 Mar 2007 17:26:39 -0300 Subject: [SK_BUFF]: Introduce skb_network_header_len For the common sequence "skb->h.raw - skb->nh.raw", similar to skb->mac_len, that is precalculated tho, don't think we need to bloat skb with one more member, so just use this new helper, reducing the number of non-skbuff.h references to the layer headers even more. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/net/gianfar.c | 2 +- drivers/net/pasemi_mac.c | 4 ++-- include/linux/skbuff.h | 5 +++++ net/core/skbuff.c | 2 +- net/ipv4/ip_output.c | 2 +- net/ipv6/esp6.c | 3 +-- net/ipv6/exthdrs.c | 8 ++++---- net/ipv6/ip6_input.c | 2 +- net/ipv6/ip6_output.c | 4 ++-- net/ipv6/mcast.c | 2 +- net/ipv6/netfilter/nf_conntrack_reasm.c | 2 +- net/ipv6/raw.c | 2 +- net/ipv6/reassembly.c | 8 +++++--- net/ipv6/xfrm6_policy.c | 2 +- 14 files changed, 27 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index b9f44602c5e1..b666a0cc0642 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -953,7 +953,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) * l4os is the distance between the start of the * l3 hdr and the l4 hdr */ fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); - fcb->l4os = (u16)(skb->h.raw - skb->nh.raw); + fcb->l4os = skb_network_header_len(skb); fcb->flags = flags; } diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 1d8129986cc5..76fe9dd8e841 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -734,12 +734,12 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) switch (ip_hdr(skb)->protocol) { case IPPROTO_TCP: dflags |= XCT_MACTX_CSUM_TCP; - dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); + dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); dflags |= XCT_MACTX_IPO(nh - skb->data); break; case IPPROTO_UDP: dflags |= XCT_MACTX_CSUM_UDP; - dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); + dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); dflags |= XCT_MACTX_IPO(nh - skb->data); break; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 47c57be97d43..230dd43fc9b3 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -992,6 +992,11 @@ static inline int skb_network_offset(const struct sk_buff *skb) return skb->nh.raw - skb->data; } +static inline u32 skb_network_header_len(const struct sk_buff *skb) +{ + return skb->h.raw - skb->nh.raw; +} + static inline unsigned char *skb_mac_header(const struct sk_buff *skb) { return skb->mac.raw; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 87e000633f41..f38af6c01b12 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1906,7 +1906,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) skb_reserve(nskb, headroom); skb_reset_mac_header(nskb); skb_set_network_header(nskb, skb->mac_len); - nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw); + nskb->h.raw = nskb->nh.raw + skb_network_header_len(skb); memcpy(skb_put(nskb, doffset), skb->data, doffset); if (!sg) { diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 11a6ac756f8c..02988fb262d6 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1187,7 +1187,7 @@ int ip_push_pending_frames(struct sock *sk) if (skb->data < skb_network_header(skb)) __skb_pull(skb, skb_network_offset(skb)); while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { - __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw); + __skb_pull(tmp_skb, skb_network_header_len(skb)); *tail_skb = tmp_skb; tail_skb = &(tmp_skb->next); skb->len += tmp_skb->len; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 436eb9e6a6cf..7fdf84dee73f 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -147,8 +147,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); int alen 
= esp->auth.icv_trunc_len; int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen; - - int hdr_len = skb->h.raw - skb->nh.raw; + int hdr_len = skb_network_header_len(skb); int nfrags; int ret = 0; diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index f763409ea740..f34cc2bd489a 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -143,7 +143,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp) struct sk_buff *skb = *skbp; struct tlvtype_proc *curr; const unsigned char *nh = skb_network_header(skb); - int off = skb->h.raw - skb->nh.raw; + int off = skb_network_header_len(skb); int len = (skb_transport_header(skb)[1] + 1) << 3; if (skb_transport_offset(skb) + len > skb_headlen(skb)) @@ -297,7 +297,7 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp) return -1; } - opt->lastopt = opt->dst1 = skb->h.raw - skb->nh.raw; + opt->lastopt = opt->dst1 = skb_network_header_len(skb); #ifdef CONFIG_IPV6_MIP6 dstbuf = opt->dst1; #endif @@ -443,7 +443,7 @@ looped_back: break; } - opt->lastopt = opt->srcrt = skb->h.raw - skb->nh.raw; + opt->lastopt = opt->srcrt = skb_network_header_len(skb); skb->h.raw += (hdr->hdrlen + 1) << 3; opt->dst0 = opt->dst1; opt->dst1 = 0; @@ -738,7 +738,7 @@ int ipv6_parse_hopopts(struct sk_buff **skbp) /* * skb_network_header(skb) is equal to skb->data, and - * skb->h.raw - skb->nh.raw is always equal to + * skb_network_header_len(skb) is always equal to * sizeof(struct ipv6hdr) by definition of * hop-by-hop options. */ diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 44275411d1a8..cf0c4406b59e 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -182,7 +182,7 @@ resubmit: nf_reset(skb); skb_postpull_rcsum(skb, skb_network_header(skb), - skb->h.raw - skb->nh.raw); + skb_network_header_len(skb)); hdr = ipv6_hdr(skb); if (ipv6_addr_is_multicast(&hdr->daddr) && !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 32e8c3f73c79..57a326080757 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1325,7 +1325,7 @@ int ip6_push_pending_frames(struct sock *sk) if (skb->data < skb_network_header(skb)) __skb_pull(skb, skb_network_offset(skb)); while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { - __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw); + __skb_pull(tmp_skb, skb_network_header_len(skb)); *tail_skb = tmp_skb; tail_skb = &(tmp_skb->next); skb->len += tmp_skb->len; @@ -1337,7 +1337,7 @@ int ip6_push_pending_frames(struct sock *sk) } ipv6_addr_copy(final_dst, &fl->fl6_dst); - __skb_pull(skb, skb->h.raw - skb->nh.raw); + __skb_pull(skb, skb_network_header_len(skb)); if (opt && opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); if (opt && opt->opt_nflen) diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 07e86ebb46b8..4c45bcce75e8 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1168,7 +1168,7 @@ int igmp6_event_query(struct sk_buff *skb) /* compute payload length excluding extension headers */ len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr); - len -= skb->h.raw - skb->nh.raw; + len -= skb_network_header_len(skb); /* Drop queries with not link local source */ if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 84ce5b3c4b21..490e7e151f2d 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -657,7 +657,7 @@ 
nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) /* Yes, and fold redundant checksum back. 8) */ if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_partial(skb_network_header(head), - head->h.raw - head->nh.raw, + skb_network_header_len(head), head->csum); fq->fragments = NULL; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 116257d59a36..f925ca7c1a50 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -362,7 +362,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) if (skb->ip_summed == CHECKSUM_COMPLETE) { skb_postpull_rcsum(skb, skb_network_header(skb), - skb->h.raw - skb->nh.raw); + skb_network_header_len(skb)); if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len, inet->num, skb->csum)) diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 31d4271ea540..6dfacfa7a599 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -679,7 +679,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, /* Yes, and fold redundant checksum back. 8) */ if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_partial(skb_network_header(head), - head->h.raw - head->nh.raw, + skb_network_header_len(head), head->csum); rcu_read_lock(); @@ -715,13 +715,15 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) /* Jumbo payload inhibits frag. header */ if (hdr->payload_len==0) { IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); - icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, + skb_network_header_len(skb)); return -1; } if (!pskb_may_pull(skb, (skb_transport_offset(skb) + sizeof(struct frag_hdr)))) { IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); - icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, + skb_network_header_len(skb)); return -1; } diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index b93bfb87f494..ef746d4f3131 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -270,7 +270,7 @@ error: static inline void _decode_session6(struct sk_buff *skb, struct flowi *fl) { - u16 offset = skb->h.raw - skb->nh.raw; + u16 offset = skb_network_header_len(skb); struct ipv6hdr *hdr = ipv6_hdr(skb); struct ipv6_opt_hdr *exthdr; const unsigned char *nh = skb_network_header(skb); -- cgit v1.2.3-59-g8ed1b From b0e380b1d8a8e0aca215df97702f99815f05c094 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 10 Apr 2007 21:21:55 -0700 Subject: [SK_BUFF]: unions of just one member don't get anything done, kill them Renaming skb->h to skb->transport_header, skb->nh to skb->network_header and skb->mac to skb->mac_header, to match the names of the associated helpers (skb[_[re]set]_{transport,network,mac}_header). Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- drivers/net/arcnet/arcnet.c | 17 ++++++---- drivers/net/bonding/bond_3ad.c | 4 +-- drivers/net/bonding/bond_alb.c | 2 +- drivers/net/wireless/hostap/hostap_80211_rx.c | 2 +- include/linux/if_vlan.h | 4 +-- include/linux/skbuff.h | 49 +++++++++++---------------- net/802/psnap.c | 2 +- net/8021q/vlan_dev.c | 2 +- net/appletalk/ddp.c | 8 ++--- net/bridge/br_netfilter.c | 24 ++++++------- net/core/dev.c | 6 ++-- net/core/pktgen.c | 16 ++++----- net/core/skbuff.c | 21 ++++++------ net/ieee80211/ieee80211_rx.c | 2 +- net/ipv4/ah4.c | 4 +-- net/ipv4/igmp.c | 2 +- net/ipv4/ip_gre.c | 2 +- net/ipv4/ip_output.c | 11 +++--- net/ipv4/ipcomp.c | 2 +- net/ipv4/ipip.c | 4 +-- net/ipv4/ipmr.c | 8 ++--- net/ipv4/ipvs/ip_vs_xmit.c | 4 +-- net/ipv4/netfilter/ipt_LOG.c | 2 +- net/ipv4/netfilter/ipt_ULOG.c | 2 +- net/ipv4/raw.c | 2 +- net/ipv4/xfrm4_mode_beet.c | 4 +-- net/ipv4/xfrm4_mode_transport.c | 8 ++--- net/ipv4/xfrm4_mode_tunnel.c | 2 +- net/ipv6/ah6.c | 8 ++--- net/ipv6/exthdrs.c | 6 ++-- net/ipv6/ip6_input.c | 2 +- net/ipv6/ip6_output.c | 11 +++--- net/ipv6/ip6_tunnel.c | 4 +-- net/ipv6/ipcomp6.c | 2 +- net/ipv6/netfilter/ip6t_LOG.c | 2 +- net/ipv6/netfilter/nf_conntrack_reasm.c | 4 +-- net/ipv6/raw.c | 2 +- net/ipv6/reassembly.c | 8 ++--- net/ipv6/sit.c | 4 +-- net/ipv6/xfrm6_mode_beet.c | 4 +-- net/ipv6/xfrm6_mode_transport.c | 4 +-- net/ipv6/xfrm6_mode_tunnel.c | 4 +-- net/llc/llc_input.c | 2 +- net/packet/af_packet.c | 28 +++++++-------- net/sctp/input.c | 8 ++--- net/sctp/ipv6.c | 8 ++--- 46 files changed, 162 insertions(+), 165 deletions(-) (limited to 'include') diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 83004fdab0a4..681e20b8466f 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -519,9 +519,12 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev, * real header when we do rebuild_header. */ *(uint16_t *) skb_push(skb, 2) = type; - if (skb->nh.raw - skb->mac.raw != 2) + /* + * XXX: Why not use skb->mac_len? + */ + if (skb->network_header - skb->mac_header != 2) BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n", - (int)(skb->nh.raw - skb->mac.raw)); + (int)(skb->network_header - skb->mac_header)); return -2; /* return error -- can't transmit yet! */ } else { @@ -554,11 +557,13 @@ static int arcnet_rebuild_header(struct sk_buff *skb) unsigned short type; uint8_t daddr=0; struct ArcProto *proto; - - if (skb->nh.raw - skb->mac.raw != 2) { + /* + * XXX: Why not use skb->mac_len? + */ + if (skb->network_header - skb->mac_header != 2) { BUGMSG(D_NORMAL, - "rebuild_header: shouldn't be here! (hdrsize=%d)\n", - (int)(skb->nh.raw - skb->mac.raw)); + "rebuild_header: shouldn't be here! 
(hdrsize=%d)\n", + (int)(skb->network_header - skb->mac_header)); return 0; } type = *(uint16_t *) skb_pull(skb, 2); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 05c870d6f6c3..7e03f41ae2c2 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -885,7 +885,7 @@ static int ad_lacpdu_send(struct port *port) skb->dev = slave->dev; skb_reset_mac_header(skb); - skb->nh.raw = skb->mac.raw + ETH_HLEN; + skb->network_header = skb->mac_header + ETH_HLEN; skb->protocol = PKT_TYPE_LACPDU; skb->priority = TC_PRIO_CONTROL; @@ -929,7 +929,7 @@ static int ad_marker_send(struct port *port, struct marker *marker) skb->dev = slave->dev; skb_reset_mac_header(skb); - skb->nh.raw = skb->mac.raw + ETH_HLEN; + skb->network_header = skb->mac_header + ETH_HLEN; skb->protocol = PKT_TYPE_LACPDU; marker_header = (struct marker_header *)skb_put(skb, length); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index b8cf777542fa..92c3b6f6a8e7 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -896,7 +896,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) memcpy(data, &pkt, size); skb_reset_mac_header(skb); - skb->nh.raw = skb->mac.raw + ETH_HLEN; + skb->network_header = skb->mac_header + ETH_HLEN; skb->protocol = pkt.type; skb->priority = TC_PRIO_CONTROL; skb->dev = slave->dev; diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index 7b7c1ca8f1f4..35a3a50724fe 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -1077,7 +1077,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, skb2->protocol = __constant_htons(ETH_P_802_3); skb_reset_mac_header(skb2); skb_reset_network_header(skb2); - /* skb2->nh.raw += ETH_HLEN; */ + /* skb2->network_header += ETH_HLEN; */ dev_queue_xmit(skb2); } diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 544490d9d0bd..81e9bc93569b 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -275,8 +275,8 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short veth->h_vlan_TCI = htons(tag); skb->protocol = __constant_htons(ETH_P_8021Q); - skb->mac.raw -= VLAN_HLEN; - skb->nh.raw -= VLAN_HLEN; + skb->mac_header -= VLAN_HLEN; + skb->network_header -= VLAN_HLEN; return skb; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 230dd43fc9b3..c45ad1263271 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -69,8 +69,8 @@ * NONE: skb is checksummed by protocol or csum is not required. * * PARTIAL: device is required to csum packet as seen by hard_start_xmit - * from skb->h.raw to the end and to record the checksum - * at skb->h.raw+skb->csum. + * from skb->transport_header to the end and to record the checksum + * at skb->transport_header + skb->csum. * * Device must show its capabilities in dev->features, set * at device setup time. @@ -188,8 +188,8 @@ enum { * @dev: Device we arrived on/are leaving by * @iif: ifindex of device we arrived on * @h: Transport layer header - * @nh: Network layer header - * @mac: Link layer header + * @network_header: Network layer header + * @mac_header: Link layer header * @dst: destination entry * @sp: the security path, used for xfrm * @cb: Control buffer. Free for use by every layer. 
Put private vars here @@ -236,18 +236,9 @@ struct sk_buff { int iif; /* 4 byte hole on 64 bit*/ - union { - unsigned char *raw; - } h; - - union { - unsigned char *raw; - } nh; - - union { - unsigned char *raw; - } mac; - + unsigned char *transport_header; + unsigned char *network_header; + unsigned char *mac_header; struct dst_entry *dst; struct sec_path *sp; @@ -953,68 +944,68 @@ static inline void skb_reserve(struct sk_buff *skb, int len) static inline unsigned char *skb_transport_header(const struct sk_buff *skb) { - return skb->h.raw; + return skb->transport_header; } static inline void skb_reset_transport_header(struct sk_buff *skb) { - skb->h.raw = skb->data; + skb->transport_header = skb->data; } static inline void skb_set_transport_header(struct sk_buff *skb, const int offset) { - skb->h.raw = skb->data + offset; + skb->transport_header = skb->data + offset; } static inline int skb_transport_offset(const struct sk_buff *skb) { - return skb->h.raw - skb->data; + return skb->transport_header - skb->data; } static inline unsigned char *skb_network_header(const struct sk_buff *skb) { - return skb->nh.raw; + return skb->network_header; } static inline void skb_reset_network_header(struct sk_buff *skb) { - skb->nh.raw = skb->data; + skb->network_header = skb->data; } static inline void skb_set_network_header(struct sk_buff *skb, const int offset) { - skb->nh.raw = skb->data + offset; + skb->network_header = skb->data + offset; } static inline int skb_network_offset(const struct sk_buff *skb) { - return skb->nh.raw - skb->data; + return skb->network_header - skb->data; } static inline u32 skb_network_header_len(const struct sk_buff *skb) { - return skb->h.raw - skb->nh.raw; + return skb->transport_header - skb->network_header; } static inline unsigned char *skb_mac_header(const struct sk_buff *skb) { - return skb->mac.raw; + return skb->mac_header; } static inline int skb_mac_header_was_set(const struct sk_buff *skb) { - return skb->mac.raw != NULL; + return skb->mac_header != NULL; } static inline void skb_reset_mac_header(struct sk_buff *skb) { - skb->mac.raw = skb->data; + skb->mac_header = skb->data; } static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) { - skb->mac.raw = skb->data + offset; + skb->mac_header = skb->data + offset; } /* diff --git a/net/802/psnap.c b/net/802/psnap.c index 7cba1f426081..04ee43e7538f 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c @@ -59,7 +59,7 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev, proto = find_snap_client(skb_transport_header(skb)); if (proto) { /* Pass the frame on. */ - skb->h.raw += 5; + skb->transport_header += 5; skb_pull_rcsum(skb, 5); rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev); } else { diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 0991e293940f..42a35bed0881 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -83,7 +83,7 @@ static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb) /* Lifted from Gleb's VLAN code... */ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 12); - skb->mac.raw += VLAN_HLEN; + skb->mac_header += VLAN_HLEN; } } diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 137341b4d833..f6a92a0b7aa6 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1383,10 +1383,10 @@ free_it: * @pt - packet type * * Receive a packet (in skb) from device dev. 
This has come from the SNAP - * decoder, and on entry skb->h.raw is the DDP header, skb->len is the DDP - * header, skb->len is the DDP length. The physical headers have been - * extracted. PPP should probably pass frames marked as for this layer. - * [ie ARPHRD_ETHERTALK] + * decoder, and on entry skb->transport_header is the DDP header, skb->len + * is the DDP header, skb->len is the DDP length. The physical headers + * have been extracted. PPP should probably pass frames marked as for this + * layer. [ie ARPHRD_ETHERTALK] */ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index f2796c97b4a2..8cee7fdc16c3 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -174,7 +174,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) skb->dev = nf_bridge->physindev; if (skb->protocol == htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); - skb->nh.raw -= VLAN_HLEN; + skb->network_header -= VLAN_HLEN; } NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); @@ -255,7 +255,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) else { if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull(skb, VLAN_HLEN); - skb->nh.raw += VLAN_HLEN; + skb->network_header += VLAN_HLEN; } skb->dst->output(skb); } @@ -325,7 +325,7 @@ bridged_dnat: if (skb->protocol == htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); - skb->nh.raw -= VLAN_HLEN; + skb->network_header -= VLAN_HLEN; } NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, @@ -344,7 +344,7 @@ bridged_dnat: skb->dev = nf_bridge->physindev; if (skb->protocol == htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); - skb->nh.raw -= VLAN_HLEN; + skb->network_header -= VLAN_HLEN; } NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); @@ -497,7 +497,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull_rcsum(skb, VLAN_HLEN); - skb->nh.raw += VLAN_HLEN; + skb->network_header += VLAN_HLEN; } return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn); } @@ -514,7 +514,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull_rcsum(skb, VLAN_HLEN); - skb->nh.raw += VLAN_HLEN; + skb->network_header += VLAN_HLEN; } if (!pskb_may_pull(skb, sizeof(struct iphdr))) @@ -595,7 +595,7 @@ static int br_nf_forward_finish(struct sk_buff *skb) } if (skb->protocol == htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); - skb->nh.raw -= VLAN_HLEN; + skb->network_header -= VLAN_HLEN; } NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in, skb->dev, br_forward_finish, 1); @@ -631,7 +631,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb, if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull(*pskb, VLAN_HLEN); - (*pskb)->nh.raw += VLAN_HLEN; + (*pskb)->network_header += VLAN_HLEN; } nf_bridge = skb->nf_bridge; @@ -667,13 +667,13 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb, if (!IS_VLAN_ARP(skb)) return NF_ACCEPT; skb_pull(*pskb, VLAN_HLEN); - (*pskb)->nh.raw += VLAN_HLEN; + (*pskb)->network_header += VLAN_HLEN; } if (arp_hdr(skb)->ar_pln != 4) { if (IS_VLAN_ARP(skb)) { skb_push(*pskb, VLAN_HLEN); - (*pskb)->nh.raw -= VLAN_HLEN; + (*pskb)->network_header -= VLAN_HLEN; } return NF_ACCEPT; } @@ -723,7 +723,7 @@ static 
unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, } if (skb->protocol == htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); - skb->nh.raw -= VLAN_HLEN; + skb->network_header -= VLAN_HLEN; } NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev, @@ -790,7 +790,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull(skb, VLAN_HLEN); - skb->nh.raw += VLAN_HLEN; + skb->network_header += VLAN_HLEN; } nf_bridge_save_header(skb); diff --git a/net/core/dev.c b/net/core/dev.c index 30fcc7f9d4ed..6562e5736e2f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1077,7 +1077,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) skb_reset_network_header(skb2); } - skb2->h.raw = skb2->nh.raw; + skb2->transport_header = skb2->network_header; skb2->pkt_type = PACKET_OUTGOING; ptype->func(skb2, skb->dev, ptype, skb->dev); } @@ -1207,7 +1207,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) BUG_ON(skb_shinfo(skb)->frag_list); skb_reset_mac_header(skb); - skb->mac_len = skb->nh.raw - skb->mac.raw; + skb->mac_len = skb->network_header - skb->mac_header; __skb_pull(skb, skb->mac_len); if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { @@ -1774,7 +1774,7 @@ int netif_receive_skb(struct sk_buff *skb) skb_reset_network_header(skb); skb_reset_transport_header(skb); - skb->mac_len = skb->nh.raw - skb->mac.raw; + skb->mac_len = skb->network_header - skb->mac_header; pt_prev = NULL; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index ae8cf9a285fd..9da8357addcd 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2358,7 +2358,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, } skb_set_network_header(skb, skb->tail - skb->data); - skb->h.raw = skb->nh.raw + sizeof(struct iphdr); + skb->transport_header = skb->network_header + sizeof(struct iphdr); skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); iph = ip_hdr(skb); @@ -2391,9 +2391,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, iph->check = 0; iph->check = ip_fast_csum((void *)iph, iph->ihl); skb->protocol = protocol; - skb->mac.raw = (skb->nh.raw - ETH_HLEN - - pkt_dev->nr_labels * sizeof(u32) - - VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev)); + skb->mac_header = (skb->network_header - ETH_HLEN - + pkt_dev->nr_labels * sizeof(u32) - + VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev)); skb->dev = odev; skb->pkt_type = PACKET_HOST; @@ -2697,7 +2697,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, } skb_set_network_header(skb, skb->tail - skb->data); - skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr); + skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); iph = ipv6_hdr(skb); @@ -2738,9 +2738,9 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr); ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); - skb->mac.raw = (skb->nh.raw - ETH_HLEN - - pkt_dev->nr_labels * sizeof(u32) - - VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev)); + skb->mac_header = (skb->network_header - ETH_HLEN - + pkt_dev->nr_labels * sizeof(u32) - + VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev)); skb->protocol = protocol; skb->dev = odev; skb->pkt_type = PACKET_HOST; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f38af6c01b12..1e71764be4a4 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ 
-396,9 +396,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) n->sk = NULL; C(tstamp); C(dev); - C(h); - C(nh); - C(mac); + C(transport_header); + C(network_header); + C(mac_header); C(dst); dst_clone(skb->dst); C(sp); @@ -461,9 +461,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) #ifdef CONFIG_INET new->sp = secpath_get(old->sp); #endif - new->h.raw = old->h.raw + offset; - new->nh.raw = old->nh.raw + offset; - new->mac.raw = old->mac.raw + offset; + new->transport_header = old->transport_header + offset; + new->network_header = old->network_header + offset; + new->mac_header = old->mac_header + offset; memcpy(new->cb, old->cb, sizeof(old->cb)); new->local_df = old->local_df; new->fclone = SKB_FCLONE_UNAVAILABLE; @@ -639,9 +639,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->end = data + size; skb->data += off; skb->tail += off; - skb->mac.raw += off; - skb->h.raw += off; - skb->nh.raw += off; + skb->transport_header += off; + skb->network_header += off; + skb->mac_header += off; skb->cloned = 0; skb->nohdr = 0; atomic_set(&skb_shinfo(skb)->dataref, 1); @@ -1906,7 +1906,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) skb_reserve(nskb, headroom); skb_reset_mac_header(nskb); skb_set_network_header(nskb, skb->mac_len); - nskb->h.raw = nskb->nh.raw + skb_network_header_len(skb); + nskb->transport_header = (nskb->network_header + + skb_network_header_len(skb)); memcpy(skb_put(nskb, doffset), skb->data, doffset); if (!sg) { diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 7f5a352800a0..59a765c49cf9 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -793,7 +793,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, skb2->protocol = __constant_htons(ETH_P_802_3); skb_reset_mac_header(skb2); skb_reset_network_header(skb2); - /* skb2->nh.raw += ETH_HLEN; */ + /* skb2->network_header += ETH_HLEN; */ dev_queue_xmit(skb2); } #endif diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index e1bb9e0aa5f3..6da8ff597ad3 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -181,9 +181,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) } } ((struct iphdr*)work_buf)->protocol = ah->nexthdr; - skb->nh.raw += ah_hlen; + skb->network_header += ah_hlen; memcpy(skb_network_header(skb), work_buf, ihl); - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; __skb_pull(skb, ah_hlen + ihl); return 0; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 4695ada1d9b1..1fc637fb6750 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -333,7 +333,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) ((u8*)&pip[1])[2] = 0; ((u8*)&pip[1])[3] = 0; - skb->h.raw = skb->nh.raw + sizeof(struct iphdr) + 4; + skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4; skb_put(skb, sizeof(*pig)); pig = igmpv3_report_hdr(skb); pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index e6a9e452fd61..f49afaa81298 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -828,7 +828,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) old_iph = ip_hdr(skb); } - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; skb_push(skb, gre_hlen); skb_reset_network_header(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 02988fb262d6..875da382d9b9 
100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -582,7 +582,7 @@ slow_path: skb_reserve(skb2, ll_rs); skb_put(skb2, len + hlen); skb_reset_network_header(skb2); - skb2->h.raw = skb2->nh.raw + hlen; + skb2->transport_header = skb2->network_header + hlen; /* * Charge the memory for the fragment to any owner @@ -713,7 +713,7 @@ static inline int ip_ufo_append_data(struct sock *sk, skb_reset_network_header(skb); /* initialize protocol header pointer */ - skb->h.raw = skb->nh.raw + fragheaderlen; + skb->transport_header = skb->network_header + fragheaderlen; skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; @@ -918,7 +918,8 @@ alloc_new_skb: */ data = skb_put(skb, fraglen); skb_set_network_header(skb, exthdrlen); - skb->h.raw = skb->nh.raw + fragheaderlen; + skb->transport_header = (skb->network_header + + fragheaderlen); data += fragheaderlen; if (fraggap) { @@ -1112,8 +1113,8 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, */ skb_put(skb, fragheaderlen + fraggap); skb_reset_network_header(skb); - skb->h.raw = skb->nh.raw + fragheaderlen; - + skb->transport_header = (skb->network_header + + fragheaderlen); if (fraggap) { skb->csum = skb_copy_and_csum_bits(skb_prev, maxfraglen, diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 1f13cc507a47..ba348b1e5f84 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -87,7 +87,7 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) iph = ip_hdr(skb); ipch = (void *)skb->data; iph->protocol = ipch->nexthdr; - skb->h.raw = skb->nh.raw + sizeof(*ipch); + skb->transport_header = skb->network_header + sizeof(*ipch); __skb_pull(skb, sizeof(*ipch)); err = ipcomp_decompress(x, skb); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index b32b50114806..37ab39170175 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -485,7 +485,7 @@ static int ipip_rcv(struct sk_buff *skb) secpath_reset(skb); - skb->mac.raw = skb->nh.raw; + skb->mac_header = skb->network_header; skb_reset_network_header(skb); skb->protocol = htons(ETH_P_IP); skb->pkt_type = PACKET_HOST; @@ -617,7 +617,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) old_iph = ip_hdr(skb); } - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 357894259f8f..50d0b301380e 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -597,7 +597,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) msg->im_msgtype = assert; igmp->code = 0; ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; } if (mroute_socket == NULL) { @@ -1102,7 +1102,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) struct iphdr *old_iph = ip_hdr(skb); skb_push(skb, sizeof(struct iphdr)); - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; skb_reset_network_header(skb); iph = ip_hdr(skb); @@ -1461,7 +1461,7 @@ int pim_rcv_v1(struct sk_buff * skb) if (reg_dev == NULL) goto drop; - skb->mac.raw = skb->nh.raw; + skb->mac_header = skb->network_header; skb_pull(skb, (u8*)encap - skb->data); skb_reset_network_header(skb); skb->dev = reg_dev; @@ -1517,7 +1517,7 @@ static int pim_rcv(struct sk_buff * skb) if (reg_dev == NULL) goto drop; - skb->mac.raw = skb->nh.raw; + skb->mac_header = skb->network_header; skb_pull(skb, (u8*)encap 
- skb->data); skb_reset_network_header(skb); skb->dev = reg_dev; diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index c6276d08b31e..fded9b2f227c 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c @@ -323,7 +323,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct iphdr *old_iph = ip_hdr(skb); u8 tos = old_iph->tos; __be16 df = old_iph->frag_off; - unsigned char *old_h = skb_transport_header(skb); + unsigned char *old_transport_header = skb->transport_header; struct iphdr *iph; /* Our new IP header */ int max_headroom; /* The extra header space needed */ int mtu; @@ -381,7 +381,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, old_iph = ip_hdr(skb); } - skb->h.raw = old_h; + skb->transport_header = old_transport_header; /* fix old IP header checksum */ ip_send_check(old_iph); diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index c697971fe317..2fa36618c517 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c @@ -399,7 +399,7 @@ ipt_log_packet(unsigned int pf, /* MAC logging for input chain only. */ printk("MAC="); if (skb->dev && skb->dev->hard_header_len - && skb->mac.raw != skb->nh.raw) { + && skb->mac_header != skb->network_header) { int i; const unsigned char *p = skb_mac_header(skb); for (i = 0; i < skb->dev->hard_header_len; i++,p++) diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index fae2a34d23d0..ace711e2b05a 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -251,7 +251,7 @@ static void ipt_ulog_packet(unsigned int hooknum, *(pm->prefix) = '\0'; if (in && in->hard_header_len > 0 - && skb->mac.raw != skb->nh.raw + && skb->mac_header != skb->network_header && in->hard_header_len <= ULOG_MAC_LEN) { memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len); pm->mac_len = in->hard_header_len; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index bf101dc1a972..24d7c9f31918 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -297,7 +297,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, skb->ip_summed = CHECKSUM_NONE; - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; err = memcpy_fromiovecend((void *)iph, from, 0, length); if (err) goto error_fault; diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c index 74859dfb3a25..3650e027ce70 100644 --- a/net/ipv4/xfrm4_mode_beet.c +++ b/net/ipv4/xfrm4_mode_beet.c @@ -33,7 +33,7 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) int hdrlen, optlen; iph = ip_hdr(skb); - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; hdrlen = 0; optlen = iph->ihl * 4 - sizeof(*iph); @@ -43,7 +43,7 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) skb_push(skb, x->props.header_len + hdrlen); skb_reset_network_header(skb); top_iph = ip_hdr(skb); - skb->h.raw += sizeof(*iph) - hdrlen; + skb->transport_header += sizeof(*iph) - hdrlen; memmove(top_iph, iph, sizeof(*iph)); if (unlikely(optlen)) { diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index dc8834ea3754..601047161ea6 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -26,9 +26,7 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) struct iphdr *iph = ip_hdr(skb); int ihl = iph->ihl * 4; - skb->h.raw = skb->nh.raw; - skb->h.raw += ihl; - + skb->transport_header = skb->network_header + ihl; 
skb_push(skb, x->props.header_len); skb_reset_network_header(skb); memmove(skb_network_header(skb), iph, ihl); @@ -47,10 +45,10 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) { int ihl = skb->data - skb_transport_header(skb); - if (skb->h.raw != skb->nh.raw) { + if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), skb_network_header(skb), ihl); - skb->nh.raw = skb->h.raw; + skb->network_header = skb->transport_header; } ip_hdr(skb)->tot_len = htons(skb->len + ihl); skb_reset_transport_header(skb); diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 521e52f055c1..a2f2e6a5ec5d 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -47,7 +47,7 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) int flags; iph = ip_hdr(skb); - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; skb_push(skb, x->props.header_len); skb_reset_network_header(skb); diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index d2af4fe3725b..b696c8401200 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -316,8 +316,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) * * To erase AH: * Keeping copy of cleared headers. After AH processing, - * Moving the pointer of skb->nh.raw by using skb_pull as long as AH - * header length. Then copy back the copy as long as hdr_len + * Moving the pointer of skb->network_header by using skb_pull as long + * as AH header length. Then copy back the copy as long as hdr_len * If destination header following AH exists, copy it into after [Ext2]. * * |<>|[IPv6][Ext1][Ext2][Dest][Payload] @@ -384,9 +384,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) } } - skb->nh.raw += ah_hlen; + skb->network_header += ah_hlen; memcpy(skb_network_header(skb), tmp_hdr, hdr_len); - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; __skb_pull(skb, ah_hlen + hdr_len); kfree(tmp_hdr); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index f34cc2bd489a..a6a275db88cd 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -306,7 +306,7 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp) if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) { dst_release(dst); skb = *skbp; - skb->h.raw += (skb_transport_header(skb)[1] + 1) << 3; + skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; opt = IP6CB(skb); #ifdef CONFIG_IPV6_MIP6 opt->nhoff = dstbuf; @@ -444,7 +444,7 @@ looped_back: } opt->lastopt = opt->srcrt = skb_network_header_len(skb); - skb->h.raw += (hdr->hdrlen + 1) << 3; + skb->transport_header += (hdr->hdrlen + 1) << 3; opt->dst0 = opt->dst1; opt->dst1 = 0; opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); @@ -752,7 +752,7 @@ int ipv6_parse_hopopts(struct sk_buff **skbp) opt->hop = sizeof(struct ipv6hdr); if (ip6_parse_tlv(tlvprochopopt_lst, skbp)) { skb = *skbp; - skb->h.raw += (skb_transport_header(skb)[1] + 1) << 3; + skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; opt = IP6CB(skb); opt->nhoff = sizeof(struct ipv6hdr); return 1; diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index cf0c4406b59e..be0ee8a34f9b 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -101,7 +101,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (hdr->version != 6) goto err; - skb->h.raw = skb->nh.raw + sizeof(*hdr); + skb->transport_header = skb->network_header + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, 
nexthdr); pkt_len = ntohs(hdr->payload_len); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 57a326080757..b2c092c6b9dc 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -733,7 +733,8 @@ slow_path: skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); - frag->h.raw = frag->nh.raw + hlen + sizeof(struct frag_hdr); + frag->transport_header = (frag->network_header + hlen + + sizeof(struct frag_hdr)); /* * Charge the memory for the fragment to any owner @@ -761,7 +762,7 @@ slow_path: /* * Copy a block of the IP datagram. */ - if (skb_copy_bits(skb, ptr, frag->h.raw, len)) + if (skb_copy_bits(skb, ptr, skb_transport_header(skb), len)) BUG(); left -= len; @@ -976,7 +977,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, skb_reset_network_header(skb); /* initialize protocol header pointer */ - skb->h.raw = skb->nh.raw + fragheaderlen; + skb->transport_header = skb->network_header + fragheaderlen; skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; @@ -1198,8 +1199,8 @@ alloc_new_skb: data = skb_put(skb, fraglen); skb_set_network_header(skb, exthdrlen); data += fragheaderlen; - skb->h.raw = skb->nh.raw + fragheaderlen; - + skb->transport_header = (skb->network_header + + fragheaderlen); if (fraggap) { skb->csum = skb_copy_and_csum_bits( skb_prev, maxfraglen, diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 05b59a77bc69..a0902fbdb4e1 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -701,7 +701,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, goto discard; } secpath_reset(skb); - skb->mac.raw = skb->nh.raw; + skb->mac_header = skb->network_header; skb_reset_network_header(skb); skb->protocol = htons(protocol); skb->pkt_type = PACKET_HOST; @@ -898,7 +898,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, dst_release(skb->dst); skb->dst = dst_clone(dst); - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; proto = fl->proto; if (encap_limit >= 0) { diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 5555c98dea03..7691a1b5caac 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -81,7 +81,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) /* Remove ipcomp header and decompress original payload */ iph = ipv6_hdr(skb); ipch = (void *)skb->data; - skb->h.raw = skb->nh.raw + sizeof(*ipch); + skb->transport_header = skb->network_header + sizeof(*ipch); __skb_pull(skb, sizeof(*ipch)); /* decompression */ diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index 54d176187f3f..b465e24e90b3 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c @@ -396,7 +396,7 @@ ip6t_log_packet(unsigned int pf, /* MAC logging for input chain only. 
*/ printk("MAC="); if (skb->dev && (len = skb->dev->hard_header_len) && - skb->mac.raw != skb->nh.raw) { + skb->mac_header != skb->network_header) { const unsigned char *p = skb_mac_header(skb); int i; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 490e7e151f2d..b7889ceef556 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -630,8 +630,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); - head->mac.raw += sizeof(struct frag_hdr); - head->nh.raw += sizeof(struct frag_hdr); + head->mac_header += sizeof(struct frag_hdr); + head->network_header += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index f925ca7c1a50..8705f6a502d9 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -583,7 +583,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, skb->ip_summed = CHECKSUM_NONE; - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; err = memcpy_fromiovecend((void *)iph, from, 0, length); if (err) goto error_fault; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 6dfacfa7a599..de795c04e34c 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -646,11 +646,11 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, /* We have to remove fragment header from datagram and to relocate * header in order to calculate ICV correctly. */ nhoff = fq->nhoffset; - skb_network_header(head)[nhoff] = head->h.raw[0]; + skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); - head->mac.raw += sizeof(struct frag_hdr); - head->nh.raw += sizeof(struct frag_hdr); + head->mac_header += sizeof(struct frag_hdr); + head->network_header += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); @@ -732,7 +732,7 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) if (!(fhdr->frag_off & htons(0xFFF9))) { /* It is not a fragmented frame */ - skb->h.raw += sizeof(struct frag_hdr); + skb->transport_header += sizeof(struct frag_hdr); IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS); IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1e8827b90aa7..27fe10ffacb0 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -382,7 +382,7 @@ static int ipip6_rcv(struct sk_buff *skb) read_lock(&ipip6_lock); if ((tunnel = ipip6_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) { secpath_reset(skb); - skb->mac.raw = skb->nh.raw; + skb->mac_header = skb->network_header; skb_reset_network_header(skb); IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IPV6); @@ -553,7 +553,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) iph6 = ipv6_hdr(skb); } - skb->h.raw = skb->nh.raw; + skb->transport_header = skb->network_header; skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index 8a01b0da2ddd..2e61d6ddece3 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ 
b/net/ipv6/xfrm6_mode_beet.c @@ -48,8 +48,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) skb_reset_network_header(skb); top_iph = ipv6_hdr(skb); - skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr); - skb->nh.raw += offsetof(struct ipv6hdr, nexthdr); + skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); + skb->network_header += offsetof(struct ipv6hdr, nexthdr); ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index eb1864b5aae7..c026bfea820a 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c @@ -54,10 +54,10 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) { int ihl = skb->data - skb_transport_header(skb); - if (skb->h.raw != skb->nh.raw) { + if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), skb_network_header(skb), ihl); - skb->nh.raw = skb->h.raw; + skb->network_header = skb->transport_header; } ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - sizeof(struct ipv6hdr)); diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 21d65df7479e..a6c0cdf46ad6 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -55,8 +55,8 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) skb_reset_network_header(skb); top_iph = ipv6_hdr(skb); - skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr); - skb->nh.raw += offsetof(struct ipv6hdr, nexthdr); + skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); + skb->network_header += offsetof(struct ipv6hdr, nexthdr); top_iph->version = 6; if (xdst->route->ops->family == AF_INET6) { diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index b3f65d1e80b1..099ed8fec145 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c @@ -112,7 +112,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb) if (unlikely(!pskb_may_pull(skb, llc_len))) return 0; - skb->h.raw += llc_len; + skb->transport_header += llc_len; skb_pull(skb, llc_len); if (skb->protocol == htons(ETH_P_802_2)) { __be16 pdulen = eth_hdr(skb)->h_proto; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a059cc7be672..51c059b09a37 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -114,22 +114,22 @@ On receive: ----------- Incoming, dev->hard_header!=NULL - mac.raw -> ll header - data -> data + mac_header -> ll header + data -> data Outgoing, dev->hard_header!=NULL - mac.raw -> ll header - data -> ll header + mac_header -> ll header + data -> ll header Incoming, dev->hard_header==NULL - mac.raw -> UNKNOWN position. It is very likely, that it points to ll header. - PPP makes it, that is wrong, because introduce assymetry - between rx and tx paths. - data -> data + mac_header -> UNKNOWN position. It is very likely, that it points to ll + header. PPP makes it, that is wrong, because introduce + assymetry between rx and tx paths. + data -> data Outgoing, dev->hard_header==NULL - mac.raw -> data. ll header is still not built! - data -> data + mac_header -> data. ll header is still not built! + data -> data Resume If dev->hard_header==NULL we are unlikely to restore sensible ll header. 
@@ -139,12 +139,12 @@ On transmit: ------------ dev->hard_header != NULL - mac.raw -> ll header - data -> ll header + mac_header -> ll header + data -> ll header dev->hard_header == NULL (ll header is added by device, we cannot control it) - mac.raw -> data - data -> data + mac_header -> data + data -> data We should set nh.raw on output to correct posistion, packet classifier depends on it. diff --git a/net/sctp/input.c b/net/sctp/input.c index f38e91b38719..87feee166da9 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -522,14 +522,14 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) } /* Fix up skb to look at the embedded net header. */ - saveip = skb->nh.raw; - savesctp = skb->h.raw; + saveip = skb->network_header; + savesctp = skb->transport_header; skb_reset_network_header(skb); skb_set_transport_header(skb, ihlen); sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original pointers. */ - skb->nh.raw = saveip; - skb->h.raw = savesctp; + skb->network_header = saveip; + skb->transport_header = savesctp; if (!sk) { ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); return; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index cd0af9238782..afcb0093c290 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -132,14 +132,14 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, idev = in6_dev_get(skb->dev); /* Fix up skb to look at the embedded net header. */ - saveip = skb->nh.raw; - savesctp = skb->h.raw; + saveip = skb->network_header; + savesctp = skb->transport_header; skb_reset_network_header(skb); skb_set_transport_header(skb, offset); sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original pointers. */ - skb->nh.raw = saveip; - skb->h.raw = savesctp; + skb->network_header = saveip; + skb->transport_header = savesctp; if (!sk) { ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS); goto out; -- cgit v1.2.3-59-g8ed1b From 2e07fa9cd3bac1e28cfe3131ed86b053afb02fc9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 10 Apr 2007 21:22:35 -0700 Subject: [SK_BUFF]: Use offsets for skb->{mac,network,transport}_header on 64bit architectures With this we save 8 bytes per network packet, leaving a 4 bytes hole to be used in further shrinking work, likely with the offsetization of other pointers, such as ->{data,tail,end}, at the cost of adds, that were minimized by the usual practice of setting skb->{mac,nh,n}.raw to a local variable that is then accessed multiple times in each function, it also is not more expensive than before with regards to most of the handling of such headers, like setting one of these headers to another (transport to network, etc), or subtracting, adding to/from it, comparing them, etc. 
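For illustration only (this sketch is not from the patch series; the ipv6hdr parsing and the function name are assumptions), protocol code is expected to go through the accessors introduced earlier rather than touch the raw fields, so the same source works whether the headers are stored as pointers (32-bit) or as offsets from skb->head (64-bit):

        /* minimal sketch; needs <linux/skbuff.h> and <linux/ipv6.h> */
        static int example_ipv6_rcv(struct sk_buff *skb)
        {
                struct ipv6hdr *hdr;

                if (!pskb_may_pull(skb, sizeof(*hdr)))
                        return -1;

                /* network header starts at skb->data */
                skb_reset_network_header(skb);
                hdr = (struct ipv6hdr *)skb_network_header(skb);

                /* transport header follows the fixed IPv6 header */
                skb_set_transport_header(skb, sizeof(*hdr));

                /* replaces the old skb->h.raw - skb->nh.raw computation */
                return skb_network_header_len(skb);
        }

Pure relative arithmetic on the fields (network_header += VLAN_HLEN, comparing transport_header with network_header) keeps working in either representation, but assignments that mix a header field with a real pointer such as skb->data do not, which is why the skb_reset_*()/skb_set_*() helpers are used at those spots.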
Now we have this layout for sk_buff on a x86_64 machine: [acme@mica net-2.6.22]$ pahole vmlinux sk_buff struct sk_buff { struct sk_buff * next; /* 0 8 */ struct sk_buff * prev; /* 8 8 */ struct rb_node rb; /* 16 24 */ struct sock * sk; /* 40 8 */ ktime_t tstamp; /* 48 8 */ struct net_device * dev; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ struct net_device * input_dev; /* 64 8 */ sk_buff_data_t transport_header; /* 72 4 */ sk_buff_data_t network_header; /* 76 4 */ sk_buff_data_t mac_header; /* 80 4 */ /* XXX 4 bytes hole, try to pack */ struct dst_entry * dst; /* 88 8 */ struct sec_path * sp; /* 96 8 */ char cb[48]; /* 104 48 */ /* cacheline 2 boundary (128 bytes) was 24 bytes ago*/ unsigned int len; /* 152 4 */ unsigned int data_len; /* 156 4 */ unsigned int mac_len; /* 160 4 */ union { __wsum csum; /* 4 */ __u32 csum_offset; /* 4 */ }; /* 164 4 */ __u32 priority; /* 168 4 */ __u8 local_df:1; /* 172 1 */ __u8 cloned:1; /* 172 1 */ __u8 ip_summed:2; /* 172 1 */ __u8 nohdr:1; /* 172 1 */ __u8 nfctinfo:3; /* 172 1 */ __u8 pkt_type:3; /* 173 1 */ __u8 fclone:2; /* 173 1 */ __u8 ipvs_property:1; /* 173 1 */ /* XXX 2 bits hole, try to pack */ __be16 protocol; /* 174 2 */ void (*destructor)(struct sk_buff *); /* 176 8 */ struct nf_conntrack * nfct; /* 184 8 */ /* --- cacheline 3 boundary (192 bytes) --- */ struct sk_buff * nfct_reasm; /* 192 8 */ struct nf_bridge_info *nf_bridge; /* 200 8 */ __u16 tc_index; /* 208 2 */ __u16 tc_verd; /* 210 2 */ dma_cookie_t dma_cookie; /* 212 4 */ __u32 secmark; /* 216 4 */ __u32 mark; /* 220 4 */ unsigned int truesize; /* 224 4 */ atomic_t users; /* 228 4 */ unsigned char * head; /* 232 8 */ unsigned char * data; /* 240 8 */ unsigned char * tail; /* 248 8 */ /* --- cacheline 4 boundary (256 bytes) --- */ unsigned char * end; /* 256 8 */ }; /* size: 264, cachelines: 5 */ /* sum members: 260, holes: 1, sum holes: 4 */ /* bit holes: 1, sum bit holes: 2 bits */ /* last cacheline: 8 bytes */ On 32 bits nothing changes, and pointers continue to be used with the compiler turning all this abstraction layer into dust. But there are some sk_buff validation tricks that are now possible, humm... :-) Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 104 +++++++++++++++++++++++++++++++++++++-------- net/core/skbuff.c | 18 ++++++-- net/ipv4/ipvs/ip_vs_xmit.c | 2 +- net/sctp/input.c | 4 +- net/sctp/ipv6.c | 2 +- 5 files changed, 104 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c45ad1263271..2e7405500626 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -179,6 +179,16 @@ enum { SKB_GSO_TCPV6 = 1 << 4, }; +#if BITS_PER_LONG > 32 +#define NET_SKBUFF_DATA_USES_OFFSET 1 +#endif + +#ifdef NET_SKBUFF_DATA_USES_OFFSET +typedef unsigned int sk_buff_data_t; +#else +typedef unsigned char *sk_buff_data_t; +#endif + /** * struct sk_buff - socket buffer * @next: Next buffer in list @@ -236,9 +246,9 @@ struct sk_buff { int iif; /* 4 byte hole on 64 bit*/ - unsigned char *transport_header; - unsigned char *network_header; - unsigned char *mac_header; + sk_buff_data_t transport_header; + sk_buff_data_t network_header; + sk_buff_data_t mac_header; struct dst_entry *dst; struct sec_path *sp; @@ -942,50 +952,92 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +#ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_transport_header(const struct sk_buff *skb) { - return skb->transport_header; + return skb->head + skb->transport_header; } static inline void skb_reset_transport_header(struct sk_buff *skb) { - skb->transport_header = skb->data; + skb->transport_header = skb->data - skb->head; } static inline void skb_set_transport_header(struct sk_buff *skb, const int offset) { - skb->transport_header = skb->data + offset; -} - -static inline int skb_transport_offset(const struct sk_buff *skb) -{ - return skb->transport_header - skb->data; + skb_reset_transport_header(skb); + skb->transport_header += offset; } static inline unsigned char *skb_network_header(const struct sk_buff *skb) { - return skb->network_header; + return skb->head + skb->network_header; } static inline void skb_reset_network_header(struct sk_buff *skb) { - skb->network_header = skb->data; + skb->network_header = skb->data - skb->head; } static inline void skb_set_network_header(struct sk_buff *skb, const int offset) { - skb->network_header = skb->data + offset; + skb_reset_network_header(skb); + skb->network_header += offset; } -static inline int skb_network_offset(const struct sk_buff *skb) +static inline unsigned char *skb_mac_header(const struct sk_buff *skb) { - return skb->network_header - skb->data; + return skb->head + skb->mac_header; } -static inline u32 skb_network_header_len(const struct sk_buff *skb) +static inline int skb_mac_header_was_set(const struct sk_buff *skb) { - return skb->transport_header - skb->network_header; + return skb->mac_header != ~0U; +} + +static inline void skb_reset_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->data - skb->head; +} + +static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) +{ + skb_reset_mac_header(skb); + skb->mac_header += offset; +} + +#else /* NET_SKBUFF_DATA_USES_OFFSET */ + +static inline unsigned char *skb_transport_header(const struct sk_buff *skb) +{ + return skb->transport_header; +} + +static inline void skb_reset_transport_header(struct sk_buff *skb) +{ + skb->transport_header = skb->data; +} + +static inline void skb_set_transport_header(struct sk_buff *skb, + const int offset) +{ + skb->transport_header = skb->data + offset; +} + +static inline unsigned char *skb_network_header(const struct sk_buff *skb) +{ + return 
skb->network_header; +} + +static inline void skb_reset_network_header(struct sk_buff *skb) +{ + skb->network_header = skb->data; +} + +static inline void skb_set_network_header(struct sk_buff *skb, const int offset) +{ + skb->network_header = skb->data + offset; } static inline unsigned char *skb_mac_header(const struct sk_buff *skb) @@ -1007,6 +1059,22 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) { skb->mac_header = skb->data + offset; } +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + +static inline int skb_transport_offset(const struct sk_buff *skb) +{ + return skb_transport_header(skb) - skb->data; +} + +static inline u32 skb_network_header_len(const struct sk_buff *skb) +{ + return skb->transport_header - skb->network_header; +} + +static inline int skb_network_offset(const struct sk_buff *skb) +{ + return skb_network_header(skb) - skb->data; +} /* * CPUs often take a performance hit when accessing unaligned memory diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 1e71764be4a4..a48b08681261 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -448,11 +448,12 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { +#ifndef NET_SKBUFF_DATA_USES_OFFSET /* * Shift between the two data areas in bytes */ unsigned long offset = new->data - old->data; - +#endif new->sk = NULL; new->dev = old->dev; new->priority = old->priority; @@ -461,9 +462,15 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) #ifdef CONFIG_INET new->sp = secpath_get(old->sp); #endif - new->transport_header = old->transport_header + offset; - new->network_header = old->network_header + offset; - new->mac_header = old->mac_header + offset; + new->transport_header = old->transport_header; + new->network_header = old->network_header; + new->mac_header = old->mac_header; +#ifndef NET_SKBUFF_DATA_USES_OFFSET + /* {transport,network,mac}_header are relative to skb->head */ + new->transport_header += offset; + new->network_header += offset; + new->mac_header += offset; +#endif memcpy(new->cb, old->cb, sizeof(old->cb)); new->local_df = old->local_df; new->fclone = SKB_FCLONE_UNAVAILABLE; @@ -639,9 +646,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->end = data + size; skb->data += off; skb->tail += off; +#ifndef NET_SKBUFF_DATA_USES_OFFSET + /* {transport,network,mac}_header are relative to skb->head */ skb->transport_header += off; skb->network_header += off; skb->mac_header += off; +#endif skb->cloned = 0; skb->nohdr = 0; atomic_set(&skb_shinfo(skb)->dataref, 1); diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index fded9b2f227c..900ce29db382 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c @@ -323,7 +323,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct iphdr *old_iph = ip_hdr(skb); u8 tos = old_iph->tos; __be16 df = old_iph->frag_off; - unsigned char *old_transport_header = skb->transport_header; + sk_buff_data_t old_transport_header = skb->transport_header; struct iphdr *iph; /* Our new IP header */ int max_headroom; /* The extra header space needed */ int mtu; diff --git a/net/sctp/input.c b/net/sctp/input.c index 87feee166da9..1ff47b18724a 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -513,7 +513,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) struct sctp_association *asoc = NULL; struct sctp_transport *transport; struct inet_sock *inet; - char *saveip, 
*savesctp; + sk_buff_data_t saveip, savesctp; int err; if (skb->len < ihlen + 8) { @@ -527,7 +527,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) skb_reset_network_header(skb); skb_set_transport_header(skb, ihlen); sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport); - /* Put back, the original pointers. */ + /* Put back, the original values. */ skb->network_header = saveip; skb->transport_header = savesctp; if (!sk) { diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index afcb0093c290..5b0cdda4b449 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -126,7 +126,7 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, struct sctp_association *asoc; struct sctp_transport *transport; struct ipv6_pinfo *np; - char *saveip, *savesctp; + sk_buff_data_t saveip, savesctp; int err; idev = in6_dev_get(skb->dev); -- cgit v1.2.3-59-g8ed1b From 27a884dc3cb63b93c2b3b643f5b31eed5f8a4d26 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 19 Apr 2007 20:29:13 -0700 Subject: [SK_BUFF]: Convert skb->tail to sk_buff_data_t So that it is also an offset from skb->head, reduces its size from 8 to 4 bytes on 64bit architectures, allowing us to combine the 4 bytes hole left by the layer headers conversion, reducing struct sk_buff size to 256 bytes, i.e. 4 64byte cachelines, and since the sk_buff slab cache is SLAB_HWCACHE_ALIGN... :-) Many calculations that previously required that skb->{transport,network, mac}_header be first converted to a pointer now can be done directly, being meaningful as offsets or pointers. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- arch/ia64/sn/kernel/xpnet.c | 10 ++--- drivers/atm/he.c | 4 +- drivers/atm/idt77252.c | 3 +- drivers/atm/nicstar.c | 10 +++-- drivers/infiniband/hw/amso1100/c2.c | 5 ++- drivers/isdn/i4l/isdn_net.c | 2 +- drivers/media/dvb/dvb-core/dvb_net.c | 10 +++-- drivers/net/cris/eth_v10.c | 2 +- drivers/net/cxgb3/sge.c | 6 +-- drivers/net/e1000/e1000_main.c | 4 +- drivers/net/ibm_emac/ibm_emac_core.c | 2 +- drivers/net/macb.c | 3 +- drivers/net/pcmcia/nmclan_cs.c | 2 +- drivers/net/s2io.c | 4 +- drivers/net/tulip/uli526x.c | 14 +++++-- drivers/net/wan/hdlc_fr.c | 2 +- drivers/net/wan/lmc/lmc_main.c | 4 +- drivers/net/wireless/hostap/hostap_80211_rx.c | 2 +- drivers/s390/net/ctcmain.c | 11 ++++-- drivers/s390/net/netiucv.c | 10 +++-- drivers/usb/atm/usbatm.c | 10 ++--- drivers/usb/net/asix.c | 6 +-- drivers/usb/net/gl620a.c | 2 +- drivers/usb/net/net1080.c | 2 +- drivers/usb/net/rndis_host.c | 2 +- include/linux/netfilter/nfnetlink.h | 4 +- include/linux/netlink.h | 2 +- include/linux/rtnetlink.h | 6 +-- include/linux/skbuff.h | 57 +++++++++++++++++++++------ include/net/inet_ecn.h | 6 +-- include/net/netlink.h | 8 ++-- include/net/pkt_cls.h | 2 +- kernel/audit.c | 8 ++-- net/atm/lec.c | 2 +- net/bluetooth/rfcomm/core.c | 2 +- net/core/dev.c | 4 +- net/core/filter.c | 2 +- net/core/gen_stats.c | 4 +- net/core/pktgen.c | 4 +- net/core/skbuff.c | 35 +++++++++------- net/core/wireless.c | 4 +- net/decnet/dn_nsp_out.c | 6 ++- net/decnet/dn_route.c | 4 +- net/decnet/dn_table.c | 8 ++-- net/decnet/netfilter/dn_rtmsg.c | 2 +- net/econet/af_econet.c | 2 +- net/ieee80211/ieee80211_rx.c | 2 +- net/ipv4/esp4.c | 8 ++-- net/ipv4/icmp.c | 3 +- net/ipv4/igmp.c | 4 +- net/ipv4/inet_diag.c | 12 +++--- net/ipv4/ip_sockglue.c | 2 +- net/ipv4/ipmr.c | 9 +++-- net/ipv4/ipvs/ip_vs_ftp.c | 4 +- net/ipv4/netfilter/arpt_mangle.c | 8 ++-- net/ipv4/netfilter/ip_queue.c | 4 +- 
net/ipv4/netfilter/nf_nat_helper.c | 3 +- net/ipv4/tcp.c | 2 +- net/ipv4/tcp_output.c | 2 +- net/ipv6/datagram.c | 2 +- net/ipv6/esp6.c | 8 ++-- net/ipv6/exthdrs.c | 2 +- net/ipv6/icmp.c | 3 +- net/ipv6/ip6_output.c | 2 +- net/ipv6/mcast.c | 6 +-- net/ipv6/mip6.c | 4 +- net/ipv6/ndisc.c | 19 ++++----- net/ipv6/netfilter/ip6_queue.c | 4 +- net/ipv6/raw.c | 2 +- net/irda/ircomm/ircomm_param.c | 4 +- net/irda/irlan/irlan_common.c | 2 +- net/irda/qos.c | 14 +++---- net/netfilter/nf_conntrack_netlink.c | 16 +++----- net/netfilter/nfnetlink_log.c | 3 +- net/netfilter/nfnetlink_queue.c | 4 +- net/netlink/af_netlink.c | 2 +- net/packet/af_packet.c | 2 +- net/sched/act_api.c | 52 ++++++++++++------------ net/sched/act_gact.c | 2 +- net/sched/act_ipt.c | 2 +- net/sched/act_mirred.c | 2 +- net/sched/act_pedit.c | 2 +- net/sched/act_police.c | 8 ++-- net/sched/act_simple.c | 2 +- net/sched/cls_api.c | 14 +++---- net/sched/cls_basic.c | 4 +- net/sched/cls_fw.c | 4 +- net/sched/cls_route.c | 4 +- net/sched/cls_rsvp.h | 4 +- net/sched/cls_tcindex.c | 6 +-- net/sched/cls_u32.c | 6 +-- net/sched/ematch.c | 17 ++++---- net/sched/sch_api.c | 8 ++-- net/sched/sch_atm.c | 4 +- net/sched/sch_cbq.c | 20 +++++----- net/sched/sch_hfsc.c | 6 +-- net/sched/sch_htb.c | 10 ++--- net/sched/sch_ingress.c | 4 +- net/sched/sch_netem.c | 4 +- net/sched/sch_prio.c | 2 +- net/sched/sch_sfq.c | 2 +- net/sched/sch_tbf.c | 4 +- net/sctp/input.c | 4 +- net/sctp/inqueue.c | 8 ++-- net/sctp/sm_make_chunk.c | 4 +- net/sctp/sm_statefuns.c | 4 +- net/tipc/config.c | 2 +- net/tipc/socket.c | 2 +- net/xfrm/xfrm_user.c | 40 +++++++++---------- security/selinux/netlink.c | 2 +- 110 files changed, 396 insertions(+), 329 deletions(-) (limited to 'include') diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c index 68d59d912c99..eb416c95967d 100644 --- a/arch/ia64/sn/kernel/xpnet.c +++ b/arch/ia64/sn/kernel/xpnet.c @@ -264,7 +264,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) dev_dbg(xpnet, "head=0x%p skb->data=0x%p skb->tail=0x%p " "skb->end=0x%p skb->len=%d\n", (void *) skb->head, - (void *) skb->data, (void *) skb->tail, (void *) skb->end, + (void *)skb->data, skb_tail_pointer(skb), (void *)skb->end, skb->len); skb->protocol = eth_type_trans(skb, xpnet_device); @@ -272,7 +272,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p " "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n", - (void *) skb->head, (void *) skb->data, (void *) skb->tail, + (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), (void *) skb->end, skb->len); @@ -475,7 +475,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " "skb->end=0x%p skb->len=%d\n", (void *) skb->head, - (void *) skb->data, (void *) skb->tail, (void *) skb->end, + (void *)skb->data, skb_tail_pointer(skb), (void *)skb->end, skb->len); @@ -497,7 +497,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) /* get the beginning of the first cacheline and end of last */ start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); - end_addr = L1_CACHE_ALIGN((u64) skb->tail); + end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); /* calculate how many bytes to embed in the XPC message */ embedded_bytes = 0; @@ -573,7 +573,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) msg->magic = XPNET_MAGIC; msg->size = end_addr - 
start_addr; msg->leadin_ignore = (u64) skb->data - start_addr; - msg->tailout_ignore = end_addr - (u64) skb->tail; + msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); msg->buf_pa = __pa(start_addr); dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa=" diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 8510026b690a..d33aba6864c2 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -1901,13 +1901,13 @@ he_service_rbrq(struct he_dev *he_dev, int group) case ATM_AAL0: /* 2.10.1.5 raw cell receive */ skb->len = ATM_AAL0_SDU; - skb->tail = skb->data + skb->len; + skb_set_tail_pointer(skb, skb->len); break; case ATM_AAL5: /* 2.10.1.2 aal5 receive */ skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len); - skb->tail = skb->data + skb->len; + skb_set_tail_pointer(skb, skb->len); #ifdef USE_CHECKSUM_HW if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) { skb->ip_summed = CHECKSUM_COMPLETE; diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index b4b80140c398..1e49799cd6cf 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -1816,7 +1816,8 @@ push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue) u32 handle; u32 addr; - skb->data = skb->tail = skb->head; + skb->data = skb->head; + skb_reset_tail_pointer(skb); skb->len = 0; skb_reserve(skb, 16); diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index aab9b3733d52..26f4b7033494 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -2208,7 +2208,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) if (i == 1 && ns_rsqe_eopdu(rsqe)) *((u32 *) sb->data) |= 0x00000002; skb_put(sb, NS_AAL0_HEADER); - memcpy(sb->tail, cell, ATM_CELL_PAYLOAD); + memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); skb_put(sb, ATM_CELL_PAYLOAD); ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); @@ -2252,7 +2252,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) vc->rx_iov = iovb; NS_SKB(iovb)->iovcnt = 0; iovb->len = 0; - iovb->tail = iovb->data = iovb->head; + iovb->data = iovb->head; + skb_reset_tail_pointer(iovb); NS_SKB(iovb)->vcc = vcc; /* IMPORTANT: a pointer to the sk_buff containing the small or large buffer is stored as iovec base, NOT a pointer to the @@ -2265,7 +2266,8 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); NS_SKB(iovb)->iovcnt = 0; iovb->len = 0; - iovb->tail = iovb->data = iovb->head; + iovb->data = iovb->head; + skb_reset_tail_pointer(iovb); NS_SKB(iovb)->vcc = vcc; } iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++]; @@ -2489,7 +2491,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) { lb = (struct sk_buff *) iov->iov_base; tocopy = min_t(int, remaining, iov->iov_len); - memcpy(hb->tail, lb->data, tocopy); + memcpy(skb_tail_pointer(hb), lb->data, tocopy); skb_put(hb, tocopy); iov++; remaining -= tocopy; diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index 7698feafa6a7..58bc272bd407 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c @@ -439,7 +439,8 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem) } /* Setup the skb for reuse since we're dropping this pkt */ - elem->skb->tail = elem->skb->data = elem->skb->head; + elem->skb->data = elem->skb->head; + skb_reset_tail_pointer(elem->skb); /* Zero out the rxp hdr in the sk_buff */ memset(elem->skb->data, 0, sizeof(*rxp_hdr)); @@ -521,7 +522,7 @@ static void c2_rx_interrupt(struct net_device *netdev) * "sizeof(struct 
c2_rxp_hdr)". */ skb->data += sizeof(*rxp_hdr); - skb->tail = skb->data + buflen; + skb_set_tail_pointer(skb, buflen); skb->len = buflen; skb->protocol = eth_type_trans(skb, netdev); diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index cd3b1fa4a414..aa83277aba74 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -881,7 +881,7 @@ isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) addinfo[0] = '\0'; /* This check stolen from 2.1.72 dev_queue_xmit_nit() */ - if (p < skb->data || p >= skb->tail) { + if (p < skb->data || skb->network_header >= skb->tail) { /* fall back to old isdn_net_log_packet method() */ char * buf = skb->data; diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index c6b004182d91..9de177a5b9f1 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c @@ -600,6 +600,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) /* Check CRC32, we've got it in our skb already. */ unsigned short ulen = htons(priv->ule_sndu_len); unsigned short utype = htons(priv->ule_sndu_type); + const u8 *tail; struct kvec iov[3] = { { &ulen, sizeof ulen }, { &utype, sizeof utype }, @@ -613,10 +614,11 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) } ule_crc = iov_crc32(ule_crc, iov, 3); - expected_crc = *((u8 *)priv->ule_skb->tail - 4) << 24 | - *((u8 *)priv->ule_skb->tail - 3) << 16 | - *((u8 *)priv->ule_skb->tail - 2) << 8 | - *((u8 *)priv->ule_skb->tail - 1); + tail = skb_tail_pointer(priv->ule_skb); + expected_crc = *(tail - 4) << 24 | + *(tail - 3) << 16 | + *(tail - 2) << 8 | + *(tail - 1); if (ule_crc != expected_crc) { printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n", priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0); diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 98643801a3b0..7feb9c561147 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1348,7 +1348,7 @@ e100_rx(struct net_device *dev) #ifdef ETHDEBUG printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", - skb->head, skb->data, skb->tail, skb->end); + skb->head, skb->data, skb_tail_pointer(skb), skb->end); printk("copying packet to 0x%x.\n", skb_data_ptr); #endif diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 610e4769efa4..c5faf1380e15 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -1325,13 +1325,13 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), - skb->tail - skb_transport_header(skb), + skb->tail - skb->transport_header, adap->pdev); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; ((struct unmap_info *)skb->cb)->len = (skb->tail - - skb_transport_header(skb)); + skb->transport_header); } write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, @@ -1353,7 +1353,7 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) return 1; /* packet fits as immediate data */ flits = skb_transport_offset(skb) / 8; /* headers */ - if (skb->tail != skb_transport_header(skb)) + if (skb->tail != skb->transport_header) cnt++; return flits_to_desc(flits + sgl_len(cnt)); } diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index e86deb2ef823..e7c93f44f810 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -3304,7 +3304,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * NOTE: this is a TSO only workaround * if end byte alignment not correct move us * into the next dword */ - if ((unsigned long)(skb->tail - 1) & 4) + if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) break; /* fall through */ case e1000_82571: @@ -4388,7 +4388,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, PCI_DMA_FROMDEVICE); vaddr = kmap_atomic(ps_page->ps_page[0], KM_SKB_DATA_SOFTIRQ); - memcpy(skb->tail, vaddr, l1); + memcpy(skb_tail_pointer(skb), vaddr, l1); kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); pci_dma_sync_single_for_device(pdev, ps_page_dma->ps_page_dma[0], diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c index b1ad62d89eb9..3d82d46f4998 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.c +++ b/drivers/net/ibm_emac/ibm_emac_core.c @@ -1338,7 +1338,7 @@ static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot) dev_kfree_skb(dev->rx_sg_skb); dev->rx_sg_skb = NULL; } else { - cacheable_memcpy(dev->rx_sg_skb->tail, + cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb), dev->rx_skb[slot]->data, len); skb_put(dev->rx_sg_skb, len); emac_recycle_rx_skb(dev, slot, len); diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 0c3649be0d05..98bf51afcee7 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -575,7 +575,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) int i; dev_dbg(&bp->pdev->dev, "start_xmit: len %u head %p data %p tail %p end %p\n", - skb->len, skb->head, skb->data, skb->tail, skb->end); + skb->len, skb->head, skb->data, + skb_tail_pointer(skb), skb->end); dev_dbg(&bp->pdev->dev, "data:"); for (i = 0; i < 16; i++) diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index ec0af65cd5d7..73da611fd536 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c @@ -1185,7 +1185,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt) skb_reserve(skb, 2); insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); if (pkt_len & 1) - *(skb->tail-1) = inb(ioaddr + AM2150_RCV); + *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); /* Send the packet to the upper (protocol) layers. 
*/ diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 46ebf141ee5a..600d3ff347fc 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -2195,7 +2195,7 @@ static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \ frag_list->next = NULL; tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); frag_list->data = tmp; - frag_list->tail = tmp; + skb_reset_tail_pointer(frag_list); /* Buffer-2 receives L4 data payload */ ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, @@ -2349,7 +2349,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) tmp += ALIGN_SIZE; tmp &= ~ALIGN_SIZE; skb->data = (void *) (unsigned long)tmp; - skb->tail = (void *) (unsigned long)tmp; + skb_reset_tail_pointer(skb); if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) ((struct RxD3*)rxdp)->Buffer0_ptr = diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 9a5850fa644a..e46f4cb02c15 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -829,7 +829,9 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info != NULL) ) { /* size less than COPY_SIZE, allocate a rxlen SKB */ skb_reserve(skb, 2); /* 16byte align */ - memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen); + memcpy(skb_put(skb, rxlen), + skb_tail_pointer(rxptr->rx_skb_ptr), + rxlen); uli526x_reuse_skb(db, rxptr->rx_skb_ptr); } else skb_put(skb, rxlen); @@ -1175,7 +1177,10 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { rxptr->rx_skb_ptr = skb; - rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); + rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, + skb_tail_pointer(skb), + RX_ALLOC_SIZE, + PCI_DMA_FROMDEVICE)); wmb(); rxptr->rdes0 = cpu_to_le32(0x80000000); db->rx_avail_cnt++; @@ -1339,7 +1344,10 @@ static void allocate_rx_buffer(struct uli526x_board_info *db) if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) break; rxptr->rx_skb_ptr = skb; /* FIXME (?) */ - rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); + rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, + skb_tail_pointer(skb), + RX_ALLOC_SIZE, + PCI_DMA_FROMDEVICE)); wmb(); rxptr->rdes0 = cpu_to_le32(0x80000000); rxptr = rxptr->next_rx_desc; diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index b747228c7198..aeb2789adf26 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -533,7 +533,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); } - data = skb->tail; + data = skb_tail_pointer(skb); data[i++] = LMI_CALLREF; data[i++] = dce ? 
LMI_STATUS : LMI_STATUS_ENQUIRY; if (lmi == LMI_ANSI) diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index d4851465c83b..b731f3aae0df 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1636,7 +1636,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ if (nsb) { sc->lmc_rxq[i] = nsb; nsb->dev = dev; - sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); + sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); } sc->failed_recv_alloc = 1; goto skip_packet; @@ -1679,7 +1679,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ if (nsb) { sc->lmc_rxq[i] = nsb; nsb->dev = dev; - sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); + sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); /* Transferred to 21140 below */ } else { diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index 35a3a50724fe..5e3e9e262706 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -922,7 +922,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, if (frag != 0) flen -= hdrlen; - if (frag_skb->tail + flen > frag_skb->end) { + if (skb_tail_pointer(frag_skb) + flen > frag_skb->end) { printk(KERN_WARNING "%s: host decrypted and " "reassembled frame did not fit skb\n", dev->name); diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 787c01317042..54e3f806cd52 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -706,7 +706,8 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg) spin_unlock(&ch->collect_lock); return; } - ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data; + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); ch->trans_skb->len = 0; if (ch->prof.maxmulti < (ch->collect_len + 2)) ch->prof.maxmulti = ch->collect_len + 2; @@ -831,7 +832,8 @@ ch_action_rx(fsm_instance * fi, int event, void *arg) ctc_unpack_skb(ch, skb); } again: - skb->data = skb->tail = ch->trans_skb_data; + skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(skb); skb->len = 0; if (ctc_checkalloc_buffer(ch, 1)) return; @@ -2226,7 +2228,8 @@ transmit_skb(struct channel *ch, struct sk_buff *skb) * IDAL support in CTC is broken, so we have to * care about skb's above 2G ourselves. 
*/ - hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31; + hi = ((unsigned long)skb_tail_pointer(skb) + + LL_HEADER_LENGTH) >> 31; if (hi) { nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); if (!nskb) { @@ -2262,7 +2265,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb) return -EBUSY; } - ch->trans_skb->tail = ch->trans_skb->data; + skb_reset_tail_pointer(ch->trans_skb); ch->trans_skb->len = 0; ch->ccw[1].count = skb->len; memcpy(skb_put(ch->trans_skb, skb->len), skb->data, diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 82edf2014402..cd42bd54988c 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -689,7 +689,8 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg) msg->length, conn->max_buffsize); return; } - conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; + conn->rx_buff->data = conn->rx_buff->head; + skb_reset_tail_pointer(conn->rx_buff); conn->rx_buff->len = 0; rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, msg->length, NULL); @@ -735,7 +736,8 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) } } } - conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head; + conn->tx_buff->data = conn->tx_buff->head; + skb_reset_tail_pointer(conn->tx_buff); conn->tx_buff->len = 0; spin_lock_irqsave(&conn->collect_lock, saveflags); while ((skb = skb_dequeue(&conn->collect_queue))) { @@ -1164,8 +1166,8 @@ static int netiucv_transmit_skb(struct iucv_connection *conn, * Copy the skb to a new allocated skb in lowmem only if the * data is located above 2G in memory or tailroom is < 2. */ - unsigned long hi = - ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31; + unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) + + NETIUCV_HDRLEN)) >> 31; int copied = 0; if (hi || (skb_tailroom(skb) < 2)) { nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index ec63b0ee0743..4d8f282b23d1 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -335,15 +335,15 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char sarb = instance->cached_vcc->sarb; - if (sarb->tail + ATM_CELL_PAYLOAD > sarb->end) { + if (skb_tail_pointer(sarb) + ATM_CELL_PAYLOAD > sarb->end) { atm_rldbg(instance, "%s: buffer overrun (sarb->len %u, vcc: 0x%p)!\n", __func__, sarb->len, vcc); /* discard cells already received */ skb_trim(sarb, 0); - UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end); + UDSL_ASSERT(skb_tail_pointer(sarb) + ATM_CELL_PAYLOAD <= sarb->end); } - memcpy(sarb->tail, source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); + memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); __skb_put(sarb, ATM_CELL_PAYLOAD); if (pti & 1) { @@ -370,7 +370,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char goto out; } - if (crc32_be(~0, sarb->tail - pdu_length, pdu_length) != 0xc704dd7b) { + if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", __func__, vcc); atomic_inc(&vcc->stats->rx_err); @@ -396,7 +396,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char goto out; /* atm_charge increments rx_drop */ } - memcpy(skb->data, sarb->tail - pdu_length, length); + memcpy(skb->data, skb_tail_pointer(sarb) - pdu_length, length); __skb_put(skb, length); vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u", diff --git 
a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c index 5808ea082459..f56e2dab3712 100644 --- a/drivers/usb/net/asix.c +++ b/drivers/usb/net/asix.c @@ -298,7 +298,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (ax_skb) { ax_skb->len = size; ax_skb->data = packet; - ax_skb->tail = packet + size; + skb_set_tail_pointer(ax_skb, size); usbnet_skb_return(dev, ax_skb); } else { return 0; @@ -338,7 +338,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, && ((headroom + tailroom) >= (4 + padlen))) { if ((headroom < 4) || (tailroom < padlen)) { skb->data = memmove(skb->head + 4, skb->data, skb->len); - skb->tail = skb->data + skb->len; + skb_set_tail_pointer(skb, skb->len); } } else { struct sk_buff *skb2; @@ -356,7 +356,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, if ((skb->len % 512) == 0) { cpu_to_le32s(&padbytes); - memcpy( skb->tail, &padbytes, sizeof(padbytes)); + memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); skb_put(skb, sizeof(padbytes)); } return skb; diff --git a/drivers/usb/net/gl620a.c b/drivers/usb/net/gl620a.c index d257a8e026d6..031cf5ca4dbb 100644 --- a/drivers/usb/net/gl620a.c +++ b/drivers/usb/net/gl620a.c @@ -157,7 +157,7 @@ genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) if ((headroom < (4 + 4*1)) || (tailroom < padlen)) { skb->data = memmove(skb->head + (4 + 4*1), skb->data, skb->len); - skb->tail = skb->data + skb->len; + skb_set_tail_pointer(skb, skb->len); } } else { struct sk_buff *skb2; diff --git a/drivers/usb/net/net1080.c b/drivers/usb/net/net1080.c index ccebfdef4751..19bf8dae70c9 100644 --- a/drivers/usb/net/net1080.c +++ b/drivers/usb/net/net1080.c @@ -520,7 +520,7 @@ net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) skb->data = memmove(skb->head + sizeof (struct nc_header), skb->data, skb->len); - skb->tail = skb->data + len; + skb_set_tail_pointer(skb, len); goto encapsulate; } } diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c index 39a21c74fdf4..1d36772ba6e1 100644 --- a/drivers/usb/net/rndis_host.c +++ b/drivers/usb/net/rndis_host.c @@ -588,7 +588,7 @@ rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) if (likely((sizeof *hdr) <= room)) { skb->data = memmove(skb->head + sizeof *hdr, skb->data, len); - skb->tail = skb->data + len; + skb_set_tail_pointer(skb, len); goto fill; } } diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 6179648a014e..e1ea5dfbbbd4 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -62,11 +62,11 @@ struct nfattr #define NFA_DATA(nfa) ((void *)(((char *)(nfa)) + NFA_LENGTH(0))) #define NFA_PAYLOAD(nfa) ((int)((nfa)->nfa_len) - NFA_LENGTH(0)) #define NFA_NEST(skb, type) \ -({ struct nfattr *__start = (struct nfattr *) (skb)->tail; \ +({ struct nfattr *__start = (struct nfattr *)skb_tail_pointer(skb); \ NFA_PUT(skb, (NFNL_NFA_NEST | type), 0, NULL); \ __start; }) #define NFA_NEST_END(skb, start) \ -({ (start)->nfa_len = ((skb)->tail - (unsigned char *) (start)); \ +({ (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ (skb)->len; }) #define NFA_NEST_CANCEL(skb, start) \ ({ if (start) \ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index a9d3ad5bc80f..68a632b372ec 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -229,7 +229,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) 
(cb)->nlh->nlmsg_seq, type, len, flags) #define NLMSG_END(skb, nlh) \ -({ (nlh)->nlmsg_len = (skb)->tail - (unsigned char *) (nlh); \ +({ (nlh)->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)(nlh); \ (skb)->len; }) #define NLMSG_CANCEL(skb, nlh) \ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 4a629ea70cc4..3a4cb242ecd2 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -605,7 +605,7 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi #define RTA_PUT_NOHDR(skb, attrlen, data) \ ({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \ - memset(skb->tail - (RTA_ALIGN(attrlen) - attrlen), 0, \ + memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \ RTA_ALIGN(attrlen) - attrlen); }) #define RTA_PUT_U8(skb, attrtype, value) \ @@ -637,12 +637,12 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi RTA_PUT(skb, attrtype, 0, NULL); #define RTA_NEST(skb, type) \ -({ struct rtattr *__start = (struct rtattr *) (skb)->tail; \ +({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \ RTA_PUT(skb, type, 0, NULL); \ __start; }) #define RTA_NEST_END(skb, start) \ -({ (start)->rta_len = ((skb)->tail - (unsigned char *) (start)); \ +({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ (skb)->len; }) #define RTA_NEST_CANCEL(skb, start) \ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2e7405500626..e1c2392ecb56 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -246,9 +246,6 @@ struct sk_buff { int iif; /* 4 byte hole on 64 bit*/ - sk_buff_data_t transport_header; - sk_buff_data_t network_header; - sk_buff_data_t mac_header; struct dst_entry *dst; struct sec_path *sp; @@ -303,13 +300,16 @@ struct sk_buff { __u32 mark; + sk_buff_data_t transport_header; + sk_buff_data_t network_header; + sk_buff_data_t mac_header; /* These elements must be at the end, see alloc_skb() for details. 
*/ - unsigned int truesize; - atomic_t users; + sk_buff_data_t tail; unsigned char *head, *data, - *tail, *end; + unsigned int truesize; + atomic_t users; }; #ifdef __KERNEL__ @@ -812,12 +812,45 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i, #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list) #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) +{ + return skb->head + skb->tail; +} + +static inline void skb_reset_tail_pointer(struct sk_buff *skb) +{ + skb->tail = skb->data - skb->head; +} + +static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) +{ + skb_reset_tail_pointer(skb); + skb->tail += offset; +} +#else /* NET_SKBUFF_DATA_USES_OFFSET */ +static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) +{ + return skb->tail; +} + +static inline void skb_reset_tail_pointer(struct sk_buff *skb) +{ + skb->tail = skb->data; +} + +static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) +{ + skb->tail = skb->data + offset; +} +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + /* * Add data to an sk_buff */ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) { - unsigned char *tmp = skb->tail; + unsigned char *tmp = skb_tail_pointer(skb); SKB_LINEAR_ASSERT(skb); skb->tail += len; skb->len += len; @@ -835,11 +868,11 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) */ static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len) { - unsigned char *tmp = skb->tail; + unsigned char *tmp = skb_tail_pointer(skb); SKB_LINEAR_ASSERT(skb); skb->tail += len; skb->len += len; - if (unlikely(skb->tail>skb->end)) + if (unlikely(skb_tail_pointer(skb) > skb->end)) skb_over_panic(skb, len, current_text_addr()); return tmp; } @@ -935,7 +968,7 @@ static inline int skb_headroom(const struct sk_buff *skb) */ static inline int skb_tailroom(const struct sk_buff *skb) { - return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; + return skb_is_nonlinear(skb) ? 
0 : skb->end - skb_tail_pointer(skb); } /** @@ -1127,8 +1160,8 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len) WARN_ON(1); return; } - skb->len = len; - skb->tail = skb->data + len; + skb->len = len; + skb_set_tail_pointer(skb, len); } /** diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 06a2c69a89e5..de8399a79774 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -114,14 +114,12 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) { switch (skb->protocol) { case __constant_htons(ETH_P_IP): - if (skb_network_header(skb) + sizeof(struct iphdr) <= - skb->tail) + if (skb->network_header + sizeof(struct iphdr) <= skb->tail) return IP_ECN_set_ce(ip_hdr(skb)); break; case __constant_htons(ETH_P_IPV6): - if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= - skb->tail) + if (skb->network_header + sizeof(struct ipv6hdr) <= skb->tail) return IP6_ECN_set_ce(ipv6_hdr(skb)); break; } diff --git a/include/net/netlink.h b/include/net/netlink.h index bcaf67b7a19d..2c7ab107f20d 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -525,7 +525,7 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags) */ static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh) { - nlh->nlmsg_len = skb->tail - (unsigned char *) nlh; + nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh; return skb->len; } @@ -538,7 +538,7 @@ static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh) */ static inline void *nlmsg_get_pos(struct sk_buff *skb) { - return skb->tail; + return skb_tail_pointer(skb); } /** @@ -940,7 +940,7 @@ static inline unsigned long nla_get_msecs(struct nlattr *nla) */ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype) { - struct nlattr *start = (struct nlattr *) skb->tail; + struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb); if (nla_put(skb, attrtype, 0, NULL) < 0) return NULL; @@ -960,7 +960,7 @@ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype) */ static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start) { - start->nla_len = skb->tail - (unsigned char *) start; + start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start; return skb->len; } diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index dcb3a91f1364..4129df708079 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -337,7 +337,7 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer) static inline int tcf_valid_offset(const struct sk_buff *skb, const unsigned char *ptr, const int len) { - return unlikely((ptr + len) < skb->tail && ptr > skb->head); + return unlikely((ptr + len) < skb_tail_pointer(skb) && ptr > skb->head); } #ifdef CONFIG_NET_CLS_IND diff --git a/kernel/audit.c b/kernel/audit.c index 76c9a11b72d6..ea8521417d13 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1073,7 +1073,7 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt, goto out; } va_copy(args2, args); - len = vsnprintf(skb->tail, avail, fmt, args); + len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args); if (len >= avail) { /* The printk buffer is 1024 bytes long, so if we get * here and AUDIT_BUFSIZ is at least 1024, then we can @@ -1082,7 +1082,7 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt, max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail)); if (!avail) goto out; - len = vsnprintf(skb->tail, avail, fmt, args2); + len = vsnprintf(skb_tail_pointer(skb), avail, 
fmt, args2); } if (len > 0) skb_put(skb, len); @@ -1143,7 +1143,7 @@ void audit_log_hex(struct audit_buffer *ab, const unsigned char *buf, return; } - ptr = skb->tail; + ptr = skb_tail_pointer(skb); for (i=0; i<len; i++) { *ptr++ = hex[(buf[i] & 0xF0)>>4]; /* Upper nibble */ *ptr++ = hex[buf[i] & 0x0F]; /* Lower nibble */ @@ -1175,7 +1175,7 @@ static void audit_log_n_string(struct audit_buffer *ab, size_t slen, if (!avail) return; } - ptr = skb->tail; + ptr = skb_tail_pointer(skb); *ptr++ = '"'; memcpy(ptr, string, slen); ptr += slen; diff --git a/net/atm/lec.c b/net/atm/lec.c index d339645dc796..a8c6b285e06c 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -283,7 +283,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) } DPRINTK("skbuff head:%lx data:%lx tail:%lx end:%lx\n", - (long)skb->head, (long)skb->data, (long)skb->tail, + (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), (long)skb->end); #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0) diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 94f457360560..10cc13cfae6c 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -1567,7 +1567,7 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb) /* Trim FCS */ skb->len--; skb->tail--; - fcs = *(u8 *) skb->tail; + fcs = *(u8 *)skb_tail_pointer(skb); if (__check_fcs(skb->data, type, fcs)) { BT_ERR("bad checksum in packet"); diff --git a/net/core/dev.c b/net/core/dev.c index 6562e5736e2f..86dc9f693f66 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1069,7 +1069,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) skb_reset_mac_header(skb2); if (skb_network_header(skb2) < skb2->data || - skb_network_header(skb2) > skb2->tail) { + skb2->network_header > skb2->tail) { if (net_ratelimit()) printk(KERN_CRIT "protocol %04x is " "buggy, dev %s\n", @@ -1175,7 +1175,7 @@ int skb_checksum_help(struct sk_buff *skb) BUG_ON(offset > (int)skb->len); csum = skb_checksum(skb, offset, skb->len-offset, 0); - offset = skb->tail - skb_transport_header(skb); + offset = skb->tail - skb->transport_header; BUG_ON(offset <= 0); BUG_ON(skb->csum_offset + 2 > offset); diff --git a/net/core/filter.c b/net/core/filter.c index d2358a5e6339..bd903aaf7aa7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -46,7 +46,7 @@ static void *__load_pointer(struct sk_buff *skb, int k) else if (k >= SKF_LL_OFF) ptr = skb_mac_header(skb) + k - SKF_LL_OFF; - if (ptr >= skb->head && ptr < skb->tail) + if (ptr >= skb->head && ptr < skb_tail_pointer(skb)) return ptr; return NULL; } diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 259473d0559d..bcc25591d8ac 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -61,7 +61,7 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type, spin_lock_bh(lock); d->lock = lock; if (type) - d->tail = (struct rtattr *) skb->tail; + d->tail = (struct rtattr *)skb_tail_pointer(skb); d->skb = skb; d->compat_tc_stats = tc_stats_type; d->compat_xstats = xstats_type; @@ -212,7 +212,7 @@ int gnet_stats_finish_copy(struct gnet_dump *d) { if (d->tail) - d->tail->rta_len = d->skb->tail - (u8 *) d->tail; + d->tail->rta_len = skb_tail_pointer(d->skb) - (u8 *)d->tail; if (d->compat_tc_stats) if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats, diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 9da8357addcd..f9469ea530cc 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ 
-2357,7 +2357,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, *vlan_encapsulated_proto = htons(ETH_P_IP); } - skb_set_network_header(skb, skb->tail - skb->data); + skb->network_header = skb->tail; skb->transport_header = skb->network_header + sizeof(struct iphdr); skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); @@ -2696,7 +2696,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, *vlan_encapsulated_proto = htons(ETH_P_IPV6); } - skb_set_network_header(skb, skb->tail - skb->data); + skb->network_header = skb->tail; skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a48b08681261..ddcbc4d10dab 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -87,8 +87,9 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly; void skb_over_panic(struct sk_buff *skb, int sz, void *here) { printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " - "data:%p tail:%p end:%p dev:%s\n", - here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end, + "data:%p tail:%#lx end:%p dev:%s\n", + here, skb->len, sz, skb->head, skb->data, + (unsigned long)skb->tail, skb->end, skb->dev ? skb->dev->name : ""); BUG(); } @@ -105,8 +106,9 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here) void skb_under_panic(struct sk_buff *skb, int sz, void *here) { printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " - "data:%p tail:%p end:%p dev:%s\n", - here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end, + "data:%p tail:%#lx end:%p dev:%s\n", + here, skb->len, sz, skb->head, skb->data, + (unsigned long)skb->tail, skb->end, skb->dev ? skb->dev->name : ""); BUG(); } @@ -167,7 +169,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, atomic_set(&skb->users, 1); skb->head = data; skb->data = data; - skb->tail = data; + skb_reset_tail_pointer(skb); skb->end = data + size; /* make sure we initialize shinfo sequentially */ shinfo = skb_shinfo(skb); @@ -629,7 +631,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, /* Copy only real data... and, alas, header. This should be * optimized for the cases when header is void. 
*/ - memcpy(data + nhead, skb->head, skb->tail - skb->head); + memcpy(data + nhead, skb->head, + skb->tail +#ifndef NET_SKBUFF_DATA_USES_OFFSET + - skb->head +#endif + ); memcpy(data + size, skb->end, sizeof(struct skb_shared_info)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) @@ -645,9 +652,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->head = data; skb->end = data + size; skb->data += off; - skb->tail += off; #ifndef NET_SKBUFF_DATA_USES_OFFSET - /* {transport,network,mac}_header are relative to skb->head */ + /* {transport,network,mac}_header and tail are relative to skb->head */ + skb->tail += off; skb->transport_header += off; skb->network_header += off; skb->mac_header += off; @@ -762,7 +769,7 @@ int skb_pad(struct sk_buff *skb, int pad) return 0; } - ntail = skb->data_len + pad - (skb->end - skb->tail); + ntail = skb->data_len + pad - (skb->end - skb_tail_pointer(skb)); if (likely(skb_cloned(skb) || ntail > 0)) { err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); if (unlikely(err)) @@ -863,7 +870,7 @@ done: } else { skb->len = len; skb->data_len = 0; - skb->tail = skb->data + len; + skb_set_tail_pointer(skb, len); } return 0; @@ -900,7 +907,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) * plus 128 bytes for future expansions. If we have enough * room at tail, reallocate without expansion only if skb is cloned. */ - int i, k, eat = (skb->tail + delta) - skb->end; + int i, k, eat = (skb_tail_pointer(skb) + delta) - skb->end; if (eat > 0 || skb_cloned(skb)) { if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, @@ -908,7 +915,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) return NULL; } - if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta)) + if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) BUG(); /* Optimization: no fragments, no reasons to preestimate @@ -1004,7 +1011,7 @@ pull_pages: skb->tail += delta; skb->data_len -= delta; - return skb->tail; + return skb_tail_pointer(skb); } /* Copy some data bits from skb to kernel buffer. 
*/ @@ -1539,7 +1546,7 @@ static inline void skb_split_inside_header(struct sk_buff *skb, skb1->len += skb1->data_len; skb->data_len = 0; skb->len = len; - skb->tail = skb->data + len; + skb_set_tail_pointer(skb, len); } static inline void skb_split_no_header(struct sk_buff *skb, diff --git a/net/core/wireless.c b/net/core/wireless.c index 7c6a5db544f1..4a777b68e3bc 100644 --- a/net/core/wireless.c +++ b/net/core/wireless.c @@ -1938,7 +1938,7 @@ static inline int rtnetlink_fill_iwinfo(struct sk_buff * skb, { struct ifinfomsg *r; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(*r)); r = NLMSG_DATA(nlh); @@ -1952,7 +1952,7 @@ static inline int rtnetlink_fill_iwinfo(struct sk_buff * skb, /* Add the wireless events in the netlink packet */ RTA_PUT(skb, IFLA_WIRELESS, event_len, event); - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index 84b8c5b45fef..7404653880b0 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -681,8 +681,10 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg) if (scp->peer.sdn_objnum) type = 0; - skb_put(skb, dn_sockaddr2username(&scp->peer, skb->tail, type)); - skb_put(skb, dn_sockaddr2username(&scp->addr, skb->tail, 2)); + skb_put(skb, dn_sockaddr2username(&scp->peer, + skb_tail_pointer(skb), type)); + skb_put(skb, dn_sockaddr2username(&scp->addr, + skb_tail_pointer(skb), 2)); menuver = DN_MENUVER_ACC | DN_MENUVER_USR; if (scp->peer.sdn_flags & SDF_PROXY) diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index bb73bf16630f..9678b096b844 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1468,7 +1468,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, struct dn_route *rt = (struct dn_route *)skb->dst; struct rtmsg *r; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); long expires; nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags); @@ -1509,7 +1509,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, if (rt->fl.iif) RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index 780a141f8342..544c45540746 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c @@ -295,7 +295,7 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, { struct rtmsg *rtm; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags); rtm = NLMSG_DATA(nlh); @@ -337,13 +337,13 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, nhp->rtnh_ifindex = nh->nh_oif; if (nh->nh_gw) RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw); - nhp->rtnh_len = skb->tail - (unsigned char *)nhp; + nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp; } endfor_nexthops(fi); mp_head->rta_type = RTA_MULTIPATH; - mp_head->rta_len = skb->tail - (u8*)mp_head; + mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; } - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 0e62def05a58..ceefd9dd0c92 100644 --- 
a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -33,7 +33,7 @@ static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp) { struct sk_buff *skb = NULL; size_t size; - unsigned char *old_tail; + sk_buff_data_t old_tail; struct nlmsghdr *nlh; unsigned char *ptr; struct nf_dn_rtmsg *rtm; diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 78993dadb53a..b5524f32ac2d 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -366,7 +366,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, fh->cb = cb; fh->port = port; if (sock->type != SOCK_DGRAM) { - skb->tail = skb->data; + skb_reset_tail_pointer(skb); skb->len = 0; } else if (res < 0) goto out_free; diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 59a765c49cf9..2b854941e06c 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -595,7 +595,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, if (frag != 0) flen -= hdrlen; - if (frag_skb->tail + flen > frag_skb->end) { + if (skb_tail_pointer(frag_skb) + flen > frag_skb->end) { printk(KERN_WARNING "%s: host decrypted and " "reassembled frame did not fit skb\n", dev->name); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index de019f9fbfe1..5e5613930ffb 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -21,6 +21,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) struct blkcipher_desc desc; struct esp_data *esp; struct sk_buff *trailer; + u8 *tail; int blksize; int clen; int alen; @@ -49,12 +50,13 @@ goto error; /* Fill padding... */ + tail = skb_tail_pointer(trailer); do { int i; for (i=0; i<clen-skb->len - 2; i++) - *(u8*)(trailer->tail + i) = i+1; + tail[i] = i + 1; } while (0); - *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; + tail[clen - skb->len - 2] = (clen - skb->len) - 2; pskb_put(skb, trailer, clen - skb->len); __skb_push(skb, skb->data - skb_network_header(skb)); @@ -62,7 +64,7 @@ esph = (struct ip_esp_hdr *)(skb_network_header(skb) + top_iph->ihl * 4); top_iph->tot_len = htons(skb->len + alen); - *(u8*)(trailer->tail - 1) = top_iph->protocol; + *(skb_tail_pointer(skb) - 1) = top_iph->protocol; /* this is non-NULL only with UDP Encapsulation */ if (x->encap) { diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 8372f8b8f0cd..d38cbba92a4d 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -450,7 +450,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) */ iph = ip_hdr(skb_in); - if ((u8 *)iph < skb_in->head || (u8 *)(iph + 1) > skb_in->tail) + if ((u8 *)iph < skb_in->head || + (skb_in->network_header + sizeof(*iph)) > skb_in->tail) goto out; /* diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 1fc637fb6750..2506021c2935 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -348,8 +348,8 @@ static int igmpv3_sendpack(struct sk_buff *skb) { struct iphdr *pip = ip_hdr(skb); struct igmphdr *pig = igmp_hdr(skb); - const int iplen = skb->tail - skb_network_header(skb); - const int igmplen = skb->tail - skb_transport_header(skb); + const int iplen = skb->tail - skb->network_header; + const int igmplen = skb->tail - skb->transport_header; pip->tot_len = htons(iplen); ip_send_check(pip); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 5df71cd08da8..37362cd1d07f 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -60,7 
+60,7 @@ static int inet_csk_diag_fill(struct sock *sk, struct nlmsghdr *nlh; void *info = NULL; struct inet_diag_meminfo *minfo = NULL; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); const struct inet_diag_handler *handler; handler = inet_diag_table[unlh->nlmsg_type]; @@ -147,7 +147,7 @@ static int inet_csk_diag_fill(struct sock *sk, icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info) icsk->icsk_ca_ops->get_info(sk, ext, skb); - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: @@ -163,7 +163,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, { long tmo; struct inet_diag_msg *r; - const unsigned char *previous_tail = skb->tail; + const unsigned char *previous_tail = skb_tail_pointer(skb); struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r)); @@ -205,7 +205,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, &tw6->tw_v6_daddr); } #endif - nlh->nlmsg_len = skb->tail - previous_tail; + nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail; return skb->len; nlmsg_failure: skb_trim(skb, previous_tail - skb->data); @@ -535,7 +535,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, { const struct inet_request_sock *ireq = inet_rsk(req); struct inet_sock *inet = inet_sk(sk); - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct inet_diag_msg *r; struct nlmsghdr *nlh; long tmo; @@ -574,7 +574,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, &inet6_rsk(req)->rmt_addr); } #endif - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index fcb35cd5ccfd..c199d2311731 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -316,7 +316,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); serr->port = port; - __skb_pull(skb, skb->tail - skb->data); + __skb_pull(skb, skb_tail_pointer(skb) - skb->data); skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb)) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 50d0b301380e..ea0a491dce92 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -513,7 +513,8 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { - nlh->nlmsg_len = skb->tail - (u8*)nlh; + nlh->nlmsg_len = (skb_tail_pointer(skb) - + (u8 *)nlh); } else { nlh->nlmsg_type = NLMSG_ERROR; nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); @@ -580,7 +581,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) * Copy the IP header */ - skb_set_network_header(skb, skb->tail - skb->data); + skb->network_header = skb->tail; skb_put(skb, ihl); memcpy(skb->data,pkt->data,ihl); ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ @@ -1544,7 +1545,7 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) int ct; struct rtnexthop *nhp; struct net_device *dev = vif_table[c->mfc_parent].dev; - u8 *b = skb->tail; + u8 *b = skb_tail_pointer(skb); struct rtattr *mp_head; if (dev) @@ -1564,7 +1565,7 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) } } mp_head->rta_type = RTA_MULTIPATH; - mp_head->rta_len = skb->tail - 
(u8*)mp_head; + mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; rtm->rtm_type = RTN_MULTICAST; return 1; diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c index 25bd68967305..344ddbbdc756 100644 --- a/net/ipv4/ipvs/ip_vs_ftp.c +++ b/net/ipv4/ipvs/ip_vs_ftp.c @@ -162,7 +162,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, iph = ip_hdr(*pskb); th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); data = (char *)th + (th->doff << 2); - data_limit = (*pskb)->tail; + data_limit = skb_tail_pointer(*pskb); if (ip_vs_ftp_get_addrport(data, data_limit, SERVER_STRING, @@ -269,7 +269,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, the length of the header in 32-bit multiples, it is accurate to calculate data address by th+HLEN*4 */ data = data_start = (char *)th + (th->doff << 2); - data_limit = (*pskb)->tail; + data_limit = skb_tail_pointer(*pskb); while (data <= data_limit - 6) { if (strnicmp(data, "PASV\r\n", 6) == 0) { diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index b4450f1ccc1b..6298d404e7c7 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c @@ -37,28 +37,28 @@ target(struct sk_buff **pskb, /* We assume that pln and hln were checked in the match */ if (mangle->flags & ARPT_MANGLE_SDEV) { if (ARPT_DEV_ADDR_LEN_MAX < hln || - (arpptr + hln > (**pskb).tail)) + (arpptr + hln > skb_tail_pointer(*pskb))) return NF_DROP; memcpy(arpptr, mangle->src_devaddr, hln); } arpptr += hln; if (mangle->flags & ARPT_MANGLE_SIP) { if (ARPT_MANGLE_ADDR_LEN_MAX < pln || - (arpptr + pln > (**pskb).tail)) + (arpptr + pln > skb_tail_pointer(*pskb))) return NF_DROP; memcpy(arpptr, &mangle->u_s.src_ip, pln); } arpptr += pln; if (mangle->flags & ARPT_MANGLE_TDEV) { if (ARPT_DEV_ADDR_LEN_MAX < hln || - (arpptr + hln > (**pskb).tail)) + (arpptr + hln > skb_tail_pointer(*pskb))) return NF_DROP; memcpy(arpptr, mangle->tgt_devaddr, hln); } arpptr += hln; if (mangle->flags & ARPT_MANGLE_TIP) { if (ARPT_MANGLE_ADDR_LEN_MAX < pln || - (arpptr + pln > (**pskb).tail)) + (arpptr + pln > skb_tail_pointer(*pskb))) return NF_DROP; memcpy(arpptr, &mangle->u_t.tgt_ip, pln); } diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 5842f1aa973a..15e0d2002235 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -191,7 +191,7 @@ ipq_flush(int verdict) static struct sk_buff * ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) { - unsigned char *old_tail; + sk_buff_data_t old_tail; size_t size = 0; size_t data_len = 0; struct sk_buff *skb; @@ -235,7 +235,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) if (!skb) goto nlmsg_failure; - old_tail= skb->tail; + old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); pmsg = NLMSG_DATA(nlh); memset(pmsg, 0, sizeof(*pmsg)); diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index c2c92ff12781..8a40fbe842b7 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c @@ -92,7 +92,8 @@ static void mangle_contents(struct sk_buff *skb, /* move post-replacement */ memmove(data + match_offset + rep_len, data + match_offset + match_len, - skb->tail - (data + match_offset + match_len)); + skb->tail - (skb->network_header + dataoff + + match_offset + match_len)); /* insert data from buffer */ memcpy(data + match_offset, rep_buffer, rep_len); diff --git a/net/ipv4/tcp.c 
b/net/ipv4/tcp.c index 2b214cc3724c..18a09a78ca0b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2231,7 +2231,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) th->cwr = 0; } while (skb->next); - delta = htonl(oldlen + (skb->tail - skb_transport_header(skb)) + + delta = htonl(oldlen + (skb->tail - skb->transport_header) + skb->data_len); th->check = ~csum_fold((__force __wsum)((__force u32)th->check + (__force u32)delta)); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 29c53fbb2204..c22cdcd84320 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -733,7 +733,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) } skb_shinfo(skb)->nr_frags = k; - skb->tail = skb->data; + skb_reset_tail_pointer(skb); skb->data_len -= len; skb->len = skb->data_len; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index f16f4f0c5814..4a355fea4098 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -268,7 +268,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info) serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); serr->port = fl->fl_ip_dport; - __skb_pull(skb, skb->tail - skb->data); + __skb_pull(skb, skb_tail_pointer(skb) - skb->data); skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb)) diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 7fdf84dee73f..b8e8914cc002 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -51,6 +51,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) int clen; int alen; int nfrags; + u8 *tail; struct esp_data *esp = x->data; int hdr_len = (skb_transport_offset(skb) + sizeof(*esph) + esp->conf.ivlen); @@ -78,18 +79,19 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) } /* Fill padding... 
*/ + tail = skb_tail_pointer(trailer); do { int i; for (i=0; i<clen-skb->len - 2; i++) - *(u8*)(trailer->tail + i) = i+1; + tail[i] = i + 1; } while (0); - *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; + tail[clen-skb->len - 2] = (clen - skb->len) - 2; pskb_put(skb, trailer, clen - skb->len); top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len); esph = (struct ipv6_esp_hdr *)skb_transport_header(skb); top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph)); - *(u8 *)(trailer->tail - 1) = *skb_network_header(skb); + *(skb_tail_pointer(skb) - 1) = *skb_network_header(skb); *skb_network_header(skb) = IPPROTO_ESP; esph->spi = x->id.spi; diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index a6a275db88cd..275d2e812a44 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -51,7 +51,7 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) { const unsigned char *nh = skb_network_header(skb); - int packet_len = skb->tail - nh; + int packet_len = skb->tail - skb->network_header; struct ipv6_opt_hdr *hdr; int len; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index d3edc3cf1ce9..e94992ab92e6 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -317,7 +317,8 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, int hlimit, tclass; int err = 0; - if ((u8*)hdr < skb->head || (u8*)(hdr+1) > skb->tail) + if ((u8 *)hdr < skb->head || + (skb->network_header + sizeof(*hdr)) > skb->tail) return; /* diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index b2c092c6b9dc..e2b8db6b9aef 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -514,7 +514,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) u16 offset = sizeof(struct ipv6hdr); struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); - unsigned int packet_len = skb->tail - skb_network_header(skb); + unsigned int packet_len = skb->tail - skb->network_header; int found_rhdr = 0; *nexthdr = &ipv6_hdr(skb)->nexthdr; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 4c45bcce75e8..6c2758951d60 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1423,7 +1423,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); - skb_set_transport_header(skb, skb->tail - skb->data); + skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data); skb_put(skb, sizeof(*pmr)); pmr = (struct mld2_report *)skb_transport_header(skb); pmr->type = ICMPV6_MLD2_REPORT; @@ -1468,8 +1468,8 @@ static void mld_sendpack(struct sk_buff *skb) int err; IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); - payload_len = skb->tail - skb_network_header(skb) - sizeof(*pip6); - mldlen = skb->tail - skb_transport_header(skb); + payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); + mldlen = skb->tail - skb->transport_header; pip6->payload_len = htons(payload_len); pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index f0288e92fb52..6ed763ee6785 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -260,7 +260,7 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); const unsigned char *nh = skb_network_header(skb); - unsigned int packet_len = skb->tail - nh; + unsigned int packet_len = skb->tail - skb->network_header; int found_rhdr = 0; *nexthdr = &ipv6_hdr(skb)->nexthdr; @@ -392,7 +392,7 @@ static int mip6_rthdr_offset(struct xfrm_state *x, struct
sk_buff *skb, struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); const unsigned char *nh = skb_network_header(skb); - unsigned int packet_len = skb->tail - nh; + unsigned int packet_len = skb->tail - skb->network_header; int found_rhdr = 0; *nexthdr = &ipv6_hdr(skb)->nexthdr; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index f8e619772fb4..b1cf70816477 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -492,7 +492,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, skb_reserve(skb, LL_RESERVED_SPACE(dev)); ip6_nd_hdr(sk, skb, dev, src_addr, daddr, IPPROTO_ICMPV6, len); - skb_set_transport_header(skb, skb->tail - skb->data); + skb->transport_header = skb->tail; skb_put(skb, len); msg = (struct nd_msg *)skb_transport_header(skb); @@ -584,7 +584,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, skb_reserve(skb, LL_RESERVED_SPACE(dev)); ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len); - skb_set_transport_header(skb, skb->tail - skb->data); + skb->transport_header = skb->tail; skb_put(skb, len); msg = (struct nd_msg *)skb_transport_header(skb); msg->icmph.icmp6_type = NDISC_NEIGHBOUR_SOLICITATION; @@ -685,7 +685,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, skb_reserve(skb, LL_RESERVED_SPACE(dev)); ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len); - skb_set_transport_header(skb, skb->tail - skb->data); + skb->transport_header = skb->tail; skb_put(skb, len); hdr = icmp6_hdr(skb); hdr->icmp6_type = NDISC_ROUTER_SOLICITATION; @@ -767,7 +767,8 @@ static void ndisc_recv_ns(struct sk_buff *skb) struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; - u32 ndoptlen = skb->tail - msg->opt; + u32 ndoptlen = skb->tail - (skb->transport_header + + offsetof(struct nd_msg, opt)); struct ndisc_options ndopts; struct net_device *dev = skb->dev; struct inet6_ifaddr *ifp; @@ -945,7 +946,8 @@ static void ndisc_recv_na(struct sk_buff *skb) struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; - u32 ndoptlen = skb->tail - msg->opt; + u32 ndoptlen = skb->tail - (skb->transport_header + + offsetof(struct nd_msg, opt)); struct ndisc_options ndopts; struct net_device *dev = skb->dev; struct inet6_ifaddr *ifp; @@ -1111,8 +1113,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) __u8 * opt = (__u8 *)(ra_msg + 1); - optlen = (skb->tail - skb_transport_header(skb)) - - sizeof(struct ra_msg); + optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg); if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK2(KERN_WARNING @@ -1361,7 +1362,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) return; } - optlen = skb->tail - skb_transport_header(skb); + optlen = skb->tail - skb->transport_header; optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); if (optlen < 0) { @@ -1522,7 +1523,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr, IPPROTO_ICMPV6, len); - skb_set_transport_header(buff, buff->tail - buff->data); + skb_set_transport_header(buff, skb_tail_pointer(buff) - buff->data); skb_put(buff, len); icmph = icmp6_hdr(buff); diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 66a2c4135251..5cfce218c5e1 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -189,7 +189,7 @@ 
ipq_flush(int verdict) static struct sk_buff * ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) { - unsigned char *old_tail; + sk_buff_data_t old_tail; size_t size = 0; size_t data_len = 0; struct sk_buff *skb; @@ -233,7 +233,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) if (!skb) goto nlmsg_failure; - old_tail= skb->tail; + old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); pmsg = NLMSG_DATA(nlh); memset(pmsg, 0, sizeof(*pmsg)); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 8705f6a502d9..2b3be68b70a7 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1077,7 +1077,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) - amount = skb->tail - skb_transport_header(skb); + amount = skb->tail - skb->transport_header; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c index 01d7c9c7b3b4..e5e4792a0314 100644 --- a/net/irda/ircomm/ircomm_param.c +++ b/net/irda/ircomm/ircomm_param.c @@ -133,8 +133,8 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) * Inserting is a little bit tricky since we don't know how much * room we will need. But this should hopefully work OK */ - count = irda_param_insert(self, pi, skb->tail, skb_tailroom(skb), - &ircomm_param_info); + count = irda_param_insert(self, pi, skb_tail_pointer(skb), + skb_tailroom(skb), &ircomm_param_info); if (count < 0) { IRDA_WARNING("%s(), no room for parameter!\n", __FUNCTION__); spin_unlock_irqrestore(&self->spinlock, flags); diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index fcf9d6599628..ed69773b0f8e 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c @@ -1039,7 +1039,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, } /* Insert at end of sk-buffer */ - frame = skb->tail; + frame = skb_tail_pointer(skb); /* Make space for data */ if (skb_tailroom(skb) < (param_len+value_len+3)) { diff --git a/net/irda/qos.c b/net/irda/qos.c index 349012c926b7..aeb18cf1dcae 100644 --- a/net/irda/qos.c +++ b/net/irda/qos.c @@ -469,49 +469,49 @@ int irlap_insert_qos_negotiation_params(struct irlap_cb *self, int ret; /* Insert data rate */ - ret = irda_param_insert(self, PI_BAUD_RATE, skb->tail, + ret = irda_param_insert(self, PI_BAUD_RATE, skb_tail_pointer(skb), skb_tailroom(skb), &irlap_param_info); if (ret < 0) return ret; skb_put(skb, ret); /* Insert max turnaround time */ - ret = irda_param_insert(self, PI_MAX_TURN_TIME, skb->tail, + ret = irda_param_insert(self, PI_MAX_TURN_TIME, skb_tail_pointer(skb), skb_tailroom(skb), &irlap_param_info); if (ret < 0) return ret; skb_put(skb, ret); /* Insert data size */ - ret = irda_param_insert(self, PI_DATA_SIZE, skb->tail, + ret = irda_param_insert(self, PI_DATA_SIZE, skb_tail_pointer(skb), skb_tailroom(skb), &irlap_param_info); if (ret < 0) return ret; skb_put(skb, ret); /* Insert window size */ - ret = irda_param_insert(self, PI_WINDOW_SIZE, skb->tail, + ret = irda_param_insert(self, PI_WINDOW_SIZE, skb_tail_pointer(skb), skb_tailroom(skb), &irlap_param_info); if (ret < 0) return ret; skb_put(skb, ret); /* Insert additional BOFs */ - ret = irda_param_insert(self, PI_ADD_BOFS, skb->tail, + ret = irda_param_insert(self, PI_ADD_BOFS, skb_tail_pointer(skb), skb_tailroom(skb), &irlap_param_info); 
if (ret < 0) return ret; skb_put(skb, ret); /* Insert minimum turnaround time */ - ret = irda_param_insert(self, PI_MIN_TURN_TIME, skb->tail, + ret = irda_param_insert(self, PI_MIN_TURN_TIME, skb_tail_pointer(skb), skb_tailroom(skb), &irlap_param_info); if (ret < 0) return ret; skb_put(skb, ret); /* Insert link disconnect/threshold time */ - ret = irda_param_insert(self, PI_LINK_DISC, skb->tail, + ret = irda_param_insert(self, PI_LINK_DISC, skb_tail_pointer(skb), skb_tailroom(skb), &irlap_param_info); if (ret < 0) return ret; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 48f05314ebf7..442300c633d7 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -268,9 +268,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; struct nfattr *nest_parms; - unsigned char *b; - - b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); event |= NFNL_SUBSYS_CTNETLINK << 8; nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg)); @@ -303,7 +301,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, ctnetlink_dump_use(skb, ct) < 0) goto nfattr_failure; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: @@ -322,7 +320,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, struct nf_conn *ct = (struct nf_conn *)ptr; struct sk_buff *skb; unsigned int type; - unsigned char *b; + sk_buff_data_t b; unsigned int flags = 0, group; /* ignore our fake conntrack entry */ @@ -1152,9 +1150,7 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; - unsigned char *b; - - b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); event |= NFNL_SUBSYS_CTNETLINK_EXP << 8; nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg)); @@ -1168,7 +1164,7 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, if (ctnetlink_exp_dump_expect(skb, exp) < 0) goto nfattr_failure; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: @@ -1186,7 +1182,7 @@ static int ctnetlink_expect_event(struct notifier_block *this, struct nf_conntrack_expect *exp = (struct nf_conntrack_expect *)ptr; struct sk_buff *skb; unsigned int type; - unsigned char *b; + sk_buff_data_t b; int flags = 0; if (events & IPEXP_NEW) { diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 5eeebd2efa7a..9709f94787f8 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -409,15 +409,14 @@ __build_packet_message(struct nfulnl_instance *inst, const struct nf_loginfo *li, const char *prefix, unsigned int plen) { - unsigned char *old_tail; struct nfulnl_msg_packet_hdr pmsg; struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; __be32 tmp_uint; + sk_buff_data_t old_tail = inst->skb->tail; UDEBUG("entered\n"); - old_tail = inst->skb->tail; nlh = NLMSG_PUT(inst->skb, 0, 0, NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, sizeof(struct nfgenmsg)); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index cfbee39f61d6..b6585caa431e 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -338,7 +338,7 @@ static struct sk_buff * nfqnl_build_packet_message(struct nfqnl_instance *queue, struct nfqnl_queue_entry *entry, int *errp) { - unsigned char *old_tail; + sk_buff_data_t old_tail; size_t size; size_t data_len = 0; 
struct sk_buff *skb; @@ -404,7 +404,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, if (!skb) goto nlmsg_failure; - old_tail= skb->tail; + old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, sizeof(struct nfgenmsg)); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 50dc5edb7752..fdb6eb13cbcb 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -785,7 +785,7 @@ static inline struct sk_buff *netlink_trim(struct sk_buff *skb, skb_orphan(skb); - delta = skb->end - skb->tail; + delta = skb->end - skb_tail_pointer(skb); if (delta * 2 < skb->truesize) return skb; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 51c059b09a37..36388b2f32f9 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -775,7 +775,7 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, err = -EINVAL; res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len); if (sock->type != SOCK_DGRAM) { - skb->tail = skb->data; + skb_reset_tail_pointer(skb); skb->len = 0; } else if (res < 0) goto out_free; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index cb21617a5670..28326fb1fc4e 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -93,7 +93,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, continue; a->priv = p; a->order = n_i; - r = (struct rtattr*) skb->tail; + r = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, a->order, 0, NULL); err = tcf_action_dump_1(skb, a, 0, 0); if (err < 0) { @@ -101,7 +101,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, skb_trim(skb, (u8*)r - skb->data); goto done; } - r->rta_len = skb->tail - (u8*)r; + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; n_i++; if (n_i >= TCA_ACT_MAX_PRIO) goto done; @@ -125,7 +125,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, struct rtattr *r ; int i= 0, n_i = 0; - r = (struct rtattr*) skb->tail; + r = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, a->order, 0, NULL); RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind); for (i = 0; i < (hinfo->hmask + 1); i++) { @@ -140,7 +140,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, } } RTA_PUT(skb, TCA_FCNT, 4, &n_i); - r->rta_len = skb->tail - (u8*)r; + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; return n_i; rtattr_failure: @@ -423,7 +423,7 @@ int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { int err = -EINVAL; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *r; if (a->ops == NULL || a->ops->dump == NULL) @@ -432,10 +432,10 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind); if (tcf_action_copy_stats(skb, a, 0)) goto rtattr_failure; - r = (struct rtattr*) skb->tail; + r = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, TCA_OPTIONS, 0, NULL); if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) { - r->rta_len = skb->tail - (u8*)r; + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; return err; } @@ -449,17 +449,17 @@ tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref) { struct tc_action *a; int err = -EINVAL; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *r ; while ((a = act) != NULL) { - r = (struct rtattr*) skb->tail; + r = (struct rtattr *)skb_tail_pointer(skb); act = a->next; RTA_PUT(skb, a->order, 0, NULL); err = 
tcf_action_dump_1(skb, a, bind, ref); if (err < 0) goto errout; - r->rta_len = skb->tail - (u8*)r; + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; } return 0; @@ -635,7 +635,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, { struct tcamsg *t; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *x; nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); @@ -645,15 +645,15 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, t->tca__pad1 = 0; t->tca__pad2 = 0; - x = (struct rtattr*) skb->tail; + x = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); if (tcf_action_dump(skb, a, bind, ref) < 0) goto rtattr_failure; - x->rta_len = skb->tail - (u8*)x; + x->rta_len = skb_tail_pointer(skb) - (u8 *)x; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: @@ -767,7 +767,7 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid) return -ENOBUFS; } - b = (unsigned char *)skb->tail; + b = skb_tail_pointer(skb); if (rtattr_parse_nested(tb, TCA_ACT_MAX, rta) < 0) goto err_out; @@ -783,16 +783,16 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid) t->tca__pad1 = 0; t->tca__pad2 = 0; - x = (struct rtattr *) skb->tail; + x = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); if (err < 0) goto rtattr_failure; - x->rta_len = skb->tail - (u8 *) x; + x->rta_len = skb_tail_pointer(skb) - (u8 *)x; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; nlh->nlmsg_flags |= NLM_F_ROOT; module_put(a->ops->owner); kfree(a); @@ -884,7 +884,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, if (!skb) return -ENOBUFS; - b = (unsigned char *)skb->tail; + b = skb_tail_pointer(skb); nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); t = NLMSG_DATA(nlh); @@ -892,15 +892,15 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, t->tca__pad1 = 0; t->tca__pad2 = 0; - x = (struct rtattr*) skb->tail; + x = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); if (tcf_action_dump(skb, a, 0, 0) < 0) goto rtattr_failure; - x->rta_len = skb->tail - (u8*)x; + x->rta_len = skb_tail_pointer(skb) - (u8 *)x; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; NETLINK_CB(skb).dst_group = RTNLGRP_TC; err = rtnetlink_send(skb, pid, RTNLGRP_TC, flags&NLM_F_ECHO); @@ -1015,7 +1015,7 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) { struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *x; struct tc_action_ops *a_o; struct tc_action a; @@ -1048,7 +1048,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) t->tca__pad1 = 0; t->tca__pad2 = 0; - x = (struct rtattr *) skb->tail; + x = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); ret = a_o->walk(skb, cb, RTM_GETACTION, &a); @@ -1056,12 +1056,12 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) goto rtattr_failure; if (ret > 0) { - x->rta_len = skb->tail - (u8 *) x; + x->rta_len = skb_tail_pointer(skb) - (u8 *)x; ret = skb->len; } else skb_trim(skb, (u8*)x - skb->data); - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; if (NETLINK_CB(cb->skb).pid && ret) 
nlh->nlmsg_flags |= NLM_F_MULTI; module_put(a_o->owner); diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 87d0faf32867..aad748b3b38c 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -155,7 +155,7 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_gact opt; struct tcf_gact *gact = a->priv; struct tcf_t t; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 47f0b1324239..2ccfd5b20fab 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -245,7 +245,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tcf_ipt *ipt = a->priv; struct ipt_entry_target *t; struct tcf_t tm; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 3e93683e9ab3..15f6ecdaf611 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -206,7 +206,7 @@ bad_mirred: static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tcf_mirred *m = a->priv; struct tc_mirred opt; struct tcf_t t; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 20813eee8af4..d654cea1a46c 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -195,7 +195,7 @@ done: static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tcf_pedit *p = a->priv; struct tc_pedit *opt; struct tcf_t t; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 10a5a5c36f76..068b23763665 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -80,7 +80,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c continue; a->priv = p; a->order = index; - r = (struct rtattr*) skb->tail; + r = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, a->order, 0, NULL); if (type == RTM_DELACTION) err = tcf_action_dump_1(skb, a, 0, 1); @@ -91,7 +91,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c skb_trim(skb, (u8*)r - skb->data); goto done; } - r->rta_len = skb->tail - (u8*)r; + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; n_i++; } } @@ -326,7 +326,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tcf_police *police = a->priv; struct tc_police opt; @@ -572,7 +572,7 @@ EXPORT_SYMBOL(tcf_police); int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_police opt; opt.index = police->tcf_index; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index c7971182af07..ecbcfa59b76c 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -155,7 +155,7 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind) static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct 
tcf_defact *d = a->priv; struct tc_defact opt; struct tcf_t t; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 5c6ffdb77d2d..84231baf77d1 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -323,7 +323,7 @@ tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh, { struct tcmsg *tcm; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); tcm = NLMSG_DATA(nlh); @@ -340,7 +340,7 @@ tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh, if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0) goto rtattr_failure; } - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: @@ -563,30 +563,30 @@ tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts, * to work with both old and new modes of entering * tc data even if iproute2 was newer - jhs */ - struct rtattr * p_rta = (struct rtattr*) skb->tail; + struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb); if (exts->action->type != TCA_OLD_COMPAT) { RTA_PUT(skb, map->action, 0, NULL); if (tcf_action_dump(skb, exts->action, 0, 0) < 0) goto rtattr_failure; - p_rta->rta_len = skb->tail - (u8*)p_rta; + p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta; } else if (map->police) { RTA_PUT(skb, map->police, 0, NULL); if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0) goto rtattr_failure; - p_rta->rta_len = skb->tail - (u8*)p_rta; + p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta; } } #elif defined CONFIG_NET_CLS_POLICE if (map->police && exts->police) { - struct rtattr * p_rta = (struct rtattr*) skb->tail; + struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, map->police, 0, NULL); if (tcf_police_dump(skb, exts->police) < 0) goto rtattr_failure; - p_rta->rta_len = skb->tail - (u8*)p_rta; + p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta; } #endif return 0; diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 4a91f082a81d..800ec2ac326b 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -245,7 +245,7 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct basic_filter *f = (struct basic_filter *) fh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; if (f == NULL) @@ -263,7 +263,7 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh, tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) goto rtattr_failure; - rta->rta_len = (skb->tail - b); + rta->rta_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 5dbb9d451f73..f5f355852a87 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -348,7 +348,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh, { struct fw_head *head = (struct fw_head *)tp->root; struct fw_filter *f = (struct fw_filter*)fh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; if (f == NULL) @@ -374,7 +374,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh, if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0) goto rtattr_failure; - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; if (tcf_exts_dump_stats(skb, &f->exts, &fw_ext_map) < 0) goto rtattr_failure; diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index abc47cc48ad0..1f94df36239d 100644 --- 
a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -562,7 +562,7 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct route4_filter *f = (struct route4_filter*)fh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; u32 id; @@ -591,7 +591,7 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh, if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0) goto rtattr_failure; - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0) goto rtattr_failure; diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 6f373b020eb4..87ed6f3c5070 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -593,7 +593,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, { struct rsvp_filter *f = (struct rsvp_filter*)fh; struct rsvp_session *s; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; struct tc_rsvp_pinfo pinfo; @@ -623,7 +623,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) goto rtattr_failure; - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; if (tcf_exts_dump_stats(skb, &f->exts, &rsvp_ext_map) < 0) goto rtattr_failure; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 7563fdcef4b7..0537d6066b43 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -448,7 +448,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, { struct tcindex_data *p = PRIV(tp); struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; DPRINTK("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n", @@ -463,7 +463,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, RTA_PUT(skb,TCA_TCINDEX_SHIFT,sizeof(p->shift),&p->shift); RTA_PUT(skb,TCA_TCINDEX_FALL_THROUGH,sizeof(p->fall_through), &p->fall_through); - rta->rta_len = skb->tail-b; + rta->rta_len = skb_tail_pointer(skb) - b; } else { if (p->perfect) { t->tcm_handle = r-p->perfect; @@ -486,7 +486,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0) goto rtattr_failure; - rta->rta_len = skb->tail-b; + rta->rta_len = skb_tail_pointer(skb) - b; if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0) goto rtattr_failure; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 695b34051b9f..fa11bb750049 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -213,7 +213,7 @@ check_terminal: off2 = 0; } - if (ptr < skb->tail) + if (ptr < skb_tail_pointer(skb)) goto next_ht; } @@ -718,7 +718,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct tc_u_knode *n = (struct tc_u_knode*)fh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; if (n == NULL) @@ -765,7 +765,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, #endif } - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; if (TC_U32_KEY(n->handle)) if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0) goto rtattr_failure; diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 959c306c5714..63146d339d81 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -418,17 
+418,19 @@ void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree) int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) { int i; - struct rtattr * top_start = (struct rtattr*) skb->tail; - struct rtattr * list_start; + u8 *tail; + struct rtattr *top_start = (struct rtattr *)skb_tail_pointer(skb); + struct rtattr *list_start; RTA_PUT(skb, tlv, 0, NULL); RTA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr); - list_start = (struct rtattr *) skb->tail; + list_start = (struct rtattr *)skb_tail_pointer(skb); RTA_PUT(skb, TCA_EMATCH_TREE_LIST, 0, NULL); + tail = skb_tail_pointer(skb); for (i = 0; i < tree->hdr.nmatches; i++) { - struct rtattr *match_start = (struct rtattr*) skb->tail; + struct rtattr *match_start = (struct rtattr *)tail; struct tcf_ematch *em = tcf_em_get_match(tree, i); struct tcf_ematch_hdr em_hdr = { .kind = em->ops ? em->ops->kind : TCF_EM_CONTAINER, @@ -447,11 +449,12 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) } else if (em->datalen > 0) RTA_PUT_NOHDR(skb, em->datalen, (void *) em->data); - match_start->rta_len = skb->tail - (u8*) match_start; + tail = skb_tail_pointer(skb); + match_start->rta_len = tail - (u8 *)match_start; } - list_start->rta_len = skb->tail - (u8 *) list_start; - top_start->rta_len = skb->tail - (u8 *) top_start; + list_start->rta_len = tail - (u8 *)list_start; + top_start->rta_len = tail - (u8 *)top_start; return 0; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index ecbdc6b42a9c..7482a950717b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -813,7 +813,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, { struct tcmsg *tcm; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct gnet_dump d; nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); @@ -847,7 +847,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, if (gnet_stats_finish_copy(&d) < 0) goto rtattr_failure; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: @@ -1051,7 +1051,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, { struct tcmsg *tcm; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct gnet_dump d; struct Qdisc_class_ops *cl_ops = q->ops->cl_ops; @@ -1076,7 +1076,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, if (gnet_stats_finish_copy(&d) < 0) goto rtattr_failure; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index baca8743c12b..1d7bb1632138 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -631,7 +631,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, { struct atm_qdisc_data *p = PRIV(sch); struct atm_flow_data *flow = (struct atm_flow_data *) cl; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; DPRINTK("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", @@ -661,7 +661,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(zero),&zero); } - rta->rta_len = skb->tail-b; + rta->rta_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index d83414d828d8..be98a01253e9 100644 --- 
a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1465,7 +1465,7 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt) static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); return skb->len; @@ -1477,7 +1477,7 @@ rtattr_failure: static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_lssopt opt; opt.flags = 0; @@ -1502,7 +1502,7 @@ rtattr_failure: static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_wrropt opt; opt.flags = 0; @@ -1520,7 +1520,7 @@ rtattr_failure: static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_ovl opt; opt.strategy = cl->ovl_strategy; @@ -1537,7 +1537,7 @@ rtattr_failure: static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_fopt opt; if (cl->split || cl->defmap) { @@ -1556,7 +1556,7 @@ rtattr_failure: #ifdef CONFIG_NET_CLS_POLICE static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_police opt; if (cl->police) { @@ -1590,14 +1590,14 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl) static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb) { struct cbq_sched_data *q = qdisc_priv(sch); - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; rta = (struct rtattr*)b; RTA_PUT(skb, TCA_OPTIONS, 0, NULL); if (cbq_dump_attr(skb, &q->link) < 0) goto rtattr_failure; - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: @@ -1619,7 +1619,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, struct tcmsg *tcm) { struct cbq_class *cl = (struct cbq_class*)arg; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; if (cl->tparent) @@ -1633,7 +1633,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg, RTA_PUT(skb, TCA_OPTIONS, 0, NULL); if (cbq_dump_attr(skb, cl) < 0) goto rtattr_failure; - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 5197b6caaf2d..80e6f811e3bc 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1363,7 +1363,7 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, struct tcmsg *tcm) { struct hfsc_class *cl = (struct hfsc_class *)arg; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta = (struct rtattr *)b; tcm->tcm_parent = cl->cl_parent ? 
cl->cl_parent->classid : TC_H_ROOT; @@ -1374,7 +1374,7 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, RTA_PUT(skb, TCA_OPTIONS, 0, NULL); if (hfsc_dump_curves(skb, cl) < 0) goto rtattr_failure; - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: @@ -1576,7 +1576,7 @@ static int hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) { struct hfsc_sched *q = qdisc_priv(sch); - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_hfsc_qopt qopt; qopt.defcls = q->defcls; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index f76c20c0a109..c687388a8cb6 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1110,7 +1110,7 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt) static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) { struct htb_sched *q = qdisc_priv(sch); - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; struct tc_htb_glob gopt; spin_lock_bh(&sch->dev->queue_lock); @@ -1123,12 +1123,12 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) rta = (struct rtattr *)b; RTA_PUT(skb, TCA_OPTIONS, 0, NULL); RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; spin_unlock_bh(&sch->dev->queue_lock); return skb->len; rtattr_failure: spin_unlock_bh(&sch->dev->queue_lock); - skb_trim(skb, skb->tail - skb->data); + skb_trim(skb, skb_tail_pointer(skb) - skb->data); return -1; } @@ -1136,7 +1136,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, struct tcmsg *tcm) { struct htb_class *cl = (struct htb_class *)arg; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; struct tc_htb_opt opt; @@ -1159,7 +1159,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, opt.prio = cl->un.leaf.prio; opt.level = cl->level; RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; spin_unlock_bh(&sch->dev->queue_lock); return skb->len; rtattr_failure: diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index cfe070ee6ee3..d19f4070c237 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -362,12 +362,12 @@ static void ingress_destroy(struct Qdisc *sch) static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) { - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; rta = (struct rtattr *) b; RTA_PUT(skb, TCA_OPTIONS, 0, NULL); - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 915f82a2cc3d..2a9b1e429ff8 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -583,7 +583,7 @@ static void netem_destroy(struct Qdisc *sch) static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) { const struct netem_sched_data *q = qdisc_priv(sch); - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta = (struct rtattr *) b; struct tc_netem_qopt qopt; struct tc_netem_corr cor; @@ -611,7 +611,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) corrupt.correlation = q->corrupt_cor.rho; RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; return skb->len; diff 
--git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index de889f23f22a..5b371109ec1c 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -271,7 +271,7 @@ static int prio_init(struct Qdisc *sch, struct rtattr *opt) static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) { struct prio_sched_data *q = qdisc_priv(sch); - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_prio_qopt opt; opt.bands = q->bands; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index e3695407afc6..a511ba83e26f 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -461,7 +461,7 @@ static void sfq_destroy(struct Qdisc *sch) static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) { struct sfq_sched_data *q = qdisc_priv(sch); - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct tc_sfq_qopt opt; opt.quantum = q->quantum; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index f14692f3a14e..231895562c66 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -387,7 +387,7 @@ static void tbf_destroy(struct Qdisc *sch) static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) { struct tbf_sched_data *q = qdisc_priv(sch); - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); struct rtattr *rta; struct tc_tbf_qopt opt; @@ -403,7 +403,7 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) opt.mtu = q->mtu; opt.buffer = q->buffer; RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); - rta->rta_len = skb->tail - b; + rta->rta_len = skb_tail_pointer(skb) - b; return skb->len; diff --git a/net/sctp/input.c b/net/sctp/input.c index 1ff47b18724a..18b97eedc1fa 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -612,7 +612,7 @@ int sctp_rcv_ootb(struct sk_buff *skb) break; ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); - if (ch_end > skb->tail) + if (ch_end > skb_tail_pointer(skb)) break; /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the @@ -644,7 +644,7 @@ int sctp_rcv_ootb(struct sk_buff *skb) } ch = (sctp_chunkhdr_t *) ch_end; - } while (ch_end < skb->tail); + } while (ch_end < skb_tail_pointer(skb)); return 0; diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index c30629e17781..88aa22407549 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -159,16 +159,16 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) * the skb->tail. */ if (unlikely(skb_is_nonlinear(chunk->skb))) { - if (chunk->chunk_end > chunk->skb->tail) - chunk->chunk_end = chunk->skb->tail; + if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) + chunk->chunk_end = skb_tail_pointer(chunk->skb); } skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); chunk->subh.v = NULL; /* Subheader is no longer valid. */ - if (chunk->chunk_end < chunk->skb->tail) { + if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) { /* This is not a singleton */ chunk->singleton = 0; - } else if (chunk->chunk_end > chunk->skb->tail) { + } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { /* RFC 2960, Section 6.10 Bundling * * Partial chunks MUST NOT be placed in an SCTP packet. diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 60c5b59d4c65..759ea3d19976 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1143,7 +1143,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) /* Adjust the chunk length field. 
*/ chunk->chunk_hdr->length = htons(chunklen + padlen + len); - chunk->chunk_end = chunk->skb->tail; + chunk->chunk_end = skb_tail_pointer(chunk->skb); return target; } @@ -1168,7 +1168,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, /* Adjust the chunk length field. */ chunk->chunk_hdr->length = htons(ntohs(chunk->chunk_hdr->length) + len); - chunk->chunk_end = chunk->skb->tail; + chunk->chunk_end = skb_tail_pointer(chunk->skb); out: return err; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index bf502c499c81..438e5dc5c714 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3115,7 +3115,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, break; ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); - if (ch_end > skb->tail) + if (ch_end > skb_tail_pointer(skb)) break; if (SCTP_CID_SHUTDOWN_ACK == ch->type) @@ -3130,7 +3130,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, return sctp_sf_pdiscard(ep, asoc, type, arg, commands); ch = (sctp_chunkhdr_t *) ch_end; - } while (ch_end < skb->tail); + } while (ch_end < skb_tail_pointer(skb)); if (ootb_shut_ack) sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); diff --git a/net/tipc/config.c b/net/tipc/config.c index 14789a82de53..c71337a22d33 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -89,7 +89,7 @@ struct sk_buff *tipc_cfg_reply_alloc(int payload_size) int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type, void *tlv_data, int tlv_data_size) { - struct tlv_desc *tlv = (struct tlv_desc *)buf->tail; + struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(buf); int new_tlv_space = TLV_SPACE(tlv_data_size); if (skb_tailroom(buf) < new_tlv_space) { diff --git a/net/tipc/socket.c b/net/tipc/socket.c index b71739fbe2c6..45832fb75ea4 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1020,7 +1020,7 @@ restart: if (!err) { buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle); - sz = buf->tail - buf_crs; + sz = skb_tail_pointer(buf) - buf_crs; needed = (buf_len - sz_copied); sz_to_copy = (sz <= needed) ? 
sz : needed; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 816e3690b60f..814bb3125ada 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -576,7 +576,7 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr) struct sk_buff *skb = sp->out_skb; struct xfrm_usersa_info *p; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); if (sp->this_idx < sp->start_idx) goto out; @@ -621,7 +621,7 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr) if (x->lastused) RTA_PUT(skb, XFRMA_LASTUSED, sizeof(x->lastused), &x->lastused); - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; out: sp->this_idx++; return 0; @@ -1157,7 +1157,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr struct sk_buff *in_skb = sp->in_skb; struct sk_buff *skb = sp->out_skb; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); if (sp->this_idx < sp->start_idx) goto out; @@ -1176,7 +1176,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr if (copy_to_user_policy_type(xp->type, skb) < 0) goto nlmsg_failure; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; out: sp->this_idx++; return 0; @@ -1330,7 +1330,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve struct xfrm_aevent_id *id; struct nlmsghdr *nlh; struct xfrm_lifetime_cur ltime; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id)); id = NLMSG_DATA(nlh); @@ -1362,7 +1362,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve RTA_PUT(skb,XFRMA_ETIMER_THRESH,sizeof(u32),&etimer); } - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; rtattr_failure: @@ -1744,7 +1744,7 @@ static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m, struct xfrm_migrate *mp; struct xfrm_userpolicy_id *pol_id; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); int i; nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id)); @@ -1764,7 +1764,7 @@ static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m, goto nlmsg_failure; } - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: skb_trim(skb, b - skb->data); @@ -1942,7 +1942,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_eve { struct xfrm_user_expire *ue; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue)); @@ -1952,7 +1952,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_eve copy_to_user_state(x, &ue->state); ue->hard = (c->data.hard != 0) ? 
1 : 0; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: @@ -1999,7 +1999,7 @@ static int xfrm_notify_sa_flush(struct km_event *c) struct xfrm_usersa_flush *p; struct nlmsghdr *nlh; struct sk_buff *skb; - unsigned char *b; + sk_buff_data_t b; int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)); skb = alloc_skb(len, GFP_ATOMIC); @@ -2045,7 +2045,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c) struct xfrm_usersa_id *id; struct nlmsghdr *nlh; struct sk_buff *skb; - unsigned char *b; + sk_buff_data_t b; int len = xfrm_sa_len(x); int headlen; @@ -2129,7 +2129,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, { struct xfrm_user_acquire *ua; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); __u32 seq = xfrm_get_acqseq(); nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE, @@ -2153,7 +2153,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, if (copy_to_user_policy_type(xp->type, skb) < 0) goto nlmsg_failure; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: @@ -2249,7 +2249,7 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, struct xfrm_user_polexpire *upe; struct nlmsghdr *nlh; int hard = c->data.hard; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe)); upe = NLMSG_DATA(nlh); @@ -2264,7 +2264,7 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, goto nlmsg_failure; upe->hard = !!hard; - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: @@ -2300,7 +2300,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event * struct xfrm_userpolicy_id *id; struct nlmsghdr *nlh; struct sk_buff *skb; - unsigned char *b; + sk_buff_data_t b; int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); int headlen; @@ -2357,7 +2357,7 @@ static int xfrm_notify_policy_flush(struct km_event *c) { struct nlmsghdr *nlh; struct sk_buff *skb; - unsigned char *b; + sk_buff_data_t b; int len = 0; #ifdef CONFIG_XFRM_SUB_POLICY len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); @@ -2410,7 +2410,7 @@ static int build_report(struct sk_buff *skb, u8 proto, { struct xfrm_user_report *ur; struct nlmsghdr *nlh; - unsigned char *b = skb->tail; + unsigned char *b = skb_tail_pointer(skb); nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur)); ur = NLMSG_DATA(nlh); @@ -2422,7 +2422,7 @@ static int build_report(struct sk_buff *skb, u8 proto, if (addr) RTA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); - nlh->nlmsg_len = skb->tail - b; + nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c index e203883406dd..33f2e064a682 100644 --- a/security/selinux/netlink.c +++ b/security/selinux/netlink.c @@ -66,7 +66,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void * static void selnl_notify(int msgtype, void *data) { int len; - unsigned char *tmp; + sk_buff_data_t tmp; struct sk_buff *skb; struct nlmsghdr *nlh; -- cgit v1.2.3-59-g8ed1b From 4305b541357ddbd205aa145dc378926b7cb12283 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 19 Apr 2007 20:43:29 -0700 Subject: [SK_BUFF]: Convert skb->end to sk_buff_data_t Now to convert the last one, 
skb->data, that will allow many simplifications and removal of some of the offset helpers. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- arch/ia64/sn/kernel/xpnet.c | 6 ++-- drivers/atm/ambassador.c | 2 +- drivers/atm/idt77252.c | 24 ++++++++----- drivers/infiniband/hw/cxgb3/iwch_cm.c | 2 +- drivers/net/cris/eth_v10.c | 3 +- drivers/net/forcedeth.c | 30 ++++++++++------ drivers/net/macb.c | 2 +- drivers/net/wan/lmc/lmc_main.c | 2 +- drivers/net/wireless/hostap/hostap_80211_rx.c | 2 +- drivers/usb/atm/usbatm.c | 4 +-- include/linux/skbuff.h | 23 +++++++++--- net/atm/lec.c | 2 +- net/core/skbuff.c | 51 +++++++++++++++++---------- net/ieee80211/ieee80211_rx.c | 2 +- net/netlink/af_netlink.c | 2 +- 15 files changed, 101 insertions(+), 56 deletions(-) (limited to 'include') diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c index eb416c95967d..98d79142f32b 100644 --- a/arch/ia64/sn/kernel/xpnet.c +++ b/arch/ia64/sn/kernel/xpnet.c @@ -264,7 +264,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) dev_dbg(xpnet, "head=0x%p skb->data=0x%p skb->tail=0x%p " "skb->end=0x%p skb->len=%d\n", (void *) skb->head, - (void *)skb->data, skb_tail_pointer(skb), (void *)skb->end, + (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->len); skb->protocol = eth_type_trans(skb, xpnet_device); @@ -273,7 +273,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p " "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n", (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), - (void *) skb->end, skb->len); + skb_end_pointer(skb), skb->len); xpnet_device->last_rx = jiffies; @@ -475,7 +475,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " "skb->end=0x%p skb->len=%d\n", (void *) skb->head, - (void *)skb->data, skb_tail_pointer(skb), (void *)skb->end, + (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->len); diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 3c372e08f77d..59651abfa4f8 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -821,7 +821,7 @@ static inline void fill_rx_pool (amb_dev * dev, unsigned char pool, } // cast needed as there is no %? 
for pointer differences PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li", - skb, skb->head, (long) (skb->end - skb->head)); + skb, skb->head, (long) (skb_end_pointer(skb) - skb->head)); rx.handle = virt_to_bus (skb); rx.host_address = cpu_to_be32 (virt_to_bus (skb->data)); if (rx_give (dev, &rx, pool)) diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 1e49799cd6cf..20f2a3a82656 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -1065,7 +1065,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) vcc = vc->rx_vcc; pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb), - skb->end - skb->data, PCI_DMA_FROMDEVICE); + skb_end_pointer(skb) - skb->data, + PCI_DMA_FROMDEVICE); if ((vcc->qos.aal == ATM_AAL0) || (vcc->qos.aal == ATM_AAL34)) { @@ -1194,7 +1195,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe) } pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb), - skb->end - skb->data, PCI_DMA_FROMDEVICE); + skb_end_pointer(skb) - skb->data, + PCI_DMA_FROMDEVICE); sb_pool_remove(card, skb); skb_trim(skb, len); @@ -1267,7 +1269,7 @@ idt77252_rx_raw(struct idt77252_dev *card) tail = readl(SAR_REG_RAWCT); pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue), - queue->end - queue->head - 16, + skb_end_pointer(queue) - queue->head - 16, PCI_DMA_FROMDEVICE); while (head != tail) { @@ -1363,7 +1365,8 @@ drop: queue = card->raw_cell_head; pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue), - queue->end - queue->data, + (skb_end_pointer(queue) - + queue->data), PCI_DMA_FROMDEVICE); } else { card->raw_cell_head = NULL; @@ -1875,7 +1878,7 @@ add_rx_skb(struct idt77252_dev *card, int queue, } paddr = pci_map_single(card->pcidev, skb->data, - skb->end - skb->data, + skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE); IDT77252_PRV_PADDR(skb) = paddr; @@ -1889,7 +1892,7 @@ add_rx_skb(struct idt77252_dev *card, int queue, outunmap: pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb), - skb->end - skb->data, PCI_DMA_FROMDEVICE); + skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE); handle = IDT77252_PRV_POOL(skb); card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL; @@ -1906,12 +1909,14 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb) int err; pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb), - skb->end - skb->data, PCI_DMA_FROMDEVICE); + skb_end_pointer(skb) - skb->data, + PCI_DMA_FROMDEVICE); err = push_rx_skb(card, skb, POOL_QUEUE(handle)); if (err) { pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb), - skb->end - skb->data, PCI_DMA_FROMDEVICE); + skb_end_pointer(skb) - skb->data, + PCI_DMA_FROMDEVICE); sb_pool_remove(card, skb); dev_kfree_skb(skb); } @@ -3123,7 +3128,8 @@ deinit_card(struct idt77252_dev *card) if (skb) { pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb), - skb->end - skb->data, + (skb_end_pointer(skb) - + skb->data), PCI_DMA_FROMDEVICE); card->sbpool[i].skb[j] = NULL; dev_kfree_skb(skb); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 66ad4d40ba1d..e842c65a3f4d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -477,7 +477,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) BUG_ON(skb_cloned(skb)); mpalen = sizeof(*mpa) + ep->plen; - if (skb->data + mpalen + sizeof(*req) > skb->end) { + if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) { kfree_skb(skb); skb=alloc_skb(mpalen + 
sizeof(*req), GFP_KERNEL); if (!skb) { diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 7feb9c561147..5bdf5ca85a65 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1348,7 +1348,8 @@ e100_rx(struct net_device *dev) #ifdef ETHDEBUG printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", - skb->head, skb->data, skb_tail_pointer(skb), skb->end); + skb->head, skb->data, skb_tail_pointer(skb), + skb_end_pointer(skb)); printk("copying packet to 0x%x.\n", skb_data_ptr); #endif diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index d5d458c3421f..d3f4bcaa9692 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -1386,9 +1386,13 @@ static int nv_alloc_rx(struct net_device *dev) struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); if (skb) { np->put_rx_ctx->skb = skb; - np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, - skb->end-skb->data, PCI_DMA_FROMDEVICE); - np->put_rx_ctx->dma_len = skb->end-skb->data; + np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + skb->data, + (skb_end_pointer(skb) - + skb->data), + PCI_DMA_FROMDEVICE); + np->put_rx_ctx->dma_len = (skb_end_pointer(skb) - + skb->data); np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); wmb(); np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); @@ -1416,9 +1420,13 @@ static int nv_alloc_rx_optimized(struct net_device *dev) struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD); if (skb) { np->put_rx_ctx->skb = skb; - np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data, - skb->end-skb->data, PCI_DMA_FROMDEVICE); - np->put_rx_ctx->dma_len = skb->end-skb->data; + np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + skb->data, + (skb_end_pointer(skb) - + skb->data), + PCI_DMA_FROMDEVICE); + np->put_rx_ctx->dma_len = (skb_end_pointer(skb) - + skb->data); np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32; np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF; wmb(); @@ -1602,8 +1610,9 @@ static void nv_drain_rx(struct net_device *dev) wmb(); if (np->rx_skb[i].skb) { pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, - np->rx_skb[i].skb->end-np->rx_skb[i].skb->data, - PCI_DMA_FROMDEVICE); + (skb_end_pointer(np->rx_skb[i].skb) - + np->rx_skb[i].skb->data), + PCI_DMA_FROMDEVICE); dev_kfree_skb(np->rx_skb[i].skb); np->rx_skb[i].skb = NULL; } @@ -4378,7 +4387,8 @@ static int nv_loopback_test(struct net_device *dev) for (i = 0; i < pkt_len; i++) pkt_data[i] = (u8)(i & 0xff); test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, - tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE); + (skb_end_pointer(tx_skb) - + tx_skb->data), PCI_DMA_FROMDEVICE); if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); @@ -4435,7 +4445,7 @@ static int nv_loopback_test(struct net_device *dev) } pci_unmap_page(np->pci_dev, test_dma_addr, - tx_skb->end-tx_skb->data, + (skb_end_pointer(tx_skb) - tx_skb->data), PCI_DMA_TODEVICE); dev_kfree_skb_any(tx_skb); out: diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 98bf51afcee7..9e233f8216a7 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -576,7 +576,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) dev_dbg(&bp->pdev->dev, "start_xmit: len %u head %p data %p tail %p end %p\n", skb->len, skb->head, skb->data, - skb_tail_pointer(skb), skb->end); + skb_tail_pointer(skb), skb_end_pointer(skb)); dev_dbg(&bp->pdev->dev, "data:"); for (i = 
0; i < 16; i++) diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index b731f3aae0df..5bb18c0955bc 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1932,7 +1932,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ sc->lmc_rxring[i].status = 0x80000000; /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */ - sc->lmc_rxring[i].length = skb->end - skb->data; + sc->lmc_rxring[i].length = skb_end_pointer(skb) - skb->data; /* use to be tail which is dumb since you're thinking why write * to the end of the packj,et but since there's nothing there tail == data diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index 5e3e9e262706..35a3a50724fe 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -922,7 +922,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, if (frag != 0) flen -= hdrlen; - if (skb_tail_pointer(frag_skb) + flen > frag_skb->end) { + if (frag_skb->tail + flen > frag_skb->end) { printk(KERN_WARNING "%s: host decrypted and " "reassembled frame did not fit skb\n", dev->name); diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 4d8f282b23d1..a076f735a7bc 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -335,12 +335,12 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char sarb = instance->cached_vcc->sarb; - if (skb_tail_pointer(sarb) + ATM_CELL_PAYLOAD > sarb->end) { + if (sarb->tail + ATM_CELL_PAYLOAD > sarb->end) { atm_rldbg(instance, "%s: buffer overrun (sarb->len %u, vcc: 0x%p)!\n", __func__, sarb->len, vcc); /* discard cells already received */ skb_trim(sarb, 0); - UDSL_ASSERT(skb_tail_pointer(sarb) + ATM_CELL_PAYLOAD <= sarb->end); + UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end); } memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index e1c2392ecb56..656dc0e901cc 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -305,9 +305,9 @@ struct sk_buff { sk_buff_data_t mac_header; /* These elements must be at the end, see alloc_skb() for details. 
*/ sk_buff_data_t tail; + sk_buff_data_t end; unsigned char *head, - *data, - *end; + *data; unsigned int truesize; atomic_t users; }; @@ -392,8 +392,20 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, unsigned int to, struct ts_config *config, struct ts_state *state); +#ifdef NET_SKBUFF_DATA_USES_OFFSET +static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) +{ + return skb->head + skb->end; +} +#else +static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) +{ + return skb->end; +} +#endif + /* Internal */ -#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end)) +#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) /** * skb_queue_empty - check if a queue is empty @@ -843,6 +855,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) { skb->tail = skb->data + offset; } + #endif /* NET_SKBUFF_DATA_USES_OFFSET */ /* @@ -872,7 +885,7 @@ static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len) SKB_LINEAR_ASSERT(skb); skb->tail += len; skb->len += len; - if (unlikely(skb_tail_pointer(skb) > skb->end)) + if (unlikely(skb->tail > skb->end)) skb_over_panic(skb, len, current_text_addr()); return tmp; } @@ -968,7 +981,7 @@ static inline int skb_headroom(const struct sk_buff *skb) */ static inline int skb_tailroom(const struct sk_buff *skb) { - return skb_is_nonlinear(skb) ? 0 : skb->end - skb_tail_pointer(skb); + return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; } /** diff --git a/net/atm/lec.c b/net/atm/lec.c index a8c6b285e06c..4b3e72f31b3b 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -284,7 +284,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) DPRINTK("skbuff head:%lx data:%lx tail:%lx end:%lx\n", (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), - (long)skb->end); + (long)skb_end_pointer(skb)); #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0) lec_handle_bridge(skb, dev); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ddcbc4d10dab..a203bedefe09 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -87,9 +87,9 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly; void skb_over_panic(struct sk_buff *skb, int sz, void *here) { printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " - "data:%p tail:%#lx end:%p dev:%s\n", + "data:%p tail:%#lx end:%#lx dev:%s\n", here, skb->len, sz, skb->head, skb->data, - (unsigned long)skb->tail, skb->end, + (unsigned long)skb->tail, (unsigned long)skb->end, skb->dev ? skb->dev->name : ""); BUG(); } @@ -106,9 +106,9 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here) void skb_under_panic(struct sk_buff *skb, int sz, void *here) { printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " - "data:%p tail:%#lx end:%p dev:%s\n", + "data:%p tail:%#lx end:%#lx dev:%s\n", here, skb->len, sz, skb->head, skb->data, - (unsigned long)skb->tail, skb->end, + (unsigned long)skb->tail, (unsigned long)skb->end, skb->dev ? 
skb->dev->name : ""); BUG(); } @@ -170,7 +170,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, skb->head = data; skb->data = data; skb_reset_tail_pointer(skb); - skb->end = data + size; + skb->end = skb->tail + size; /* make sure we initialize shinfo sequentially */ shinfo = skb_shinfo(skb); atomic_set(&shinfo->dataref, 1); @@ -520,8 +520,12 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) /* * Allocate the copy buffer */ - struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len, - gfp_mask); + struct sk_buff *n; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + n = alloc_skb(skb->end + skb->data_len, gfp_mask); +#else + n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask); +#endif if (!n) return NULL; @@ -558,8 +562,12 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) /* * Allocate the copy buffer */ - struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask); - + struct sk_buff *n; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + n = alloc_skb(skb->end, gfp_mask); +#else + n = alloc_skb(skb->end - skb->head, gfp_mask); +#endif if (!n) goto out; @@ -617,7 +625,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, { int i; u8 *data; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + int size = nhead + skb->end + ntail; +#else int size = nhead + (skb->end - skb->head) + ntail; +#endif long off; if (skb_shared(skb)) @@ -632,12 +644,13 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, /* Copy only real data... and, alas, header. This should be * optimized for the cases when header is void. */ memcpy(data + nhead, skb->head, - skb->tail -#ifndef NET_SKBUFF_DATA_USES_OFFSET - - skb->head +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb->tail); +#else + skb->tail - skb->head); #endif - ); - memcpy(data + size, skb->end, sizeof(struct skb_shared_info)); + memcpy(data + size, skb_end_pointer(skb), + sizeof(struct skb_shared_info)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) get_page(skb_shinfo(skb)->frags[i].page); @@ -650,9 +663,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, off = (data + nhead) - skb->head; skb->head = data; - skb->end = data + size; skb->data += off; -#ifndef NET_SKBUFF_DATA_USES_OFFSET +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb->end = size; +#else + skb->end = skb->head + size; /* {transport,network,mac}_header and tail are relative to skb->head */ skb->tail += off; skb->transport_header += off; @@ -769,7 +784,7 @@ int skb_pad(struct sk_buff *skb, int pad) return 0; } - ntail = skb->data_len + pad - (skb->end - skb_tail_pointer(skb)); + ntail = skb->data_len + pad - (skb->end - skb->tail); if (likely(skb_cloned(skb) || ntail > 0)) { err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); if (unlikely(err)) @@ -907,7 +922,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) * plus 128 bytes for future expansions. If we have enough * room at tail, reallocate without expansion only if skb is cloned. */ - int i, k, eat = (skb_tail_pointer(skb) + delta) - skb->end; + int i, k, eat = (skb->tail + delta) - skb->end; if (eat > 0 || skb_cloned(skb)) { if (pskb_expand_head(skb, 0, eat > 0 ? 
eat + 128 : 0, diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 2b854941e06c..59a765c49cf9 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -595,7 +595,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, if (frag != 0) flen -= hdrlen; - if (skb_tail_pointer(frag_skb) + flen > frag_skb->end) { + if (frag_skb->tail + flen > frag_skb->end) { printk(KERN_WARNING "%s: host decrypted and " "reassembled frame did not fit skb\n", dev->name); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index fdb6eb13cbcb..50dc5edb7752 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -785,7 +785,7 @@ static inline struct sk_buff *netlink_trim(struct sk_buff *skb, skb_orphan(skb); - delta = skb->end - skb_tail_pointer(skb); + delta = skb->end - skb->tail; if (delta * 2 < skb->truesize) return skb; -- cgit v1.2.3-59-g8ed1b From b529ccf2799c14346d1518e9bdf1f88f03643e99 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 25 Apr 2007 19:08:35 -0700 Subject: [NETLINK]: Introduce nlmsg_hdr() helper For the common "(struct nlmsghdr *)skb->data" sequence, so that we reduce the number of direct accesses to skb->data and for consistency with all the other cast skb member helpers. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- drivers/connector/connector.c | 2 +- drivers/scsi/scsi_netlink.c | 2 +- drivers/scsi/scsi_transport_iscsi.c | 2 +- fs/ecryptfs/netlink.c | 4 ++-- include/linux/netlink.h | 5 +++++ kernel/audit.c | 6 +++--- kernel/taskstats.c | 4 ++-- net/decnet/netfilter/dn_rtmsg.c | 2 +- net/ipv4/fib_frontend.c | 2 +- net/ipv4/inet_diag.c | 2 +- net/ipv4/netfilter/ip_queue.c | 2 +- net/ipv6/netfilter/ip6_queue.c | 2 +- net/netlink/af_netlink.c | 2 +- net/tipc/netlink.c | 2 +- security/selinux/hooks.c | 2 +- 15 files changed, 23 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index a905f7820331..7f9c4fb7e5b0 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -212,7 +212,7 @@ static void cn_rx_skb(struct sk_buff *__skb) skb = skb_get(__skb); if (skb->len >= NLMSG_SPACE(0)) { - nlh = (struct nlmsghdr *)skb->data; + nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(struct cn_msg) || skb->len < nlh->nlmsg_len || diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 1b59b27e887f..45646a285244 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) while (skb->len >= NLMSG_SPACE(0)) { err = 0; - nlh = (struct nlmsghdr *) skb->data; + nlh = nlmsg_hdr(skb); if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || (skb->len < nlh->nlmsg_len)) { printk(KERN_WARNING "%s: discarding partial skb\n", diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index ce0d14af33c8..10590cd7e9ed 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1081,7 +1081,7 @@ iscsi_if_rx(struct sock *sk, int len) struct nlmsghdr *nlh; struct iscsi_uevent *ev; - nlh = (struct nlmsghdr *)skb->data; + nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) { break; diff --git a/fs/ecryptfs/netlink.c b/fs/ecryptfs/netlink.c index e3aa2253c850..8405d216a5fc 100644 --- a/fs/ecryptfs/netlink.c +++ b/fs/ecryptfs/netlink.c @@ -97,7 +97,7 @@ out: */ static int 
ecryptfs_process_nl_response(struct sk_buff *skb) { - struct nlmsghdr *nlh = (struct nlmsghdr*)skb->data; + struct nlmsghdr *nlh = nlmsg_hdr(skb); struct ecryptfs_message *msg = NLMSG_DATA(nlh); int rc; @@ -181,7 +181,7 @@ receive: "rc = [%d]\n", rc); return; } - nlh = (struct nlmsghdr *)skb->data; + nlh = nlmsg_hdr(skb); if (!NLMSG_OK(nlh, skb->len)) { ecryptfs_printk(KERN_ERR, "Received corrupt netlink " "message\n"); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 68a632b372ec..36629fff26d3 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -138,6 +138,11 @@ struct nlattr #include #include +static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) +{ + return (struct nlmsghdr *)skb->data; +} + struct netlink_skb_parms { struct ucred creds; /* Skb credentials */ diff --git a/kernel/audit.c b/kernel/audit.c index ea8521417d13..80a7457dadbf 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -151,7 +151,7 @@ struct audit_buffer { static void audit_set_pid(struct audit_buffer *ab, pid_t pid) { - struct nlmsghdr *nlh = (struct nlmsghdr *)ab->skb->data; + struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); nlh->nlmsg_pid = pid; } @@ -750,7 +750,7 @@ static void audit_receive_skb(struct sk_buff *skb) u32 rlen; while (skb->len >= NLMSG_SPACE(0)) { - nlh = (struct nlmsghdr *)skb->data; + nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) return; rlen = NLMSG_ALIGN(nlh->nlmsg_len); @@ -1268,7 +1268,7 @@ void audit_log_end(struct audit_buffer *ab) audit_log_lost("rate limit exceeded"); } else { if (audit_pid) { - struct nlmsghdr *nlh = (struct nlmsghdr *)ab->skb->data; + struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0); skb_queue_tail(&audit_skb_queue, ab->skb); ab->skb = NULL; diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 4c3476fa058d..ad7d2392cb0e 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -102,7 +102,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, */ static int send_reply(struct sk_buff *skb, pid_t pid) { - struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); + struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb)); void *reply = genlmsg_data(genlhdr); int rc; @@ -121,7 +121,7 @@ static int send_reply(struct sk_buff *skb, pid_t pid) static void send_cpu_listeners(struct sk_buff *skb, struct listener_list *listeners) { - struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); + struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb)); struct listener *s, *tmp; struct sk_buff *skb_next, *skb_cur = skb; void *reply = genlmsg_data(genlhdr); diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index ceefd9dd0c92..9e8256a2361e 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -102,7 +102,7 @@ static unsigned int dnrmg_hook(unsigned int hook, static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { - struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; + struct nlmsghdr *nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) return; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index cac06c43f004..3ff753c6f197 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -807,7 +807,7 @@ static void nl_fib_input(struct sock *sk, int len) if (skb == NULL) return; - nlh = (struct nlmsghdr *)skb->data; + nlh = nlmsg_hdr(skb); if (skb->len < NLMSG_SPACE(0) || skb->len 
< nlh->nlmsg_len || nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn))) { kfree_skb(skb); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 37362cd1d07f..238999e6e871 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -847,7 +847,7 @@ static inline void inet_diag_rcv_skb(struct sk_buff *skb) { if (skb->len >= NLMSG_SPACE(0)) { int err; - struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; + struct nlmsghdr *nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 15e0d2002235..17f7c988460c 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -497,7 +497,7 @@ ipq_rcv_skb(struct sk_buff *skb) if (skblen < sizeof(*nlh)) return; - nlh = (struct nlmsghdr *)skb->data; + nlh = nlmsg_hdr(skb); nlmsglen = nlh->nlmsg_len; if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen) return; diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 5cfce218c5e1..275e625e4977 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -487,7 +487,7 @@ ipq_rcv_skb(struct sk_buff *skb) if (skblen < sizeof(*nlh)) return; - nlh = (struct nlmsghdr *)skb->data; + nlh = nlmsg_hdr(skb); nlmsglen = nlh->nlmsg_len; if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen) return; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 50dc5edb7752..04b72d3c1dea 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1471,7 +1471,7 @@ static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, int err; while (skb->len >= nlmsg_total_size(0)) { - nlh = (struct nlmsghdr *) skb->data; + nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len) return 0; diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index b8e1edc2badc..4cdafa2d1d4d 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -57,7 +57,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info) if (rep_buf) { skb_push(rep_buf, hdr_space); - rep_nlh = (struct nlmsghdr *)rep_buf->data; + rep_nlh = nlmsg_hdr(rep_buf); memcpy(rep_nlh, req_nlh, hdr_space); rep_nlh->nlmsg_len = rep_buf->len; genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index addb58501057..5f02b4be1917 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3786,7 +3786,7 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb) err = -EINVAL; goto out; } - nlh = (struct nlmsghdr *)skb->data; + nlh = nlmsg_hdr(skb); err = selinux_nlmsg_lookup(isec->sclass, nlh->nlmsg_type, &perm); if (err) { -- cgit v1.2.3-59-g8ed1b From 897933bcdf31c372e029dd4e2ecd573ebe6cfd9c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Mar 2007 22:27:36 -0300 Subject: [SK_BUFF]: Remove skb_add_mtu() leftovers Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/skbuff.h | 1 - net/core/skbuff.c | 14 -------------- 2 files changed, 15 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 656dc0e901cc..81ac934d5964 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1512,7 +1512,6 @@ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, } extern void skb_init(void); -extern void skb_add_mtu(int mtu); /** * skb_get_timestamp - get timestamp from a skb diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 64caee46291b..e28f119156f7 
100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -157,7 +157,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, if (!skb) goto out; - /* Get the DATA. Size must match skb_add_mtu(). */ size = SKB_DATA_ALIGN(size); data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info), gfp_mask, node); @@ -1533,19 +1532,6 @@ void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head spin_unlock_irqrestore(&list->lock, flags); } -#if 0 -/* - * Tune the memory allocator for a new MTU size. - */ -void skb_add_mtu(int mtu) -{ - /* Must match allocation in alloc_skb */ - mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info); - - kmem_add_cache_size(mtu); -} -#endif - static inline void skb_split_inside_header(struct sk_buff *skb, struct sk_buff* skb1, const u32 len, const int pos) -- cgit v1.2.3-59-g8ed1b From a36ca733375860b389c15ffdf6a5f92df64a33b6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Mar 2007 22:28:08 -0300 Subject: [NETLINK]: Remove NLMSG_{NEW_ANSWER,CANCEL,END} Not used anywhere and defined inside __KERNEL__, Thomas acked this on irc. Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/netlink.h | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'include') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 36629fff26d3..0d11f6a7389c 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -229,18 +229,6 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) #define NLMSG_PUT(skb, pid, seq, type, len) \ NLMSG_NEW(skb, pid, seq, type, len, 0) -#define NLMSG_NEW_ANSWER(skb, cb, type, len, flags) \ - NLMSG_NEW(skb, NETLINK_CB((cb)->skb).pid, \ - (cb)->nlh->nlmsg_seq, type, len, flags) - -#define NLMSG_END(skb, nlh) \ -({ (nlh)->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)(nlh); \ - (skb)->len; }) - -#define NLMSG_CANCEL(skb, nlh) \ -({ skb_trim(skb, (unsigned char *) (nlh) - (skb)->data); \ - -1; }) - extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, struct nlmsghdr *nlh, int (*dump)(struct sk_buff *skb, struct netlink_callback*), -- cgit v1.2.3-59-g8ed1b From dc5fc579b90ed0a9a4e55b0218cdbaf0a8cf2e67 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 25 Mar 2007 23:06:12 -0700 Subject: [NETLINK]: Use nlmsg_trim() where appropriate Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- include/net/netlink.h | 2 +- net/core/wireless.c | 2 +- net/decnet/dn_route.c | 3 ++- net/decnet/dn_table.c | 3 ++- net/ipv4/inet_diag.c | 7 ++++--- net/ipv4/ipmr.c | 3 ++- net/netfilter/nf_conntrack_netlink.c | 5 +++-- net/sched/act_api.c | 17 +++++++++-------- net/sched/act_gact.c | 3 ++- net/sched/act_ipt.c | 3 ++- net/sched/act_mirred.c | 3 ++- net/sched/act_pedit.c | 3 ++- net/sched/act_police.c | 9 +++++---- net/sched/act_simple.c | 3 ++- net/sched/cls_api.c | 4 +++- net/sched/cls_basic.c | 3 ++- net/sched/cls_fw.c | 3 ++- net/sched/cls_route.c | 3 ++- net/sched/cls_rsvp.c | 1 + net/sched/cls_rsvp.h | 2 +- net/sched/cls_rsvp6.c | 1 + net/sched/cls_tcindex.c | 3 ++- net/sched/cls_u32.c | 3 ++- net/sched/sch_api.c | 5 +++-- net/sched/sch_atm.c | 3 ++- net/sched/sch_cbq.c | 17 +++++++++-------- net/sched/sch_hfsc.c | 5 +++-- net/sched/sch_htb.c | 5 +++-- net/sched/sch_ingress.c | 3 ++- net/sched/sch_netem.c | 3 ++- net/sched/sch_prio.c | 3 ++- net/sched/sch_sfq.c | 3 ++- net/sched/sch_tbf.c | 3 ++- net/xfrm/xfrm_user.c | 16 ++++++++-------- 34 files changed, 93 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/include/net/netlink.h b/include/net/netlink.h index 2c7ab107f20d..510ca7fabe18 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -548,7 +548,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb) * * Trims the message to the provided mark. Returns -1. */ -static inline int nlmsg_trim(struct sk_buff *skb, void *mark) +static inline int nlmsg_trim(struct sk_buff *skb, const void *mark) { if (mark) skb_trim(skb, (unsigned char *) mark - skb->data); diff --git a/net/core/wireless.c b/net/core/wireless.c index 4a777b68e3bc..86db63d7f760 100644 --- a/net/core/wireless.c +++ b/net/core/wireless.c @@ -1957,7 +1957,7 @@ static inline int rtnetlink_fill_iwinfo(struct sk_buff * skb, nlmsg_failure: rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 9678b096b844..2ae35ef1f077 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -77,6 +77,7 @@ #include #include #include +#include #include #include #include @@ -1514,7 +1515,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, nlmsg_failure: rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index 544c45540746..d6615c9361e9 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c @@ -28,6 +28,7 @@ #include #include /* RTF_xxx */ #include +#include #include #include #include @@ -349,7 +350,7 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, nlmsg_failure: rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -EMSGSIZE; } diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 238999e6e871..62c2e9f7e11f 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -152,7 +153,7 @@ static int inet_csk_diag_fill(struct sock *sk, rtattr_failure: nlmsg_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -EMSGSIZE; } @@ -208,7 +209,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail; return skb->len; nlmsg_failure: - skb_trim(skb, previous_tail - skb->data); + nlmsg_trim(skb, previous_tail); return -EMSGSIZE; } @@ -579,7 +580,7 @@ static int 
inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, return skb->len; nlmsg_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index ea0a491dce92..48027df5a90b 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -62,6 +62,7 @@ #include #include #include +#include #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) #define CONFIG_IP_PIMSM 1 @@ -1570,7 +1571,7 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) return 1; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -EMSGSIZE; } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 442300c633d7..76f11f325919 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -306,7 +307,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, nlmsg_failure: nfattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1169,7 +1170,7 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, nlmsg_failure: nfattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 28326fb1fc4e..f002f74f3763 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -31,6 +31,7 @@ #include #include #include +#include void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) { @@ -98,7 +99,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, err = tcf_action_dump_1(skb, a, 0, 0); if (err < 0) { index--; - skb_trim(skb, (u8*)r - skb->data); + nlmsg_trim(skb, r); goto done; } r->rta_len = skb_tail_pointer(skb) - (u8 *)r; @@ -114,7 +115,7 @@ done: return n_i; rtattr_failure: - skb_trim(skb, (u8*)r - skb->data); + nlmsg_trim(skb, r); goto done; } @@ -144,7 +145,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, return n_i; rtattr_failure: - skb_trim(skb, (u8*)r - skb->data); + nlmsg_trim(skb, r); return -EINVAL; } @@ -440,7 +441,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) } rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -467,7 +468,7 @@ tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref) rtattr_failure: err = -EINVAL; errout: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return err; } @@ -658,7 +659,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, rtattr_failure: nlmsg_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1059,7 +1060,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) x->rta_len = skb_tail_pointer(skb) - (u8 *)x; ret = skb->len; } else - skb_trim(skb, (u8*)x - skb->data); + nlmsg_trim(skb, x); nlh->nlmsg_len = skb_tail_pointer(skb) - b; if (NETLINK_CB(cb->skb).pid && ret) @@ -1070,7 +1071,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) rtattr_failure: nlmsg_failure: module_put(a_o->owner); - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return skb->len; } diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index aad748b3b38c..7517f3791541 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -181,7 +182,7 @@ static int tcf_gact_dump(struct sk_buff *skb, struct 
tc_action *a, int bind, int return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 2ccfd5b20fab..00b05f422d45 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -277,7 +278,7 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); kfree(t); return -1; } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 15f6ecdaf611..de21c92faaa2 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -225,7 +226,7 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index d654cea1a46c..45b3cda86a21 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -226,7 +227,7 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); kfree(opt); return -1; } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 068b23763665..0a5679ea6c64 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -30,6 +30,7 @@ #include #include #include +#include #define L2T(p,L) ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log]) #define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log]) @@ -88,7 +89,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c err = tcf_action_dump_1(skb, a, 0, 0); if (err < 0) { index--; - skb_trim(skb, (u8*)r - skb->data); + nlmsg_trim(skb, r); goto done; } r->rta_len = skb_tail_pointer(skb) - (u8 *)r; @@ -102,7 +103,7 @@ done: return n_i; rtattr_failure: - skb_trim(skb, (u8*)r - skb->data); + nlmsg_trim(skb, r); goto done; } #endif @@ -355,7 +356,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -598,7 +599,7 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index ecbcfa59b76c..36e1edad5990 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #define TCA_ACT_SIMP 22 @@ -173,7 +174,7 @@ static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 84231baf77d1..3d0a6cdcaebc 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include #include #include #include @@ -345,7 +347,7 @@ tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh, nlmsg_failure: rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c 
index 800ec2ac326b..c885412d79d5 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -267,7 +268,7 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index f5f355852a87..bbec4a0d4dcb 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -382,7 +383,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 1f94df36239d..e92d716c9158 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -599,7 +600,7 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/cls_rsvp.c b/net/sched/cls_rsvp.c index 1d4a1fb17608..0a683c07c648 100644 --- a/net/sched/cls_rsvp.c +++ b/net/sched/cls_rsvp.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 87ed6f3c5070..22f9ede70e8f 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -630,7 +630,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/cls_rsvp6.c b/net/sched/cls_rsvp6.c index a2979d89798f..93b6abed57db 100644 --- a/net/sched/cls_rsvp6.c +++ b/net/sched/cls_rsvp6.c @@ -34,6 +34,7 @@ #include #include #include +#include #define RSVP_DST_LEN 4 #define RSVP_ID "rsvp6" diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 0537d6066b43..47ac0c556429 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -495,7 +496,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index fa11bb750049..62e1deb27a17 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -772,7 +773,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 7482a950717b..0b9abea68fd5 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -36,6 +36,7 @@ #include #include +#include #include #include @@ -852,7 +853,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, nlmsg_failure: rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1081,7 +1082,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, nlmsg_failure: rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 1d7bb1632138..0cc3c9b72728 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ 
-14,6 +14,7 @@ #include #include #include /* for fput */ +#include #include #include @@ -665,7 +666,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, return skb->len; rtattr_failure: - skb_trim(skb,b-skb->data); + nlmsg_trim(skb, b); return -1; } static int diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index be98a01253e9..dcd9c31dc399 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -1471,7 +1472,7 @@ static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1496,7 +1497,7 @@ static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1514,7 +1515,7 @@ static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1531,7 +1532,7 @@ static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1549,7 +1550,7 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1568,7 +1569,7 @@ static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } #endif @@ -1601,7 +1602,7 @@ static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1637,7 +1638,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 80e6f811e3bc..6a762cf781d7 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -65,6 +65,7 @@ #include #include #include +#include #include #include #include @@ -1378,7 +1379,7 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1584,7 +1585,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index c687388a8cb6..b7abd0ae676a 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -1128,7 +1129,7 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) return skb->len; rtattr_failure: spin_unlock_bh(&sch->dev->queue_lock); - skb_trim(skb, skb_tail_pointer(skb) - skb->data); + nlmsg_trim(skb, skb_tail_pointer(skb)); return -1; } @@ -1164,7 +1165,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, return skb->len; rtattr_failure: spin_unlock_bh(&sch->dev->queue_lock); - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index d19f4070c237..f63d5c6eb302 100644 --- a/net/sched/sch_ingress.c +++ 
b/net/sched/sch_ingress.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -371,7 +372,7 @@ static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 2a9b1e429ff8..4818da5a7e6c 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -22,6 +22,7 @@ #include #include +#include #include #define VERSION "1.2" @@ -616,7 +617,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 5b371109ec1c..f13996348dda 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -280,7 +281,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index a511ba83e26f..96dfdf78d32c 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -476,7 +477,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 231895562c66..626ce96800fe 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -408,7 +409,7 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) return skb->len; rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 814bb3125ada..6b7f6dc144c7 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -628,7 +628,7 @@ out: nlmsg_failure: rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1182,7 +1182,7 @@ out: return 0; nlmsg_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1367,7 +1367,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve rtattr_failure: nlmsg_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1767,7 +1767,7 @@ static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m, nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -1956,7 +1956,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_eve return skb->len; nlmsg_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -2157,7 +2157,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, return skb->len; nlmsg_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -2268,7 +2268,7 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, return skb->len; nlmsg_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } @@ -2427,7 +2427,7 @@ static int build_report(struct sk_buff *skb, u8 proto, nlmsg_failure: rtattr_failure: - skb_trim(skb, b - skb->data); + nlmsg_trim(skb, b); return -1; } -- cgit v1.2.3-59-g8ed1b From 
89560b53b92a07c529e13a462aa7fd87a844f1f5 Mon Sep 17 00:00:00 2001 From: Gerrit Renker Date: Tue, 20 Mar 2007 15:27:17 -0300 Subject: [DCCP]: Sample RTT from SYN exchange Function: --- include/linux/dccp.h | 2 ++ net/dccp/input.c | 8 ++++++++ net/dccp/options.c | 8 ++++++++ 3 files changed, 18 insertions(+) (limited to 'include') diff --git a/include/linux/dccp.h b/include/linux/dccp.h index fdd4217f1047..e668cf531bab 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -481,10 +481,12 @@ struct dccp_ackvec; * @dccps_hc_rx_insert_options - * @dccps_hc_tx_insert_options - * @dccps_xmit_timer - timer for when CCID is not ready to send + * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs) */ struct dccp_sock { /* inet_connection_sock has to be the first member of dccp_sock */ struct inet_connection_sock dccps_inet_connection; +#define dccps_syn_rtt dccps_inet_connection.icsk_ack.lrcvtime __u64 dccps_swl; __u64 dccps_swh; __u64 dccps_awl; diff --git a/net/dccp/input.c b/net/dccp/input.c index bd578c87b2e7..da6ec185ed5b 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -300,6 +300,14 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, if (dccp_parse_options(sk, skb)) goto out_invalid_packet; + /* Obtain RTT sample from SYN exchange (used by CCID 3) */ + if (dp->dccps_options_received.dccpor_timestamp_echo) { + struct timeval now; + + dccp_timestamp(sk, &now); + dp->dccps_syn_rtt = dccp_sample_rtt(sk, &now, NULL); + } + if (dccp_msk(sk)->dccpms_send_ack_vector && dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, DCCP_SKB_CB(skb)->dccpd_seq, diff --git a/net/dccp/options.c b/net/dccp/options.c index 9074ca7977b2..14b621227323 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -563,6 +563,14 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb) dccp_insert_options_feat(sk, skb)) return -1; + /* + * Obtain RTT sample from Request/Response exchange. + * This is currently used in CCID 3 initialisation. + */ + if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST && + dccp_insert_option_timestamp(sk, skb)) + return -1; + /* XXX: insert other options when appropriate */ if (DCCP_SKB_CB(skb)->dccpd_opt_len != 0) { -- cgit v1.2.3-59-g8ed1b From e284986385b6420a5f30f2dcd743512bbe1a3202 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 22 Mar 2007 11:48:11 -0700 Subject: [RTNL]: Message handler registration interface This patch adds a new interface to register rtnetlink message handlers replacing the exported rtnl_links[] array which required many message handlers to be exported unnecessarly. Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- include/linux/rtnetlink.h | 7 -- include/net/rtnetlink.h | 18 +++++ net/core/rtnetlink.c | 188 ++++++++++++++++++++++++++++++++++++++++------ 3 files changed, 184 insertions(+), 29 deletions(-) create mode 100644 include/net/rtnetlink.h (limited to 'include') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 3a4cb242ecd2..1fae30af91f3 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -574,13 +574,6 @@ extern int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, in #define rtattr_parse_nested(tb, max, rta) \ rtattr_parse((tb), (max), RTA_DATA((rta)), RTA_PAYLOAD((rta))) -struct rtnetlink_link -{ - int (*doit)(struct sk_buff *, struct nlmsghdr*, void *attr); - int (*dumpit)(struct sk_buff *, struct netlink_callback *cb); -}; - -extern struct rtnetlink_link * rtnetlink_links[NPROTO]; extern int rtnetlink_send(struct sk_buff *skb, u32 pid, u32 group, int echo); extern int rtnl_unicast(struct sk_buff *skb, u32 pid); extern int rtnl_notify(struct sk_buff *skb, u32 pid, u32 group, diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h new file mode 100644 index 000000000000..dce7072bd28c --- /dev/null +++ b/include/net/rtnetlink.h @@ -0,0 +1,18 @@ +#ifndef __NET_RTNETLINK_H +#define __NET_RTNETLINK_H + +#include +#include + +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *); +typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); + +extern int __rtnl_register(int protocol, int msgtype, + rtnl_doit_func, rtnl_dumpit_func); +extern void rtnl_register(int protocol, int msgtype, + rtnl_doit_func, rtnl_dumpit_func); +extern int rtnl_unregister(int protocol, int msgtype); +extern void rtnl_unregister_all(int protocol); +extern int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb); + +#endif diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 33ea8eac7fe0..fb1630d82dd4 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -50,12 +50,18 @@ #include #include #include -#include +#include #ifdef CONFIG_NET_WIRELESS_RTNETLINK #include #include #endif /* CONFIG_NET_WIRELESS_RTNETLINK */ +struct rtnl_link +{ + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; +}; + static DEFINE_MUTEX(rtnl_mutex); static struct sock *rtnl; @@ -95,7 +101,151 @@ int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len) return 0; } -struct rtnetlink_link * rtnetlink_links[NPROTO]; +struct rtnl_link *rtnl_msg_handlers[NPROTO]; + +static inline int rtm_msgindex(int msgtype) +{ + int msgindex = msgtype - RTM_BASE; + + /* + * msgindex < 0 implies someone tried to register a netlink + * control code. msgindex >= RTM_NR_MSGTYPES may indicate that + * the message type has not been added to linux/rtnetlink.h + */ + BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES); + + return msgindex; +} + +static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex) +{ + struct rtnl_link *tab; + + tab = rtnl_msg_handlers[protocol]; + if (tab == NULL || tab->doit == NULL) + tab = rtnl_msg_handlers[PF_UNSPEC]; + + return tab ? tab->doit : NULL; +} + +static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) +{ + struct rtnl_link *tab; + + tab = rtnl_msg_handlers[protocol]; + if (tab == NULL || tab->dumpit == NULL) + tab = rtnl_msg_handlers[PF_UNSPEC]; + + return tab ? 
tab->dumpit : NULL; +} + +/** + * __rtnl_register - Register a rtnetlink message type + * @protocol: Protocol family or PF_UNSPEC + * @msgtype: rtnetlink message type + * @doit: Function pointer called for each request message + * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message + * + * Registers the specified function pointers (at least one of them has + * to be non-NULL) to be called whenever a request message for the + * specified protocol family and message type is received. + * + * The special protocol family PF_UNSPEC may be used to define fallback + * function pointers for the case when no entry for the specific protocol + * family exists. + * + * Returns 0 on success or a negative error code. + */ +int __rtnl_register(int protocol, int msgtype, + rtnl_doit_func doit, rtnl_dumpit_func dumpit) +{ + struct rtnl_link *tab; + int msgindex; + + BUG_ON(protocol < 0 || protocol >= NPROTO); + msgindex = rtm_msgindex(msgtype); + + tab = rtnl_msg_handlers[protocol]; + if (tab == NULL) { + tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL); + if (tab == NULL) + return -ENOBUFS; + + rtnl_msg_handlers[protocol] = tab; + } + + if (doit) + tab[msgindex].doit = doit; + + if (dumpit) + tab[msgindex].dumpit = dumpit; + + return 0; +} + +EXPORT_SYMBOL_GPL(__rtnl_register); + +/** + * rtnl_register - Register a rtnetlink message type + * + * Identical to __rtnl_register() but panics on failure. This is useful + * as failure of this function is very unlikely, it can only happen due + * to lack of memory when allocating the chain to store all message + * handlers for a protocol. Meant for use in init functions where lack + * of memory implies no sense in continueing. + */ +void rtnl_register(int protocol, int msgtype, + rtnl_doit_func doit, rtnl_dumpit_func dumpit) +{ + if (__rtnl_register(protocol, msgtype, doit, dumpit) < 0) + panic("Unable to register rtnetlink message handler, " + "protocol = %d, message type = %d\n", + protocol, msgtype); +} + +EXPORT_SYMBOL_GPL(rtnl_register); + +/** + * rtnl_unregister - Unregister a rtnetlink message type + * @protocol: Protocol family or PF_UNSPEC + * @msgtype: rtnetlink message type + * + * Returns 0 on success or a negative error code. + */ +int rtnl_unregister(int protocol, int msgtype) +{ + int msgindex; + + BUG_ON(protocol < 0 || protocol >= NPROTO); + msgindex = rtm_msgindex(msgtype); + + if (rtnl_msg_handlers[protocol] == NULL) + return -ENOENT; + + rtnl_msg_handlers[protocol][msgindex].doit = NULL; + rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; + + return 0; +} + +EXPORT_SYMBOL_GPL(rtnl_unregister); + +/** + * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol + * @protocol : Protocol family or PF_UNSPEC + * + * Identical to calling rtnl_unregster() for all registered message types + * of a certain protocol family. 
+ */ +void rtnl_unregister_all(int protocol) +{ + BUG_ON(protocol < 0 || protocol >= NPROTO); + + kfree(rtnl_msg_handlers[protocol]); + rtnl_msg_handlers[protocol] = NULL; +} + +EXPORT_SYMBOL_GPL(rtnl_unregister_all); static const int rtm_min[RTM_NR_FAMILIES] = { @@ -648,7 +798,7 @@ errout: return err; } -static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) +int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) { int idx; int s_idx = cb->family; @@ -659,12 +809,12 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) int type = cb->nlh->nlmsg_type-RTM_BASE; if (idx < s_idx || idx == PF_PACKET) continue; - if (rtnetlink_links[idx] == NULL || - rtnetlink_links[idx][type].dumpit == NULL) + if (rtnl_msg_handlers[idx] == NULL || + rtnl_msg_handlers[idx][type].dumpit == NULL) continue; if (idx > s_idx) memset(&cb->args[0], 0, sizeof(cb->args)); - if (rtnetlink_links[idx][type].dumpit(skb, cb)) + if (rtnl_msg_handlers[idx][type].dumpit(skb, cb)) break; } cb->family = idx; @@ -672,6 +822,8 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } +EXPORT_SYMBOL_GPL(rtnl_dump_all); + void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) { struct sk_buff *skb; @@ -703,8 +855,7 @@ static int rtattr_max; static __inline__ int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) { - struct rtnetlink_link *link; - struct rtnetlink_link *link_tab; + rtnl_doit_func doit; int sz_idx, kind; int min_len; int family; @@ -737,11 +888,6 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) return -1; } - link_tab = rtnetlink_links[family]; - if (link_tab == NULL) - link_tab = rtnetlink_links[PF_UNSPEC]; - link = &link_tab[type]; - sz_idx = type>>2; kind = type&3; @@ -751,14 +897,14 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) } if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { - if (link->dumpit == NULL) - link = &(rtnetlink_links[PF_UNSPEC][type]); + rtnl_dumpit_func dumpit; - if (link->dumpit == NULL) + dumpit = rtnl_get_dumpit(family, type); + if (dumpit == NULL) goto err_inval; if ((*errp = netlink_dump_start(rtnl, skb, nlh, - link->dumpit, NULL)) != 0) { + dumpit, NULL)) != 0) { return -1; } @@ -787,11 +933,10 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) } } - if (link->doit == NULL) - link = &(rtnetlink_links[PF_UNSPEC][type]); - if (link->doit == NULL) + doit = rtnl_get_doit(family, type); + if (doit == NULL) goto err_inval; - err = link->doit(skb, nlh, (void *)&rta_buf[0]); + err = doit(skb, nlh, (void *)&rta_buf[0]); *errp = err; return err; @@ -886,7 +1031,6 @@ void __init rtnetlink_init(void) EXPORT_SYMBOL(__rta_fill); EXPORT_SYMBOL(rtattr_strlcpy); EXPORT_SYMBOL(rtattr_parse); -EXPORT_SYMBOL(rtnetlink_links); EXPORT_SYMBOL(rtnetlink_put_metrics); EXPORT_SYMBOL(rtnl_lock); EXPORT_SYMBOL(rtnl_trylock); -- cgit v1.2.3-59-g8ed1b From c8822a4e00442e65d42d50db8e529d75c2025630 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 22 Mar 2007 11:50:06 -0700 Subject: [NEIGH]: Use rtnl registration interface Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- include/net/neighbour.h | 10 +--------- net/core/neighbour.c | 26 +++++++++++++++++++------- net/core/rtnetlink.c | 5 ----- 3 files changed, 20 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/net/neighbour.h b/include/net/neighbour.h index ad7fe1121412..a4f26187fc1a 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -24,6 +24,7 @@ #include #include +#include #define NUD_IN_TIMER (NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE) #define NUD_VALID (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY) @@ -213,16 +214,7 @@ extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, const void *key, struct net_device *dev, int creat); extern int pneigh_delete(struct neigh_table *tbl, const void *key, struct net_device *dev); -struct netlink_callback; -struct nlmsghdr; -extern int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb); -extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); -extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); extern void neigh_app_ns(struct neighbour *n); - -extern int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb); -extern int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); - extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *)); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 61a4713a5df3..6f3bb73053c2 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1441,7 +1441,7 @@ int neigh_table_clear(struct neigh_table *tbl) return 0; } -int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct ndmsg *ndm; struct nlattr *dst_attr; @@ -1506,7 +1506,7 @@ out: return err; } -int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct ndmsg *ndm; struct nlattr *tb[NDA_MAX+1]; @@ -1786,7 +1786,7 @@ static struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] __read_mostly = { [NDTPA_LOCKTIME] = { .type = NLA_U64 }, }; -int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct neigh_table *tbl; struct ndtmsg *ndtmsg; @@ -1910,7 +1910,7 @@ errout: return err; } -int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) +static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) { int family, tidx, nidx = 0; int tbl_skip = cb->args[0]; @@ -2034,7 +2034,7 @@ out: return rc; } -int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) +static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) { struct neigh_table *tbl; int t, family, s_t; @@ -2746,14 +2746,26 @@ void neigh_sysctl_unregister(struct neigh_parms *p) #endif /* CONFIG_SYSCTL */ +static int __init neigh_init(void) +{ + rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL); + rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL); + rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info); + + rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, 
neightbl_dump_info); + rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL); + + return 0; +} + +subsys_initcall(neigh_init); + EXPORT_SYMBOL(__neigh_event_send); EXPORT_SYMBOL(neigh_changeaddr); EXPORT_SYMBOL(neigh_compat_output); EXPORT_SYMBOL(neigh_connected_output); EXPORT_SYMBOL(neigh_create); -EXPORT_SYMBOL(neigh_delete); EXPORT_SYMBOL(neigh_destroy); -EXPORT_SYMBOL(neigh_dump_info); EXPORT_SYMBOL(neigh_event_ns); EXPORT_SYMBOL(neigh_ifdown); EXPORT_SYMBOL(neigh_lookup); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 835137320001..3044702f7d9b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -963,16 +963,11 @@ static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] = { [RTM_GETADDR - RTM_BASE] = { .dumpit = rtnl_dump_all }, [RTM_GETROUTE - RTM_BASE] = { .dumpit = rtnl_dump_all }, - [RTM_NEWNEIGH - RTM_BASE] = { .doit = neigh_add }, - [RTM_DELNEIGH - RTM_BASE] = { .doit = neigh_delete }, - [RTM_GETNEIGH - RTM_BASE] = { .dumpit = neigh_dump_info }, #ifdef CONFIG_FIB_RULES [RTM_NEWRULE - RTM_BASE] = { .doit = fib_nl_newrule }, [RTM_DELRULE - RTM_BASE] = { .doit = fib_nl_delrule }, #endif [RTM_GETRULE - RTM_BASE] = { .dumpit = rtnl_dump_all }, - [RTM_GETNEIGHTBL - RTM_BASE] = { .dumpit = neightbl_dump_info }, - [RTM_SETNEIGHTBL - RTM_BASE] = { .doit = neightbl_set }, }; static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) -- cgit v1.2.3-59-g8ed1b From 9d9e6a5819230b5a5cc036f213135cb123ab1e50 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Sun, 25 Mar 2007 23:20:05 -0700 Subject: [NET] rules: Use rtnl registration interface Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/net/fib_rules.h | 6 +----- net/core/fib_rules.c | 8 ++++++-- net/core/rtnetlink.c | 5 ----- 3 files changed, 7 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index d585ea9fa97d..b2b9ccdf32d6 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -5,7 +5,7 @@ #include #include #include -#include +#include struct fib_rule { @@ -99,10 +99,6 @@ extern int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags, struct fib_lookup_arg *); -extern int fib_nl_newrule(struct sk_buff *, - struct nlmsghdr *, void *); -extern int fib_nl_delrule(struct sk_buff *, - struct nlmsghdr *, void *); extern int fib_rules_dump(struct sk_buff *, struct netlink_callback *, int); #endif diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 7174ced75efc..bf45f24cfea2 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -174,7 +174,7 @@ errout: return err; } -int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) +static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rules_ops *ops = NULL; @@ -265,7 +265,7 @@ errout: return err; } -int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) +static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rules_ops *ops = NULL; @@ -501,6 +501,10 @@ static struct notifier_block fib_rules_notifier = { static int __init fib_rules_init(void) { + rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL); + rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL); + rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, rtnl_dump_all); + return register_netdevice_notifier(&fib_rules_notifier); } diff --git 
a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 3044702f7d9b..5cc09f82f6d6 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -963,11 +963,6 @@ static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] = { [RTM_GETADDR - RTM_BASE] = { .dumpit = rtnl_dump_all }, [RTM_GETROUTE - RTM_BASE] = { .dumpit = rtnl_dump_all }, -#ifdef CONFIG_FIB_RULES - [RTM_NEWRULE - RTM_BASE] = { .doit = fib_nl_newrule }, - [RTM_DELRULE - RTM_BASE] = { .doit = fib_nl_delrule }, -#endif - [RTM_GETRULE - RTM_BASE] = { .dumpit = rtnl_dump_all }, }; static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) -- cgit v1.2.3-59-g8ed1b From 63f3444fb9a54c024d55f1205f8b94e7d2786595 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 22 Mar 2007 11:55:17 -0700 Subject: [IPv4]: Use rtnl registration interface Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/net/ip_fib.h | 6 ------ net/ipv4/devinet.c | 21 +++++---------------- net/ipv4/fib_frontend.c | 12 ++++++++---- net/ipv4/fib_rules.c | 4 +++- net/ipv4/route.c | 6 ++++-- 5 files changed, 20 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 36c635ca1aa6..5a4a0366c24f 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -215,10 +215,6 @@ extern void fib_select_default(const struct flowi *flp, struct fib_result *res); /* Exported by fib_frontend.c */ extern struct nla_policy rtm_ipv4_policy[]; extern void ip_fib_init(void); -extern int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg); -extern int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg); -extern int inet_rtm_getroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg); -extern int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb); extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, struct net_device *dev, __be32 *spec_dst, u32 *itag); extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res); @@ -235,8 +231,6 @@ extern __be32 __fib_res_prefsrc(struct fib_result *res); extern struct fib_table *fib_hash_init(u32 id); #ifdef CONFIG_IP_MULTIPLE_TABLES -extern int fib4_rules_dump(struct sk_buff *skb, struct netlink_callback *cb); - extern void __init fib4_rules_init(void); #ifdef CONFIG_NET_CLS_ROUTE diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 043857bd151c..9bdc79564cc6 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -48,7 +48,6 @@ #include #include #include -#include #include #include #include @@ -62,7 +61,7 @@ #include #include #include -#include +#include struct ipv4_devconf ipv4_devconf = { .accept_redirects = 1, @@ -1241,19 +1240,6 @@ errout: rtnl_set_sk_err(RTNLGRP_IPV4_IFADDR, err); } -static struct rtnetlink_link inet_rtnetlink_table[RTM_NR_MSGTYPES] = { - [RTM_NEWADDR - RTM_BASE] = { .doit = inet_rtm_newaddr, }, - [RTM_DELADDR - RTM_BASE] = { .doit = inet_rtm_deladdr, }, - [RTM_GETADDR - RTM_BASE] = { .dumpit = inet_dump_ifaddr, }, - [RTM_NEWROUTE - RTM_BASE] = { .doit = inet_rtm_newroute, }, - [RTM_DELROUTE - RTM_BASE] = { .doit = inet_rtm_delroute, }, - [RTM_GETROUTE - RTM_BASE] = { .doit = inet_rtm_getroute, - .dumpit = inet_dump_fib, }, -#ifdef CONFIG_IP_MULTIPLE_TABLES - [RTM_GETRULE - RTM_BASE] = { .dumpit = fib4_rules_dump, }, -#endif -}; - #ifdef CONFIG_SYSCTL void inet_forward_change(void) @@ -1636,7 +1622,10 @@ void __init devinet_init(void) { register_gifconf(PF_INET, inet_gifconf); 
register_netdevice_notifier(&ip_netdev_notifier); - rtnetlink_links[PF_INET] = inet_rtnetlink_table; + + rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL); + rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL); + rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr); #ifdef CONFIG_SYSCTL devinet_sysctl.sysctl_header = register_sysctl_table(devinet_sysctl.devinet_root_dir); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 3ff753c6f197..5bf718a3e49b 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include @@ -46,6 +45,7 @@ #include #include #include +#include #define FFprint(a...) printk(KERN_DEBUG a) @@ -540,7 +540,7 @@ errout: return err; } -int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) +static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { struct fib_config cfg; struct fib_table *tb; @@ -561,7 +561,7 @@ errout: return err; } -int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) +static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { struct fib_config cfg; struct fib_table *tb; @@ -582,7 +582,7 @@ errout: return err; } -int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) +static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) { unsigned int h, s_h; unsigned int e = 0, s_e; @@ -925,6 +925,10 @@ void __init ip_fib_init(void) register_netdevice_notifier(&fib_netdev_notifier); register_inetaddr_notifier(&fib_inetaddr_notifier); nl_fib_lookup_init(); + + rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL); + rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL); + rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib); } EXPORT_SYMBOL(inet_addr_type); diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index c660c074c76c..a7f931ddfaad 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -274,7 +274,7 @@ nla_put_failure: return -ENOBUFS; } -int fib4_rules_dump(struct sk_buff *skb, struct netlink_callback *cb) +static int fib4_rule_dump(struct sk_buff *skb, struct netlink_callback *cb) { return fib_rules_dump(skb, cb, AF_INET); } @@ -327,4 +327,6 @@ void __init fib4_rules_init(void) list_add_tail(&default_rule.common.list, &fib4_rules); fib_rules_register(&fib4_rules_ops); + + rtnl_register(PF_INET, RTM_GETRULE, NULL, fib4_rule_dump); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 58417393dec1..df9fe4f2e8cc 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -82,7 +82,6 @@ #include #include #include -#include #include #include #include @@ -104,6 +103,7 @@ #include #include #include +#include #ifdef CONFIG_SYSCTL #include #endif @@ -2721,7 +2721,7 @@ nla_put_failure: return -EMSGSIZE; } -int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) +static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) { struct rtmsg *rtm; struct nlattr *tb[RTA_MAX+1]; @@ -3194,6 +3194,8 @@ int __init ip_rt_init(void) xfrm_init(); xfrm4_init(); #endif + rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL); + return rc; } -- cgit v1.2.3-59-g8ed1b From be577ddc2b4aca0849f701222f5bc13cf1b79c9a Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 22 Mar 2007 11:55:50 -0700 Subject: [PKT_SCHED] qdisc: Use rtnl registration interface Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- include/net/sch_generic.h | 2 +- net/sched/sch_api.c | 27 +++++++-------------------- 2 files changed, 8 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 82086392735a..a3f4ddd1d6a8 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -5,10 +5,10 @@ #include #include #include -#include #include #include #include +#include struct Qdisc_ops; struct qdisc_walker; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 0b9abea68fd5..b06f20294ac0 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -1239,29 +1238,17 @@ static const struct file_operations psched_fops = { static int __init pktsched_init(void) { - struct rtnetlink_link *link_p; - - link_p = rtnetlink_links[PF_UNSPEC]; - - /* Setup rtnetlink links. It is made here to avoid - exporting large number of public symbols. - */ - - if (link_p) { - link_p[RTM_NEWQDISC-RTM_BASE].doit = tc_modify_qdisc; - link_p[RTM_DELQDISC-RTM_BASE].doit = tc_get_qdisc; - link_p[RTM_GETQDISC-RTM_BASE].doit = tc_get_qdisc; - link_p[RTM_GETQDISC-RTM_BASE].dumpit = tc_dump_qdisc; - link_p[RTM_NEWTCLASS-RTM_BASE].doit = tc_ctl_tclass; - link_p[RTM_DELTCLASS-RTM_BASE].doit = tc_ctl_tclass; - link_p[RTM_GETTCLASS-RTM_BASE].doit = tc_ctl_tclass; - link_p[RTM_GETTCLASS-RTM_BASE].dumpit = tc_dump_tclass; - } - register_qdisc(&pfifo_qdisc_ops); register_qdisc(&bfifo_qdisc_ops); proc_net_fops_create("psched", 0, &psched_fops); + rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); + rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); + rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc); + rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL); + rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL); + rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass); + return 0; } -- cgit v1.2.3-59-g8ed1b From fa34ddd739cecf3999ec0b7562618e8321829d41 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 22 Mar 2007 11:57:46 -0700 Subject: [DECNet]: Use rtnl registration interface Signed-off-by: Thomas Graf Acked-by: Steven Whitehouse Signed-off-by: David S. 
Miller --- include/net/dn_fib.h | 9 --------- include/net/dn_route.h | 1 - net/decnet/af_decnet.c | 1 + net/decnet/dn_dev.c | 24 +++--------------------- net/decnet/dn_fib.c | 8 +++++--- net/decnet/dn_route.c | 9 ++++++++- net/decnet/dn_rules.c | 4 +++- 7 files changed, 20 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h index f01626cbbed6..30125119c950 100644 --- a/include/net/dn_fib.h +++ b/include/net/dn_fib.h @@ -148,17 +148,8 @@ extern void dn_fib_rules_cleanup(void); extern unsigned dnet_addr_type(__le16 addr); extern int dn_fib_lookup(struct flowi *fl, struct dn_fib_res *res); -/* - * rtnetlink interface - */ -extern int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); -extern int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb); -extern int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); -extern int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); -extern int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb); - extern void dn_fib_free_info(struct dn_fib_info *fi); static inline void dn_fib_info_put(struct dn_fib_info *fi) diff --git a/include/net/dn_route.h b/include/net/dn_route.h index a566944c4962..c10e8e7e59a7 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -18,7 +18,6 @@ extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); -extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); extern void dn_rt_cache_flush(int delay); /* Masks for flags field */ diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index c6568d637e1a..a205eaa87f52 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -2413,6 +2413,7 @@ module_init(decnet_init); static void __exit decnet_exit(void) { sock_unregister(AF_DECnet); + rtnl_unregister_all(PF_DECnet); dev_remove_pack(&dn_dix_packet_type); dn_unregister_sysctl(); diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 95871a669dc4..61be2caddc57 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -1447,24 +1447,6 @@ static const struct file_operations dn_dev_seq_fops = { #endif /* CONFIG_PROC_FS */ -static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] = -{ - [RTM_NEWADDR - RTM_BASE] = { .doit = dn_nl_newaddr, }, - [RTM_DELADDR - RTM_BASE] = { .doit = dn_nl_deladdr, }, - [RTM_GETADDR - RTM_BASE] = { .dumpit = dn_nl_dump_ifaddr, }, -#ifdef CONFIG_DECNET_ROUTER - [RTM_NEWROUTE - RTM_BASE] = { .doit = dn_fib_rtm_newroute, }, - [RTM_DELROUTE - RTM_BASE] = { .doit = dn_fib_rtm_delroute, }, - [RTM_GETROUTE - RTM_BASE] = { .doit = dn_cache_getroute, - .dumpit = dn_fib_dump, }, - [RTM_GETRULE - RTM_BASE] = { .dumpit = dn_fib_dump_rules, }, -#else - [RTM_GETROUTE - RTM_BASE] = { .doit = dn_cache_getroute, - .dumpit = dn_cache_dump, }, -#endif - -}; - static int __initdata addr[2]; module_param_array(addr, int, NULL, 0444); MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node"); @@ -1485,7 +1467,9 @@ void __init dn_dev_init(void) dn_dev_devices_on(); - rtnetlink_links[PF_DECnet] = dnet_rtnetlink_table; + rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL); + rtnl_register(PF_DECnet, RTM_DELADDR, 
dn_nl_deladdr, NULL); + rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr); proc_net_fops_create("decnet_dev", S_IRUGO, &dn_dev_seq_fops); @@ -1500,8 +1484,6 @@ void __init dn_dev_init(void) void __exit dn_dev_cleanup(void) { - rtnetlink_links[PF_DECnet] = NULL; - #ifdef CONFIG_SYSCTL { int i; diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 82d58a977e6f..310a86268d2b 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -504,7 +504,7 @@ static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta) return 0; } -int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct dn_fib_table *tb; struct rtattr **rta = arg; @@ -520,7 +520,7 @@ int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) return -ESRCH; } -int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) +static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct dn_fib_table *tb; struct rtattr **rta = arg; @@ -748,11 +748,13 @@ void __exit dn_fib_cleanup(void) void __init dn_fib_init(void) { - dn_fib_table_init(); dn_fib_rules_init(); register_dnaddr_notifier(&dn_fib_dnaddr_notifier); + + rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL); + rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL); } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 2ae35ef1f077..5d7337bcf0fe 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1522,7 +1522,7 @@ rtattr_failure: /* * This is called by both endnodes and routers now. */ -int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) +static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) { struct rtattr **rta = arg; struct rtmsg *rtm = NLMSG_DATA(nlh); @@ -1813,6 +1813,13 @@ void __init dn_route_init(void) dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1); proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops); + +#ifdef CONFIG_DECNET_ROUTER + rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump); +#else + rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, + dn_cache_dump); +#endif } void __exit dn_route_cleanup(void) diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 5e86dd542302..a7a7da9b35c3 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -239,7 +239,7 @@ static u32 dn_fib_rule_default_pref(void) return 0; } -int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) +static int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) { return fib_rules_dump(skb, cb, AF_DECnet); } @@ -264,10 +264,12 @@ void __init dn_fib_rules_init(void) { list_add_tail(&default_rule.common.list, &dn_fib_rules); fib_rules_register(&dn_fib_rules_ops); + rtnl_register(PF_DECnet, RTM_GETRULE, NULL, dn_fib_dump_rules); } void __exit dn_fib_rules_cleanup(void) { + rtnl_unregister(PF_DECnet, RTM_GETRULE); fib_rules_unregister(&dn_fib_rules_ops); } -- cgit v1.2.3-59-g8ed1b From c127ea2c45d1b13a672fde254679721bb282e90a Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 22 Mar 2007 11:58:32 -0700 Subject: [IPv6]: Use rtnl registration interface Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- include/net/ip6_fib.h | 2 -- include/net/ip6_route.h | 5 ----- net/ipv6/addrconf.c | 35 ++++++++++++++++------------------- net/ipv6/af_inet6.c | 2 ++ net/ipv6/fib6_rules.c | 4 +++- net/ipv6/ip6_fib.c | 4 +++- net/ipv6/route.c | 10 +++++++--- 7 files changed, 31 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index cf355a3c2ad5..c48ea873f1e0 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -219,8 +219,6 @@ extern void fib6_init(void); extern void fib6_rules_init(void); extern void fib6_rules_cleanup(void); -extern int fib6_rules_dump(struct sk_buff *, - struct netlink_callback *); #endif #endif diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 4e927ebd1cb3..5456fdd6d047 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -116,12 +116,7 @@ extern void rt6_pmtu_discovery(struct in6_addr *daddr, struct net_device *dev, u32 pmtu); -struct nlmsghdr; struct netlink_callback; -extern int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb); -extern int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg); -extern int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg); -extern int inet6_rtm_getroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg); struct rt6_rtnl_dump_arg { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 38274c20eaa2..eecba1886b49 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3613,23 +3613,6 @@ errout: rtnl_set_sk_err(RTNLGRP_IPV6_PREFIX, err); } -static struct rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = { - [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, }, - [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, }, - [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, }, - [RTM_GETADDR - RTM_BASE] = { .doit = inet6_rtm_getaddr, - .dumpit = inet6_dump_ifaddr, }, - [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, }, - [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, }, - [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, }, - [RTM_DELROUTE - RTM_BASE] = { .doit = inet6_rtm_delroute, }, - [RTM_GETROUTE - RTM_BASE] = { .doit = inet6_rtm_getroute, - .dumpit = inet6_dump_fib, }, -#ifdef CONFIG_IPV6_MULTIPLE_TABLES - [RTM_GETRULE - RTM_BASE] = { .dumpit = fib6_rules_dump, }, -#endif -}; - static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) { inet6_ifa_notify(event ? 
: RTM_NEWADDR, ifp); @@ -4149,7 +4132,18 @@ int __init addrconf_init(void) register_netdevice_notifier(&ipv6_dev_notf); addrconf_verify(0); - rtnetlink_links[PF_INET6] = inet6_rtnetlink_table; + + err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo); + if (err < 0) + goto errout; + + /* Only the first call to __rtnl_register can fail */ + __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL); + __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL); + __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr, inet6_dump_ifaddr); + __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL, inet6_dump_ifmcaddr); + __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, inet6_dump_ifacaddr); + #ifdef CONFIG_SYSCTL addrconf_sysctl.sysctl_header = register_sysctl_table(addrconf_sysctl.addrconf_root_dir); @@ -4157,6 +4151,10 @@ int __init addrconf_init(void) #endif return 0; +errout: + unregister_netdevice_notifier(&ipv6_dev_notf); + + return err; } void __exit addrconf_cleanup(void) @@ -4168,7 +4166,6 @@ void __exit addrconf_cleanup(void) unregister_netdevice_notifier(&ipv6_dev_notf); - rtnetlink_links[PF_INET6] = NULL; #ifdef CONFIG_SYSCTL addrconf_sysctl_unregister(&ipv6_devconf_dflt); addrconf_sysctl_unregister(&ipv6_devconf); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 7b917f856e1c..82572b507547 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -945,6 +945,8 @@ static void __exit inet6_exit(void) { /* First of all disallow new sockets creation. */ sock_unregister(PF_INET6); + /* Disallow any further netlink messages */ + rtnl_unregister_all(PF_INET6); /* Cleanup code parts. */ ipv6_packet_cleanup(); diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index ea3035b4e3e8..c74da4b6dd2f 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -216,7 +216,7 @@ nla_put_failure: return -ENOBUFS; } -int fib6_rules_dump(struct sk_buff *skb, struct netlink_callback *cb) +static int fib6_rules_dump(struct sk_buff *skb, struct netlink_callback *cb) { return fib_rules_dump(skb, cb, AF_INET6); } @@ -255,9 +255,11 @@ void __init fib6_rules_init(void) list_add_tail(&main_rule.common.list, &fib6_rules); fib_rules_register(&fib6_rules_ops); + __rtnl_register(PF_INET6, RTM_GETRULE, NULL, fib6_rules_dump); } void fib6_rules_cleanup(void) { + rtnl_unregister(PF_INET6, RTM_GETRULE); fib_rules_unregister(&fib6_rules_ops); } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 268f476ef3db..ca08ee88d07f 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -359,7 +359,7 @@ end: return res; } -int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) +static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) { unsigned int h, s_h; unsigned int e = 0, s_e; @@ -1486,6 +1486,8 @@ void __init fib6_init(void) NULL, NULL); fib6_tables_init(); + + __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib); } void fib6_gc_cleanup(void) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 52cbe1cd4045..70f760f069b1 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2015,7 +2015,7 @@ errout: return err; } -int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) +static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { struct fib6_config cfg; int err; @@ -2027,7 +2027,7 @@ int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) return ip6_route_del(&cfg); } -int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) +static 
int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { struct fib6_config cfg; int err; @@ -2164,7 +2164,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg) prefix, NLM_F_MULTI); } -int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) +static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) { struct nlattr *tb[RTA_MAX+1]; struct rt6_info *rt; @@ -2508,6 +2508,10 @@ void __init ip6_route_init(void) #ifdef CONFIG_IPV6_MULTIPLE_TABLES fib6_rules_init(); #endif + + __rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL); + __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL); + __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL); } void ip6_route_cleanup(void) -- cgit v1.2.3-59-g8ed1b From c454673da7c1d6533f40ec2f788023df9af56ebf Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Sun, 25 Mar 2007 23:24:24 -0700 Subject: [NET] rules: Unified rules dumping Implements a unified, protocol-independent rules dumping function which is capable of dumping either a specific protocol family or all of them. This speeds up dumping as fewer lookups are required. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/net/fib_rules.h | 3 --- include/net/rtnetlink.h | 8 ++++++++ net/core/fib_rules.c | 47 +++++++++++++++++++++++++++++++++++++---------- net/decnet/dn_rules.c | 7 ------- net/ipv4/fib_rules.c | 7 ------- net/ipv6/fib6_rules.c | 7 ------- 6 files changed, 45 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index b2b9ccdf32d6..ff3029fe9656 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -98,7 +98,4 @@ extern int fib_rules_unregister(struct fib_rules_ops *); extern int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags, struct fib_lookup_arg *); - -extern int fib_rules_dump(struct sk_buff *, - struct netlink_callback *, int); #endif diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index dce7072bd28c..086fa9e89509 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -15,4 +15,12 @@ extern int rtnl_unregister(int protocol, int msgtype); extern void rtnl_unregister_all(int protocol); extern int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb); +static inline int rtnl_msg_family(struct nlmsghdr *nlh) +{ + if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg)) + return ((struct rtgenmsg *) nlmsg_data(nlh))->rtgen_family; + else + return AF_UNSPEC; +} + #endif diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index bf45f24cfea2..fdf05af16ba5 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -393,19 +393,15 @@ nla_put_failure: return -EMSGSIZE; } -int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family) +static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, + struct fib_rules_ops *ops) { int idx = 0; struct fib_rule *rule; - struct fib_rules_ops *ops; - - ops = lookup_rules_ops(family); - if (ops == NULL) - return -EAFNOSUPPORT; rcu_read_lock(); list_for_each_entry_rcu(rule, ops->rules_list, list) { - if (idx < cb->args[0]) + if (idx < cb->args[1]) goto skip; if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid, @@ -416,13 +412,44 @@ skip: idx++; } rcu_read_unlock(); - cb->args[0] = idx; + cb->args[1] = idx; rules_ops_put(ops); return skb->len; } -EXPORT_SYMBOL_GPL(fib_rules_dump); +static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) +{ + 
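/* Dump one family if the request names one, otherwise walk every registered fib_rules_ops. */
+ 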
struct fib_rules_ops *ops; + int idx = 0, family; + + family = rtnl_msg_family(cb->nlh); + if (family != AF_UNSPEC) { + /* Protocol specific dump request */ + ops = lookup_rules_ops(family); + if (ops == NULL) + return -EAFNOSUPPORT; + + return dump_rules(skb, cb, ops); + } + + rcu_read_lock(); + list_for_each_entry_rcu(ops, &rules_ops, list) { + if (idx < cb->args[0] || !try_module_get(ops->owner)) + goto skip; + + if (dump_rules(skb, cb, ops) < 0) + break; + + cb->args[1] = 0; + skip: + idx++; + } + rcu_read_unlock(); + cb->args[0] = idx; + + return skb->len; +} static void notify_rule_change(int event, struct fib_rule *rule, struct fib_rules_ops *ops, struct nlmsghdr *nlh, @@ -503,7 +530,7 @@ static int __init fib_rules_init(void) { rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL); rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL); - rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, rtnl_dump_all); + rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule); return register_netdevice_notifier(&fib_rules_notifier); } diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index a7a7da9b35c3..fd0cc2aa316c 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -239,11 +239,6 @@ static u32 dn_fib_rule_default_pref(void) return 0; } -static int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) -{ - return fib_rules_dump(skb, cb, AF_DECnet); -} - static struct fib_rules_ops dn_fib_rules_ops = { .family = AF_DECnet, .rule_size = sizeof(struct dn_fib_rule), @@ -264,12 +259,10 @@ void __init dn_fib_rules_init(void) { list_add_tail(&default_rule.common.list, &dn_fib_rules); fib_rules_register(&dn_fib_rules_ops); - rtnl_register(PF_DECnet, RTM_GETRULE, NULL, dn_fib_dump_rules); } void __exit dn_fib_rules_cleanup(void) { - rtnl_unregister(PF_DECnet, RTM_GETRULE); fib_rules_unregister(&dn_fib_rules_ops); } diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index a7f931ddfaad..b021b3440ca3 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -274,11 +274,6 @@ nla_put_failure: return -ENOBUFS; } -static int fib4_rule_dump(struct sk_buff *skb, struct netlink_callback *cb) -{ - return fib_rules_dump(skb, cb, AF_INET); -} - static u32 fib4_rule_default_pref(void) { struct list_head *pos; @@ -327,6 +322,4 @@ void __init fib4_rules_init(void) list_add_tail(&default_rule.common.list, &fib4_rules); fib_rules_register(&fib4_rules_ops); - - rtnl_register(PF_INET, RTM_GETRULE, NULL, fib4_rule_dump); } diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index c74da4b6dd2f..dd9720e700ef 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -216,11 +216,6 @@ nla_put_failure: return -ENOBUFS; } -static int fib6_rules_dump(struct sk_buff *skb, struct netlink_callback *cb) -{ - return fib_rules_dump(skb, cb, AF_INET6); -} - static u32 fib6_rule_default_pref(void) { return 0x3FFF; @@ -255,11 +250,9 @@ void __init fib6_rules_init(void) list_add_tail(&main_rule.common.list, &fib6_rules); fib_rules_register(&fib6_rules_ops); - __rtnl_register(PF_INET6, RTM_GETRULE, NULL, fib6_rules_dump); } void fib6_rules_cleanup(void) { - rtnl_unregister(PF_INET6, RTM_GETRULE); fib_rules_unregister(&fib6_rules_ops); } -- cgit v1.2.3-59-g8ed1b From 1d00a4eb42bdade33a6ec0961cada93577a66ae6 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Thu, 22 Mar 2007 23:30:12 -0700 Subject: [NETLINK]: Remove error pointer from netlink message handler The error pointer argument in netlink message handlers is used to signal the special case where processing has to be 
interrupted because a dump was started but no error happened. Instead it is simpler and more clear to return -EINTR and have netlink_run_queue() deal with getting the queue right. nfnetlink passed on this error pointer to its subsystem handlers but only uses it to signal the start of a netlink dump. Therefore it can be removed there as well. This patch also cleans up the error handling in the affected message handlers to be consistent since it had to be touched anyway. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/netfilter/nfnetlink.h | 2 +- include/net/netlink.h | 2 +- net/core/rtnetlink.c | 46 ++++++++++------------------- net/netfilter/nf_conntrack_netlink.c | 46 +++++++++++------------------ net/netfilter/nfnetlink.c | 26 ++++++----------- net/netfilter/nfnetlink_log.c | 4 +-- net/netfilter/nfnetlink_queue.c | 6 ++-- net/netlink/af_netlink.c | 21 ++++++++------ net/netlink/genetlink.c | 56 +++++++++++++----------------------- net/xfrm/xfrm_user.c | 40 +++++++++----------------- 10 files changed, 93 insertions(+), 156 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index e1ea5dfbbbd4..0f9311df1559 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -111,7 +111,7 @@ struct nfgenmsg { struct nfnl_callback { int (*call)(struct sock *nl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp); + struct nlmsghdr *nlh, struct nfattr *cda[]); u_int16_t attr_count; /* number of nfattr's */ }; diff --git a/include/net/netlink.h b/include/net/netlink.h index 510ca7fabe18..1c11518fc822 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -214,7 +214,7 @@ struct nl_info { extern void netlink_run_queue(struct sock *sk, unsigned int *qlen, int (*cb)(struct sk_buff *, - struct nlmsghdr *, int *)); + struct nlmsghdr *)); extern void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb); extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb, diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b2136accd267..14241ada41a1 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -852,8 +852,7 @@ static int rtattr_max; /* Process one rtnetlink message. 
*/ -static __inline__ int -rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) +static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { rtnl_doit_func doit; int sz_idx, kind; @@ -863,10 +862,8 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) int err; type = nlh->nlmsg_type; - - /* Unknown message: reply with EINVAL */ if (type > RTM_MAX) - goto err_inval; + return -EINVAL; type -= RTM_BASE; @@ -875,40 +872,33 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) return 0; family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family; - if (family >= NPROTO) { - *errp = -EAFNOSUPPORT; - return -1; - } + if (family >= NPROTO) + return -EAFNOSUPPORT; sz_idx = type>>2; kind = type&3; - if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) { - *errp = -EPERM; - return -1; - } + if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) + return -EPERM; if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { rtnl_dumpit_func dumpit; dumpit = rtnl_get_dumpit(family, type); if (dumpit == NULL) - goto err_inval; - - if ((*errp = netlink_dump_start(rtnl, skb, nlh, - dumpit, NULL)) != 0) { - return -1; - } + return -EINVAL; - netlink_queue_skip(nlh, skb); - return -1; + err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL); + if (err == 0) + err = -EINTR; + return err; } memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *))); min_len = rtm_min[sz_idx]; if (nlh->nlmsg_len < min_len) - goto err_inval; + return -EINVAL; if (nlh->nlmsg_len > min_len) { int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); @@ -918,7 +908,7 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) unsigned flavor = attr->rta_type; if (flavor) { if (flavor > rta_max[sz_idx]) - goto err_inval; + return -EINVAL; rta_buf[flavor-1] = attr; } attr = RTA_NEXT(attr, attrlen); @@ -927,15 +917,9 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) doit = rtnl_get_doit(family, type); if (doit == NULL) - goto err_inval; - err = doit(skb, nlh, (void *)&rta_buf[0]); - - *errp = err; - return err; + return -EINVAL; -err_inval: - *errp = -EINVAL; - return -1; + return doit(skb, nlh, (void *)&rta_buf[0]); } static void rtnetlink_rcv(struct sock *sk, int len) diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 76f11f325919..443ba7753a33 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -661,7 +661,7 @@ static const size_t cta_min[CTA_MAX] = { static int ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) + struct nlmsghdr *nlh, struct nfattr *cda[]) { struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; @@ -709,7 +709,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, static int ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) + struct nlmsghdr *nlh, struct nfattr *cda[]) { struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; @@ -720,22 +720,15 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, int err = 0; if (nlh->nlmsg_flags & NLM_F_DUMP) { - u32 rlen; - #ifndef CONFIG_NF_CT_ACCT if (NFNL_MSG_TYPE(nlh->nlmsg_type) == IPCTNL_MSG_CT_GET_CTRZERO) return -ENOTSUPP; #endif - if ((*errp = netlink_dump_start(ctnl, skb, nlh, - ctnetlink_dump_table, - ctnetlink_done)) != 0) - return -EINVAL; - - rlen = NLMSG_ALIGN(nlh->nlmsg_len); - if 
(rlen > skb->len) - rlen = skb->len; - skb_pull(skb, rlen); - return 0; + err = netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, + ctnetlink_done); + if (err == 0) + err = -EINTR; + return err; } if (nfattr_bad_size(cda, CTA_MAX, cta_min)) @@ -1009,7 +1002,7 @@ err: static int ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) + struct nlmsghdr *nlh, struct nfattr *cda[]) { struct nf_conntrack_tuple otuple, rtuple; struct nf_conntrack_tuple_hash *h = NULL; @@ -1260,7 +1253,7 @@ static const size_t cta_min_exp[CTA_EXPECT_MAX] = { static int ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) + struct nlmsghdr *nlh, struct nfattr *cda[]) { struct nf_conntrack_tuple tuple; struct nf_conntrack_expect *exp; @@ -1273,17 +1266,12 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, return -EINVAL; if (nlh->nlmsg_flags & NLM_F_DUMP) { - u32 rlen; - - if ((*errp = netlink_dump_start(ctnl, skb, nlh, - ctnetlink_exp_dump_table, - ctnetlink_done)) != 0) - return -EINVAL; - rlen = NLMSG_ALIGN(nlh->nlmsg_len); - if (rlen > skb->len) - rlen = skb->len; - skb_pull(skb, rlen); - return 0; + err = netlink_dump_start(ctnl, skb, nlh, + ctnetlink_exp_dump_table, + ctnetlink_done); + if (err == 0) + err = -EINTR; + return err; } if (cda[CTA_EXPECT_MASTER-1]) @@ -1330,7 +1318,7 @@ out: static int ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) + struct nlmsghdr *nlh, struct nfattr *cda[]) { struct nf_conntrack_expect *exp, *tmp; struct nf_conntrack_tuple tuple; @@ -1464,7 +1452,7 @@ out: static int ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) + struct nlmsghdr *nlh, struct nfattr *cda[]) { struct nf_conntrack_tuple tuple; struct nf_conntrack_expect *exp; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index dec36abdf949..c37ed0156b07 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -195,17 +195,14 @@ int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags) EXPORT_SYMBOL_GPL(nfnetlink_unicast); /* Process one complete nfnetlink message. */ -static int nfnetlink_rcv_msg(struct sk_buff *skb, - struct nlmsghdr *nlh, int *errp) +static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { struct nfnl_callback *nc; struct nfnetlink_subsystem *ss; - int type, err = 0; + int type, err; - if (security_netlink_recv(skb, CAP_NET_ADMIN)) { - *errp = -EPERM; - return -1; - } + if (security_netlink_recv(skb, CAP_NET_ADMIN)) + return -EPERM; /* Only requests are handled by kernel now. 
*/ if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) @@ -227,12 +224,12 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, ss = nfnetlink_get_subsys(type); if (!ss) #endif - goto err_inval; + return -EINVAL; } nc = nfnetlink_find_client(type, ss); if (!nc) - goto err_inval; + return -EINVAL; { u_int16_t attr_count = @@ -243,16 +240,9 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, err = nfnetlink_check_attributes(ss, nlh, cda); if (err < 0) - goto err_inval; - - err = nc->call(nfnl, skb, nlh, cda, errp); - *errp = err; - return err; + return err; + return nc->call(nfnl, skb, nlh, cda); } - -err_inval: - *errp = -EINVAL; - return -1; } static void nfnetlink_rcv(struct sock *sk, int len) diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 9709f94787f8..b174aadd73e6 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -759,7 +759,7 @@ static struct notifier_block nfulnl_rtnl_notifier = { static int nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp) + struct nlmsghdr *nlh, struct nfattr *nfqa[]) { return -ENOTSUPP; } @@ -797,7 +797,7 @@ static const int nfula_cfg_min[NFULA_CFG_MAX] = { static int nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfula[], int *errp) + struct nlmsghdr *nlh, struct nfattr *nfula[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t group_num = ntohs(nfmsg->res_id); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index b6585caa431e..9aefb1c9bfa3 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -783,7 +783,7 @@ static const int nfqa_verdict_min[NFQA_MAX] = { static int nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp) + struct nlmsghdr *nlh, struct nfattr *nfqa[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t queue_num = ntohs(nfmsg->res_id); @@ -848,7 +848,7 @@ err_out_put: static int nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp) + struct nlmsghdr *nlh, struct nfattr *nfqa[]) { return -ENOTSUPP; } @@ -865,7 +865,7 @@ static struct nf_queue_handler nfqh = { static int nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp) + struct nlmsghdr *nlh, struct nfattr *nfqa[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t queue_num = ntohs(nfmsg->res_id); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 5d1079b1838c..1823b7c63156 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1463,7 +1463,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) } static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, - struct nlmsghdr *, int *)) + struct nlmsghdr *)) { struct nlmsghdr *nlh; int err; @@ -1483,13 +1483,11 @@ static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, if (nlh->nlmsg_type < NLMSG_MIN_TYPE) goto skip; - if (cb(skb, nlh, &err) < 0) { - /* Not an error, but we have to interrupt processing - * here. Note: that in this case we do not pull - * message from skb, it will be processed later. 
- */ - if (err == 0) - return -1; + err = cb(skb, nlh); + if (err == -EINTR) { + /* Not an error, but we interrupt processing */ + netlink_queue_skip(nlh, skb); + return err; } skip: if (nlh->nlmsg_flags & NLM_F_ACK || err) @@ -1515,9 +1513,14 @@ skip: * * qlen must be initialized to 0 before the initial entry, afterwards * the function may be called repeatedly until qlen reaches 0. + * + * The callback function may return -EINTR to signal that processing + * of netlink messages shall be interrupted. In this case the message + * currently being processed will NOT be requeued onto the receive + * queue. */ void netlink_run_queue(struct sock *sk, unsigned int *qlen, - int (*cb)(struct sk_buff *, struct nlmsghdr *, int *)) + int (*cb)(struct sk_buff *, struct nlmsghdr *)) { struct sk_buff *skb; diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 95391e609046..1b897bc92e61 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -295,60 +295,49 @@ int genl_unregister_family(struct genl_family *family) return -ENOENT; } -static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, - int *errp) +static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { struct genl_ops *ops; struct genl_family *family; struct genl_info info; struct genlmsghdr *hdr = nlmsg_data(nlh); - int hdrlen, err = -EINVAL; + int hdrlen, err; family = genl_family_find_byid(nlh->nlmsg_type); - if (family == NULL) { - err = -ENOENT; - goto errout; - } + if (family == NULL) + return -ENOENT; hdrlen = GENL_HDRLEN + family->hdrsize; if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) - goto errout; + return -EINVAL; ops = genl_get_cmd(hdr->cmd, family); - if (ops == NULL) { - err = -EOPNOTSUPP; - goto errout; - } + if (ops == NULL) + return -EOPNOTSUPP; - if ((ops->flags & GENL_ADMIN_PERM) && security_netlink_recv(skb, CAP_NET_ADMIN)) { - err = -EPERM; - goto errout; - } + if ((ops->flags & GENL_ADMIN_PERM) && + security_netlink_recv(skb, CAP_NET_ADMIN)) + return -EPERM; if (nlh->nlmsg_flags & NLM_F_DUMP) { - if (ops->dumpit == NULL) { - err = -EOPNOTSUPP; - goto errout; - } + if (ops->dumpit == NULL) + return -EOPNOTSUPP; - *errp = err = netlink_dump_start(genl_sock, skb, nlh, - ops->dumpit, ops->done); + err = netlink_dump_start(genl_sock, skb, nlh, + ops->dumpit, ops->done); if (err == 0) - skb_pull(skb, min(NLMSG_ALIGN(nlh->nlmsg_len), - skb->len)); - return -1; + err = -EINTR; + return err; } - if (ops->doit == NULL) { - err = -EOPNOTSUPP; - goto errout; - } + if (ops->doit == NULL) + return -EOPNOTSUPP; if (family->attrbuf) { err = nlmsg_parse(nlh, hdrlen, family->attrbuf, family->maxattr, ops->policy); if (err < 0) - goto errout; + return err; } info.snd_seq = nlh->nlmsg_seq; @@ -358,12 +347,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN; info.attrs = family->attrbuf; - *errp = err = ops->doit(skb, &info); - return err; - -errout: - *errp = err; - return -1; + return ops->doit(skb, &info); } static void genl_rcv(struct sock *sk, int len) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 4d2f2094e6df..5e52d6275bad 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1852,46 +1852,39 @@ static struct xfrm_link { [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, }; -static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) +static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { struct rtattr *xfrma[XFRMA_MAX]; struct xfrm_link *link; - 
int type, min_len; + int type, min_len, err; type = nlh->nlmsg_type; - - /* Unknown message: reply with EINVAL */ if (type > XFRM_MSG_MAX) - goto err_einval; + return -EINVAL; type -= XFRM_MSG_BASE; link = &xfrm_dispatch[type]; /* All operations require privileges, even GET */ - if (security_netlink_recv(skb, CAP_NET_ADMIN)) { - *errp = -EPERM; - return -1; - } + if (security_netlink_recv(skb, CAP_NET_ADMIN)) + return -EPERM; if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && (nlh->nlmsg_flags & NLM_F_DUMP)) { if (link->dump == NULL) - goto err_einval; - - if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh, - link->dump, NULL)) != 0) { - return -1; - } + return -EINVAL; - netlink_queue_skip(nlh, skb); - return -1; + err = netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL); + if (err == 0) + err = -EINTR; + return err; } memset(xfrma, 0, sizeof(xfrma)); if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type])) - goto err_einval; + return -EINVAL; if (nlh->nlmsg_len > min_len) { int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); @@ -1901,7 +1894,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err unsigned short flavor = attr->rta_type; if (flavor) { if (flavor > XFRMA_MAX) - goto err_einval; + return -EINVAL; xfrma[flavor - 1] = attr; } attr = RTA_NEXT(attr, attrlen); @@ -1909,14 +1902,9 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err } if (link->doit == NULL) - goto err_einval; - *errp = link->doit(skb, nlh, xfrma); - - return *errp; + return -EINVAL; -err_einval: - *errp = -EINVAL; - return -1; + return link->doit(skb, nlh, xfrma); } static void xfrm_netlink_rcv(struct sock *sk, int len) -- cgit v1.2.3-59-g8ed1b From 5f79e0f916a3bdeccc910fdf466bca582a9b2cca Mon Sep 17 00:00:00 2001 From: Yasuyuki Kozakai Date: Fri, 23 Mar 2007 11:17:07 -0700 Subject: [NETFILTER]: nf_conntrack: don't use nfct in skb if conntrack is disabled Signed-off-by: Yasuyuki Kozakai Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/netfilter.h | 11 +++++++---- include/linux/skbuff.h | 24 +++++++----------------- net/core/skbuff.c | 4 +--- net/netfilter/core.c | 2 ++ 4 files changed, 17 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 70d3b4f1e48d..4777f1b619ce 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -281,9 +281,6 @@ extern void nf_reinject(struct sk_buff *skb, struct nf_info *info, unsigned int verdict); -extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *); -extern void nf_ct_attach(struct sk_buff *, struct sk_buff *); - /* FIXME: Before cache is ever used, this must be implemented for real. 
*/ extern void nf_invalidate_cache(int pf); @@ -388,11 +385,17 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb, { return 1; } -static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} struct flowi; static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) {} #endif /*CONFIG_NETFILTER*/ +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *); +extern void nf_ct_attach(struct sk_buff *, struct sk_buff *); +#else +static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} +#endif + #endif /*__KERNEL__*/ #endif /*__LINUX_NETFILTER_H*/ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 81ac934d5964..0bedf5384850 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -87,11 +87,12 @@ struct net_device; -#ifdef CONFIG_NETFILTER +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { atomic_t use; void (*destroy)(struct nf_conntrack *); }; +#endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info { @@ -106,8 +107,6 @@ struct nf_bridge_info { }; #endif -#endif - struct sk_buff_head { /* These two members must be first. */ struct sk_buff *next; @@ -276,15 +275,13 @@ struct sk_buff { __be16 protocol; void (*destructor)(struct sk_buff *skb); -#ifdef CONFIG_NETFILTER - struct nf_conntrack *nfct; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + struct nf_conntrack *nfct; struct sk_buff *nfct_reasm; #endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info *nf_bridge; #endif -#endif /* CONFIG_NETFILTER */ #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ #ifdef CONFIG_NET_CLS_ACT @@ -1558,7 +1555,7 @@ static inline unsigned int skb_checksum_complete(struct sk_buff *skb) __skb_checksum_complete(skb); } -#ifdef CONFIG_NETFILTER +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) static inline void nf_conntrack_put(struct nf_conntrack *nfct) { if (nfct && atomic_dec_and_test(&nfct->use)) @@ -1569,7 +1566,6 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct) if (nfct) atomic_inc(&nfct->use); } -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) static inline void nf_conntrack_get_reasm(struct sk_buff *skb) { if (skb) @@ -1595,9 +1591,9 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) #endif /* CONFIG_BRIDGE_NETFILTER */ static inline void nf_reset(struct sk_buff *skb) { +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb->nfct); skb->nfct = NULL; -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put_reasm(skb->nfct_reasm); skb->nfct_reasm = NULL; #endif @@ -1610,10 +1606,10 @@ static inline void nf_reset(struct sk_buff *skb) /* Note: This doesn't put any conntrack and bridge info in dst. 
*/ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) { +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) dst->nfct = src->nfct; nf_conntrack_get(src->nfct); dst->nfctinfo = src->nfctinfo; -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) dst->nfct_reasm = src->nfct_reasm; nf_conntrack_get_reasm(src->nfct_reasm); #endif @@ -1625,8 +1621,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) { - nf_conntrack_put(dst->nfct); #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(dst->nfct); nf_conntrack_put_reasm(dst->nfct_reasm); #endif #ifdef CONFIG_BRIDGE_NETFILTER @@ -1635,12 +1631,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) __nf_copy(dst, src); } -#else /* CONFIG_NETFILTER */ -static inline void nf_reset(struct sk_buff *skb) {} -static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) {} -static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) {} -#endif /* CONFIG_NETFILTER */ - #ifdef CONFIG_NETWORK_SECMARK static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index e28f119156f7..f16c72204cf6 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -325,15 +325,13 @@ void __kfree_skb(struct sk_buff *skb) WARN_ON(in_irq()); skb->destructor(skb); } -#ifdef CONFIG_NETFILTER - nf_conntrack_put(skb->nfct); #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(skb->nfct); nf_conntrack_put_reasm(skb->nfct_reasm); #endif #ifdef CONFIG_BRIDGE_NETFILTER nf_bridge_put(skb->nf_bridge); #endif -#endif /* XXX: IS this still necessary? - JHS */ #ifdef CONFIG_NET_SCHED skb->tc_index = 0; diff --git a/net/netfilter/core.c b/net/netfilter/core.c index d802b342c615..fe5f22df620c 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -240,6 +240,7 @@ void nf_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, } EXPORT_SYMBOL(nf_proto_csum_replace4); +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) /* This does not belong here, but locally generated errors need it if connection tracking in use: without this, connection may not be in hash table, and hence manufactured ICMP or RST packets will not be associated with it. */ @@ -259,6 +260,7 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) } } EXPORT_SYMBOL(nf_ct_attach); +#endif #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_net_netfilter; -- cgit v1.2.3-59-g8ed1b From de6e05c49f8b4ed63224c5d38891f531ecc4eabb Mon Sep 17 00:00:00 2001 From: Yasuyuki Kozakai Date: Fri, 23 Mar 2007 11:17:27 -0700 Subject: [NETFILTER]: nf_conntrack: kill destroy() in struct nf_conntrack for diet The destructor per conntrack is unnecessary, then this replaces it with system wide destructor. Signed-off-by: Yasuyuki Kozakai Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/netfilter.h | 1 + include/linux/skbuff.h | 4 ++-- net/netfilter/core.c | 17 ++++++++++++++++- net/netfilter/nf_conntrack_core.c | 4 +++- 4 files changed, 22 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 4777f1b619ce..10b5c6275706 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -393,6 +393,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) {} #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *); extern void nf_ct_attach(struct sk_buff *, struct sk_buff *); +extern void (*nf_ct_destroy)(struct nf_conntrack *); #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0bedf5384850..37247901ebd2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -90,7 +90,6 @@ struct net_device; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { atomic_t use; - void (*destroy)(struct nf_conntrack *); }; #endif @@ -1556,10 +1555,11 @@ static inline unsigned int skb_checksum_complete(struct sk_buff *skb) } #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +extern void nf_conntrack_destroy(struct nf_conntrack *nfct); static inline void nf_conntrack_put(struct nf_conntrack *nfct) { if (nfct && atomic_dec_and_test(&nfct->use)) - nfct->destroy(nfct); + nf_conntrack_destroy(nfct); } static inline void nf_conntrack_get(struct nf_conntrack *nfct) { diff --git a/net/netfilter/core.c b/net/netfilter/core.c index fe5f22df620c..a84478ee2ded 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -260,7 +260,22 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) } } EXPORT_SYMBOL(nf_ct_attach); -#endif + +void (*nf_ct_destroy)(struct nf_conntrack *); +EXPORT_SYMBOL(nf_ct_destroy); + +void nf_conntrack_destroy(struct nf_conntrack *nfct) +{ + void (*destroy)(struct nf_conntrack *); + + rcu_read_lock(); + destroy = rcu_dereference(nf_ct_destroy); + BUG_ON(destroy == NULL); + destroy(nfct); + rcu_read_unlock(); +} +EXPORT_SYMBOL(nf_conntrack_destroy); +#endif /* CONFIG_NF_CONNTRACK */ #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_net_netfilter; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 6f2aac1d01af..e132c8ae8784 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -616,7 +616,6 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, memset(conntrack, 0, nf_ct_cache[features].size); conntrack->features = features; atomic_set(&conntrack->ct_general.use, 1); - conntrack->ct_general.destroy = destroy_conntrack; conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; /* Don't set timer yet: wait for confirmation */ @@ -1122,6 +1121,8 @@ void nf_conntrack_cleanup(void) while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) schedule(); + rcu_assign_pointer(nf_ct_destroy, NULL); + for (i = 0; i < NF_CT_F_NUM; i++) { if (nf_ct_cache[i].use == 0) continue; @@ -1259,6 +1260,7 @@ int __init nf_conntrack_init(void) /* For use by REJECT target */ rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach); + rcu_assign_pointer(nf_ct_destroy, destroy_conntrack); /* Set up fake conntrack: - to never be deleted, not in any hashes */ -- cgit v1.2.3-59-g8ed1b From 
26e252df1e6e5b68eb790e4a4baf745aa3870038 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 23 Mar 2007 11:27:29 -0700 Subject: [NET_SCHED]: kill PSCHED_AUDIT_TDIFF Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 1 - net/sched/sch_cbq.c | 2 -- 2 files changed, 3 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 6555e57ff6c9..276d1ad2b708 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -59,7 +59,6 @@ typedef long psched_tdiff_t; #define PSCHED_TADD(tv, delta) ((tv) += (delta)) #define PSCHED_SET_PASTPERFECT(t) ((t) = 0) #define PSCHED_IS_PASTPERFECT(t) ((t) == 0) -#define PSCHED_AUDIT_TDIFF(t) struct qdisc_watchdog { struct hrtimer timer; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index dcd9c31dc399..57ac6c5cd273 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -820,8 +820,6 @@ cbq_update(struct cbq_sched_data *q) idle -= L2T(&q->link, len); idle += L2T(cl, len); - PSCHED_AUDIT_TDIFF(idle); - PSCHED_TADD2(q->now, idle, cl->undertime); } else { /* Underlimit */ -- cgit v1.2.3-59-g8ed1b From 7c59e25f3186f26e85b13a318dbc4482d1d363e9 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 23 Mar 2007 11:27:45 -0700 Subject: [NET_SCHED]: kill PSCHED_TADD/PSCHED_TADD2 Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 2 -- net/sched/sch_cbq.c | 12 ++++++------ net/sched/sch_netem.c | 2 +- 3 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 276d1ad2b708..32cdf0137cb2 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -55,8 +55,6 @@ typedef long psched_tdiff_t; #define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \ min_t(long long, (tv1) - (tv2), bound) #define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2)) -#define PSCHED_TADD2(tv, delta, tv_res) ((tv_res) = (tv) + (delta)) -#define PSCHED_TADD(tv, delta) ((tv) += (delta)) #define PSCHED_SET_PASTPERFECT(t) ((t) = 0) #define PSCHED_IS_PASTPERFECT(t) ((t) == 0) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 57ac6c5cd273..290b26bdc89d 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -387,7 +387,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) PSCHED_GET_TIME(now); incr = PSCHED_TDIFF(now, q->now_rt); - PSCHED_TADD2(q->now, incr, now); + now = q->now + incr; do { if (PSCHED_TLESS(cl->undertime, now)) { @@ -492,7 +492,7 @@ static void cbq_ovl_classic(struct cbq_class *cl) cl->avgidle = cl->minidle; if (delay <= 0) delay = 1; - PSCHED_TADD2(q->now, delay, cl->undertime); + cl->undertime = q->now + delay; cl->xstats.overactions++; cl->delayed = 1; @@ -558,7 +558,7 @@ static void cbq_ovl_delay(struct cbq_class *cl) delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); if (cl->avgidle < cl->minidle) cl->avgidle = cl->minidle; - PSCHED_TADD2(q->now, delay, cl->undertime); + cl->undertime = q->now + delay; if (delay > 0) { sched += delay + cl->penalty; @@ -820,7 +820,7 @@ cbq_update(struct cbq_sched_data *q) idle -= L2T(&q->link, len); idle += L2T(cl, len); - PSCHED_TADD2(q->now, idle, cl->undertime); + cl->undertime = q->now + idle; } else { /* Underlimit */ @@ -1018,12 +1018,12 @@ cbq_dequeue(struct Qdisc *sch) cbq_time = max(real_time, work); */ incr2 = L2T(&q->link, q->tx_len); - PSCHED_TADD(q->now, incr2); + q->now += incr2; cbq_update(q); if ((incr -= incr2) < 0) incr = 0; } - PSCHED_TADD(q->now, incr); + q->now += 
incr; q->now_rt = now; for (;;) { diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index bc4284396fcb..6044ae77d5da 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -218,7 +218,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) &q->delay_cor, q->delay_dist); PSCHED_GET_TIME(now); - PSCHED_TADD2(now, delay, cb->time_to_send); + cb->time_to_send = now + delay; ++q->counter; ret = q->qdisc->enqueue(skb, q->qdisc); } else { -- cgit v1.2.3-59-g8ed1b From 104e0878984bb467e3f54d61105d8903babb4ec1 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 23 Mar 2007 11:28:07 -0700 Subject: [NET_SCHED]: kill PSCHED_TLESS Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 1 - net/sched/sch_cbq.c | 7 +++---- net/sched/sch_netem.c | 6 +++--- 3 files changed, 6 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 32cdf0137cb2..49325ffb00b1 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -54,7 +54,6 @@ typedef long psched_tdiff_t; #define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2)) #define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \ min_t(long long, (tv1) - (tv2), bound) -#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2)) #define PSCHED_SET_PASTPERFECT(t) ((t) = 0) #define PSCHED_IS_PASTPERFECT(t) ((t) == 0) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 290b26bdc89d..9e6cdab6af3b 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -390,7 +390,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) now = q->now + incr; do { - if (PSCHED_TLESS(cl->undertime, now)) { + if (cl->undertime < now) { q->toplevel = cl->level; return; } @@ -845,8 +845,7 @@ cbq_under_limit(struct cbq_class *cl) if (cl->tparent == NULL) return cl; - if (PSCHED_IS_PASTPERFECT(cl->undertime) || - !PSCHED_TLESS(q->now, cl->undertime)) { + if (PSCHED_IS_PASTPERFECT(cl->undertime) || q->now >= cl->undertime) { cl->delayed = 0; return cl; } @@ -870,7 +869,7 @@ cbq_under_limit(struct cbq_class *cl) if (cl->level > q->toplevel) return NULL; } while (!PSCHED_IS_PASTPERFECT(cl->undertime) && - PSCHED_TLESS(q->now, cl->undertime)); + q->now < cl->undertime); cl->delayed = 0; return cl; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 6044ae77d5da..5d571aa04a76 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -286,7 +286,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) /* if more time remaining? 
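With the scheduler clock held in a plain integer tick count, the removed PSCHED_TADD()/PSCHED_TADD2() and PSCHED_TLESS() macros reduce to ordinary arithmetic and comparisons, as the cbq and netem hunks above show. A tiny standalone demo of the same arithmetic; ptime_t and ptdiff_t merely stand in for psched_time_t and psched_tdiff_t:

#include <stdio.h>

typedef unsigned long long ptime_t;	/* stands in for psched_time_t */
typedef long long ptdiff_t;		/* stands in for psched_tdiff_t */

int main(void)
{
	ptime_t now = 1000;
	ptdiff_t delay = 250;
	ptime_t time_to_send = now + delay;	/* was PSCHED_TADD2(now, delay, ...) */

	if (time_to_send <= now + 500)		/* was a PSCHED_TLESS()-style check */
		printf("due at tick %llu\n", time_to_send);
	return 0;
}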
*/ PSCHED_GET_TIME(now); - if (!PSCHED_TLESS(now, cb->time_to_send)) { + if (cb->time_to_send <= now) { pr_debug("netem_dequeue: return skb=%p\n", skb); sch->q.qlen--; return skb; @@ -494,7 +494,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) if (likely(skb_queue_len(list) < q->limit)) { /* Optimize for add at tail */ - if (likely(skb_queue_empty(list) || !PSCHED_TLESS(tnext, q->oldest))) { + if (likely(skb_queue_empty(list) || tnext >= q->oldest)) { q->oldest = tnext; return qdisc_enqueue_tail(nskb, sch); } @@ -503,7 +503,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) const struct netem_skb_cb *cb = (const struct netem_skb_cb *)skb->cb; - if (!PSCHED_TLESS(tnext, cb->time_to_send)) + if (tnext >= cb->time_to_send) break; } -- cgit v1.2.3-59-g8ed1b From a084980dcbf56c896e4b6c19aff2b082d5db7006 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 23 Mar 2007 11:28:30 -0700 Subject: [NET_SCHED]: kill PSCHED_SET_PASTPERFECT/PSCHED_IS_PASTPERFECT Use direct assignment and comparison instead. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 3 +-- include/net/red.h | 4 ++-- net/sched/sch_cbq.c | 17 ++++++++--------- net/sched/sch_netem.c | 2 +- 4 files changed, 12 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 49325ffb00b1..c40147a60205 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -54,8 +54,7 @@ typedef long psched_tdiff_t; #define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2)) #define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \ min_t(long long, (tv1) - (tv2), bound) -#define PSCHED_SET_PASTPERFECT(t) ((t) = 0) -#define PSCHED_IS_PASTPERFECT(t) ((t) == 0) +#define PSCHED_PASTPERFECT 0 struct qdisc_watchdog { struct hrtimer timer; diff --git a/include/net/red.h b/include/net/red.h index a4eb37946f2c..d9e1149a2bca 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -151,7 +151,7 @@ static inline void red_set_parms(struct red_parms *p, static inline int red_is_idling(struct red_parms *p) { - return !PSCHED_IS_PASTPERFECT(p->qidlestart); + return p->qidlestart != PSCHED_PASTPERFECT; } static inline void red_start_of_idle_period(struct red_parms *p) @@ -161,7 +161,7 @@ static inline void red_start_of_idle_period(struct red_parms *p) static inline void red_end_of_idle_period(struct red_parms *p) { - PSCHED_SET_PASTPERFECT(p->qidlestart); + p->qidlestart = PSCHED_PASTPERFECT; } static inline void red_restart(struct red_parms *p) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 9e6cdab6af3b..2bb271b0efc3 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -738,7 +738,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, if (cl && q->toplevel >= borrowed->level) { if (cl->q->q.qlen > 1) { do { - if (PSCHED_IS_PASTPERFECT(borrowed->undertime)) { + if (borrowed->undertime == PSCHED_PASTPERFECT) { q->toplevel = borrowed->level; return; } @@ -824,7 +824,7 @@ cbq_update(struct cbq_sched_data *q) } else { /* Underlimit */ - PSCHED_SET_PASTPERFECT(cl->undertime); + cl->undertime = PSCHED_PASTPERFECT; if (avgidle > cl->maxidle) cl->avgidle = cl->maxidle; else @@ -845,7 +845,7 @@ cbq_under_limit(struct cbq_class *cl) if (cl->tparent == NULL) return cl; - if (PSCHED_IS_PASTPERFECT(cl->undertime) || q->now >= cl->undertime) { + if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) { cl->delayed = 0; return cl; } @@ -868,8 +868,7 @@ cbq_under_limit(struct cbq_class *cl) } 
if (cl->level > q->toplevel) return NULL; - } while (!PSCHED_IS_PASTPERFECT(cl->undertime) && - q->now < cl->undertime); + } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime); cl->delayed = 0; return cl; @@ -1054,11 +1053,11 @@ cbq_dequeue(struct Qdisc *sch) */ if (q->toplevel == TC_CBQ_MAXLEVEL && - PSCHED_IS_PASTPERFECT(q->link.undertime)) + q->link.undertime == PSCHED_PASTPERFECT) break; q->toplevel = TC_CBQ_MAXLEVEL; - PSCHED_SET_PASTPERFECT(q->link.undertime); + q->link.undertime = PSCHED_PASTPERFECT; } /* No packets in scheduler or nobody wants to give them to us :-( @@ -1289,7 +1288,7 @@ cbq_reset(struct Qdisc* sch) qdisc_reset(cl->q); cl->next_alive = NULL; - PSCHED_SET_PASTPERFECT(cl->undertime); + cl->undertime = PSCHED_PASTPERFECT; cl->avgidle = cl->maxidle; cl->deficit = cl->quantum; cl->cpriority = cl->priority; @@ -1650,7 +1649,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, cl->xstats.avgidle = cl->avgidle; cl->xstats.undertime = 0; - if (!PSCHED_IS_PASTPERFECT(cl->undertime)) + if (cl->undertime != PSCHED_PASTPERFECT) cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now); if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 5d571aa04a76..1e88301f505c 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -532,7 +532,7 @@ static int tfifo_init(struct Qdisc *sch, struct rtattr *opt) } else q->limit = max_t(u32, sch->dev->tx_queue_len, 1); - PSCHED_SET_PASTPERFECT(q->oldest); + q->oldest = PSCHED_PASTPERFECT; return 0; } -- cgit v1.2.3-59-g8ed1b From 8edc0c31d6b7849b0fb50db86824830769241939 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 23 Mar 2007 11:28:55 -0700 Subject: [NET_SCHED]: kill PSCHED_TDIFF Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/net/pkt_sched.h | 1 - net/sched/sch_cbq.c | 14 +++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index c40147a60205..163973740207 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -51,7 +51,6 @@ typedef long psched_tdiff_t; #define PSCHED_GET_TIME(stamp) \ ((stamp) = PSCHED_NS2US(ktime_to_ns(ktime_get()))) -#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2)) #define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \ min_t(long long, (tv1) - (tv2), bound) #define PSCHED_PASTPERFECT 0 diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 2bb271b0efc3..f9e8403c5222 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -386,7 +386,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) psched_tdiff_t incr; PSCHED_GET_TIME(now); - incr = PSCHED_TDIFF(now, q->now_rt); + incr = now - q->now_rt; now = q->now + incr; do { @@ -474,7 +474,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch) static void cbq_ovl_classic(struct cbq_class *cl) { struct cbq_sched_data *q = qdisc_priv(cl->qdisc); - psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now); + psched_tdiff_t delay = cl->undertime - q->now; if (!cl->delayed) { delay += cl->offtime; @@ -509,7 +509,7 @@ static void cbq_ovl_classic(struct cbq_class *cl) psched_tdiff_t base_delay = q->wd_expires; for (b = cl->borrow; b; b = b->borrow) { - delay = PSCHED_TDIFF(b->undertime, q->now); + delay = b->undertime - q->now; if (delay < base_delay) { if (delay <= 0) delay = 1; @@ -547,7 +547,7 @@ static void cbq_ovl_rclassic(struct cbq_class *cl) static void cbq_ovl_delay(struct cbq_class *cl) { struct cbq_sched_data *q = qdisc_priv(cl->qdisc); - psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now); + psched_tdiff_t delay = cl->undertime - q->now; if (!cl->delayed) { psched_time_t sched = q->now; @@ -776,7 +776,7 @@ cbq_update(struct cbq_sched_data *q) idle = (now - last) - last_pktlen/rate */ - idle = PSCHED_TDIFF(q->now, cl->last); + idle = q->now - cl->last; if ((unsigned long)idle > 128*1024*1024) { avgidle = cl->maxidle; } else { @@ -1004,7 +1004,7 @@ cbq_dequeue(struct Qdisc *sch) psched_tdiff_t incr; PSCHED_GET_TIME(now); - incr = PSCHED_TDIFF(now, q->now_rt); + incr = now - q->now_rt; if (q->tx_class) { psched_tdiff_t incr2; @@ -1650,7 +1650,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, cl->xstats.undertime = 0; if (cl->undertime != PSCHED_PASTPERFECT) - cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now); + cl->xstats.undertime = cl->undertime - q->now; if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || #ifdef CONFIG_NET_ESTIMATOR -- cgit v1.2.3-59-g8ed1b From 03cc45c0a5b9b7f74768feb43b9a2525d203bbdb Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 23 Mar 2007 11:29:11 -0700 Subject: [NET_SCHED]: turn PSCHED_TDIFF_SAFE into inline function Also rename to psched_tdiff_bounded. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/net/pkt_sched.h | 8 ++++++-- include/net/red.h | 2 +- net/sched/act_police.c | 8 ++++---- net/sched/sch_htb.c | 4 ++-- net/sched/sch_tbf.c | 2 +- 5 files changed, 14 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 163973740207..e6b1da050d32 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -51,10 +51,14 @@ typedef long psched_tdiff_t; #define PSCHED_GET_TIME(stamp) \ ((stamp) = PSCHED_NS2US(ktime_to_ns(ktime_get()))) -#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \ - min_t(long long, (tv1) - (tv2), bound) #define PSCHED_PASTPERFECT 0 +static inline psched_tdiff_t +psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound) +{ + return min(tv1 - tv2, bound); +} + struct qdisc_watchdog { struct hrtimer timer; struct Qdisc *qdisc; diff --git a/include/net/red.h b/include/net/red.h index d9e1149a2bca..0bc16913fdd7 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -178,7 +178,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) int shift; PSCHED_GET_TIME(now); - us_idle = PSCHED_TDIFF_SAFE(now, p->qidlestart, p->Scell_max); + us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max); /* * The problem: ideally, average length queue recalcultion should diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 0a5679ea6c64..65d60a3f7761 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -298,8 +298,8 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, PSCHED_GET_TIME(now); - toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c, - police->tcfp_burst); + toks = psched_tdiff_bounded(now, police->tcfp_t_c, + police->tcfp_burst); if (police->tcfp_P_tab) { ptoks = toks + police->tcfp_ptoks; if (ptoks > (long)L2T_P(police, police->tcfp_mtu)) @@ -544,8 +544,8 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *police) } PSCHED_GET_TIME(now); - toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c, - police->tcfp_burst); + toks = psched_tdiff_bounded(now, police->tcfp_t_c, + police->tcfp_burst); if (police->tcfp_P_tab) { ptoks = toks + police->tcfp_ptoks; if (ptoks > (long)L2T_P(police, police->tcfp_mtu)) diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index b7abd0ae676a..71e4c92b7e87 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -729,7 +729,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, cl->T = toks while (cl) { - diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer); + diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer); if (cl->level >= level) { if (cl->level == level) cl->xstats.lends++; @@ -789,7 +789,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level) return cl->pq_key; htb_safe_rb_erase(p, q->wait_pq + level); - diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer); + diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer); htb_change_class_mode(q, cl, &diff); if (cl->cmode != HTB_CAN_SEND) htb_add_to_wait_tree(q, cl, diff); diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 626ce96800fe..da9f40e54447 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -201,7 +201,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch) PSCHED_GET_TIME(now); - toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer); + toks = psched_tdiff_bounded(now, q->t_c, q->buffer); if (q->P_tab) { ptoks = toks + q->ptokens; -- cgit v1.2.3-59-g8ed1b From 3bebcda28077375470dd60545b71bba2f83335fd Mon Sep 17 
00:00:00 2001 From: Patrick McHardy Date: Fri, 23 Mar 2007 11:29:25 -0700 Subject: [NET_SCHED]: turn PSCHED_GET_TIME into inline function Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 8 +++++--- include/net/red.h | 4 ++-- net/sched/act_police.c | 9 ++++----- net/sched/sch_cbq.c | 10 +++++----- net/sched/sch_hfsc.c | 10 ++++------ net/sched/sch_htb.c | 6 +++--- net/sched/sch_netem.c | 8 +++----- net/sched/sch_tbf.c | 7 +++---- 8 files changed, 29 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index e6b1da050d32..b2cc9a8ed4e7 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -48,11 +48,13 @@ typedef long psched_tdiff_t; #define PSCHED_NS2US(x) ((x) >> 10) #define PSCHED_TICKS_PER_SEC PSCHED_NS2US(NSEC_PER_SEC) -#define PSCHED_GET_TIME(stamp) \ - ((stamp) = PSCHED_NS2US(ktime_to_ns(ktime_get()))) - #define PSCHED_PASTPERFECT 0 +static inline psched_time_t psched_get_time(void) +{ + return PSCHED_NS2US(ktime_to_ns(ktime_get())); +} + static inline psched_tdiff_t psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound) { diff --git a/include/net/red.h b/include/net/red.h index 0bc16913fdd7..3cf31d466a81 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -156,7 +156,7 @@ static inline int red_is_idling(struct red_parms *p) static inline void red_start_of_idle_period(struct red_parms *p) { - PSCHED_GET_TIME(p->qidlestart); + p->qidlestart = psched_get_time(); } static inline void red_end_of_idle_period(struct red_parms *p) @@ -177,7 +177,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) long us_idle; int shift; - PSCHED_GET_TIME(now); + now = psched_get_time(); us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max); /* diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 65d60a3f7761..616f465f407e 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -241,7 +241,7 @@ override: if (ret != ACT_P_CREATED) return ret; - PSCHED_GET_TIME(police->tcfp_t_c); + police->tcfp_t_c = psched_get_time(); police->tcf_index = parm->index ? parm->index : tcf_hash_new_index(&police_idx_gen, &police_hash_info); h = tcf_hash(police->tcf_index, POL_TAB_MASK); @@ -296,8 +296,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, return police->tcfp_result; } - PSCHED_GET_TIME(now); - + now = psched_get_time(); toks = psched_tdiff_bounded(now, police->tcfp_t_c, police->tcfp_burst); if (police->tcfp_P_tab) { @@ -495,7 +494,7 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est) } if (police->tcfp_P_tab) police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu); - PSCHED_GET_TIME(police->tcfp_t_c); + police->tcfp_t_c = psched_get_time(); police->tcf_index = parm->index ? 
parm->index : tcf_police_new_index(); police->tcf_action = parm->action; @@ -543,7 +542,7 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *police) return police->tcfp_result; } - PSCHED_GET_TIME(now); + now = psched_get_time(); toks = psched_tdiff_bounded(now, police->tcfp_t_c, police->tcfp_burst); if (police->tcfp_P_tab) { diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index f9e8403c5222..414a97c962f1 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -385,7 +385,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) psched_time_t now; psched_tdiff_t incr; - PSCHED_GET_TIME(now); + now = psched_get_time(); incr = now - q->now_rt; now = q->now + incr; @@ -654,7 +654,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) psched_tdiff_t delay = 0; unsigned pmask; - PSCHED_GET_TIME(now); + now = psched_get_time(); pmask = q->pmask; q->pmask = 0; @@ -1003,7 +1003,7 @@ cbq_dequeue(struct Qdisc *sch) psched_time_t now; psched_tdiff_t incr; - PSCHED_GET_TIME(now); + now = psched_get_time(); incr = now - q->now_rt; if (q->tx_class) { @@ -1277,7 +1277,7 @@ cbq_reset(struct Qdisc* sch) qdisc_watchdog_cancel(&q->watchdog); hrtimer_cancel(&q->delay_timer); q->toplevel = TC_CBQ_MAXLEVEL; - PSCHED_GET_TIME(q->now); + q->now = psched_get_time(); q->now_rt = q->now; for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++) @@ -1448,7 +1448,7 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt) hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); q->delay_timer.function = cbq_undelay; q->toplevel = TC_CBQ_MAXLEVEL; - PSCHED_GET_TIME(q->now); + q->now = psched_get_time(); q->now_rt = q->now; cbq_link_class(&q->link); diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 6a762cf781d7..7d51d0d6a70e 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -629,9 +629,7 @@ rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y) static void init_ed(struct hfsc_class *cl, unsigned int next_len) { - u64 cur_time; - - PSCHED_GET_TIME(cur_time); + u64 cur_time = psched_get_time(); /* update the deadline curve */ rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul); @@ -754,7 +752,7 @@ init_vf(struct hfsc_class *cl, unsigned int len) if (cl->cl_flags & HFSC_USC) { /* class has upper limit curve */ if (cur_time == 0) - PSCHED_GET_TIME(cur_time); + cur_time = psched_get_time(); /* update the ulimit curve */ rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time, @@ -1038,7 +1036,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (cl->cl_parent == NULL && parentid != TC_H_ROOT) return -EINVAL; } - PSCHED_GET_TIME(cur_time); + cur_time = psched_get_time(); sch_tree_lock(sch); if (rsc != NULL) @@ -1639,7 +1637,7 @@ hfsc_dequeue(struct Qdisc *sch) if ((skb = __skb_dequeue(&q->requeue))) goto out; - PSCHED_GET_TIME(cur_time); + cur_time = psched_get_time(); /* * if there are eligible classes, use real-time criteria. 
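Taken together, the PSCHED conversions above leave timestamps coming from psched_get_time() and bounded differences from psched_tdiff_bounded(), both ordinary inline functions from <net/pkt_sched.h>. The sketch below shows that end state with a token-bucket style refill modelled loosely on the tbf/act_police hunks; struct my_bucket and my_refill() are made-up names, not kernel code.

#include <net/pkt_sched.h>

struct my_bucket {
	psched_time_t	t_c;		/* time of the last refill */
	long		buffer;		/* burst bound, in psched ticks */
	long		tokens;
};

static void my_refill(struct my_bucket *b)
{
	psched_time_t now = psched_get_time();
	long toks = psched_tdiff_bounded(now, b->t_c, b->buffer);

	/* accumulate, but never beyond the configured burst */
	toks += b->tokens;
	if (toks > b->buffer)
		toks = b->buffer;

	b->t_c = now;
	b->tokens = toks;
}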
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 71e4c92b7e87..3f528554b0d4 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -965,7 +965,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) if (!sch->q.qlen) goto fin; - PSCHED_GET_TIME(q->now); + q->now = psched_get_time(); next_event = q->now + 5 * PSCHED_TICKS_PER_SEC; q->nwc_hit = 0; @@ -1274,7 +1274,7 @@ static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q) parent->un.leaf.prio = parent->prio; parent->tokens = parent->buffer; parent->ctokens = parent->cbuffer; - PSCHED_GET_TIME(parent->t_c); + parent->t_c = psched_get_time(); parent->cmode = HTB_CAN_SEND; } @@ -1471,7 +1471,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, cl->tokens = hopt->buffer; cl->ctokens = hopt->cbuffer; cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */ - PSCHED_GET_TIME(cl->t_c); + cl->t_c = psched_get_time(); cl->cmode = HTB_CAN_SEND; /* attach to the hash list and parent's family */ diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 1e88301f505c..5d9d8bc9cc3a 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -217,7 +217,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) delay = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist); - PSCHED_GET_TIME(now); + now = psched_get_time(); cb->time_to_send = now + delay; ++q->counter; ret = q->qdisc->enqueue(skb, q->qdisc); @@ -226,7 +226,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) * Do re-ordering by putting one out of N packets at the front * of the queue. */ - PSCHED_GET_TIME(cb->time_to_send); + cb->time_to_send = psched_get_time(); q->counter = 0; ret = q->qdisc->ops->requeue(skb, q->qdisc); } @@ -281,11 +281,9 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) if (skb) { const struct netem_skb_cb *cb = (const struct netem_skb_cb *)skb->cb; - psched_time_t now; + psched_time_t now = psched_get_time(); /* if more time remaining? */ - PSCHED_GET_TIME(now); - if (cb->time_to_send <= now) { pr_debug("netem_dequeue: return skb=%p\n", skb); sch->q.qlen--; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index da9f40e54447..53862953baaf 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -199,8 +199,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch) long ptoks = 0; unsigned int len = skb->len; - PSCHED_GET_TIME(now); - + now = psched_get_time(); toks = psched_tdiff_bounded(now, q->t_c, q->buffer); if (q->P_tab) { @@ -254,7 +253,7 @@ static void tbf_reset(struct Qdisc* sch) qdisc_reset(q->qdisc); sch->q.qlen = 0; - PSCHED_GET_TIME(q->t_c); + q->t_c = psched_get_time(); q->tokens = q->buffer; q->ptokens = q->mtu; qdisc_watchdog_cancel(&q->watchdog); @@ -364,7 +363,7 @@ static int tbf_init(struct Qdisc* sch, struct rtattr *opt) if (opt == NULL) return -EINVAL; - PSCHED_GET_TIME(q->t_c); + q->t_c = psched_get_time(); qdisc_watchdog_init(&q->watchdog, sch); q->qdisc = &noop_qdisc; -- cgit v1.2.3-59-g8ed1b From a48b5a61448899040dfbd2e0cd55b06a2bd2466c Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 23 Mar 2007 11:29:43 -0700 Subject: [NET_SCHED]: Unline tcf_destroy Uninline tcf_destroy and add a helper function to destroy an entire filter chain. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/net/sch_generic.h | 10 ++-------- net/sched/sch_api.c | 18 ++++++++++++++++++ net/sched/sch_atm.c | 17 ++--------------- net/sched/sch_cbq.c | 14 ++------------ net/sched/sch_dsmark.c | 8 +------- net/sched/sch_hfsc.c | 13 +------------ net/sched/sch_htb.c | 14 ++------------ net/sched/sch_ingress.c | 7 +------ net/sched/sch_prio.c | 7 +------ 9 files changed, 30 insertions(+), 78 deletions(-) (limited to 'include') diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a3f4ddd1d6a8..1b8e35197ebe 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -177,14 +177,8 @@ extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops); extern struct Qdisc *qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops, u32 parentid); - -static inline void -tcf_destroy(struct tcf_proto *tp) -{ - tp->ops->destroy(tp); - module_put(tp->ops->owner); - kfree(tp); -} +extern void tcf_destroy(struct tcf_proto *tp); +extern void tcf_destroy_chain(struct tcf_proto *fl); static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff_head *list) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 58732509160d..5b5bce0694e2 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1220,6 +1220,24 @@ reclassify: return -1; } +void tcf_destroy(struct tcf_proto *tp) +{ + tp->ops->destroy(tp); + module_put(tp->ops->owner); + kfree(tp); +} + +void tcf_destroy_chain(struct tcf_proto *fl) +{ + struct tcf_proto *tp; + + while ((tp = fl) != NULL) { + fl = tp->next; + tcf_destroy(tp); + } +} +EXPORT_SYMBOL(tcf_destroy_chain); + #ifdef CONFIG_PROC_FS static int psched_show(struct seq_file *seq, void *v) { diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 0cc3c9b72728..be7d299acd73 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -158,19 +158,6 @@ static unsigned long atm_tc_bind_filter(struct Qdisc *sch, return atm_tc_get(sch,classid); } - -static void destroy_filters(struct atm_flow_data *flow) -{ - struct tcf_proto *filter; - - while ((filter = flow->filter_list)) { - DPRINTK("destroy_filters: destroying filter %p\n",filter); - flow->filter_list = filter->next; - tcf_destroy(filter); - } -} - - /* * atm_tc_put handles all destructions, including the ones that are explicitly * requested (atm_tc_destroy, etc.). The assumption here is that we never drop @@ -195,7 +182,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl) *prev = flow->next; DPRINTK("atm_tc_put: qdisc %p\n",flow->q); qdisc_destroy(flow->q); - destroy_filters(flow); + tcf_destroy_chain(flow->filter_list); if (flow->sock) { DPRINTK("atm_tc_put: f_count %d\n", file_count(flow->sock->file)); @@ -611,7 +598,7 @@ static void atm_tc_destroy(struct Qdisc *sch) DPRINTK("atm_tc_destroy(sch %p,[qdisc %p])\n",sch,p); /* races ? 
*/ while ((flow = p->flows)) { - destroy_filters(flow); + tcf_destroy_chain(flow->filter_list); if (flow->ref > 1) printk(KERN_ERR "atm_destroy: %p->ref = %d\n",flow, flow->ref); diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 414a97c962f1..a294542cb8e4 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1717,23 +1717,13 @@ static unsigned long cbq_get(struct Qdisc *sch, u32 classid) return 0; } -static void cbq_destroy_filters(struct cbq_class *cl) -{ - struct tcf_proto *tp; - - while ((tp = cl->filter_list) != NULL) { - cl->filter_list = tp->next; - tcf_destroy(tp); - } -} - static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) { struct cbq_sched_data *q = qdisc_priv(sch); BUG_TRAP(!cl->filters); - cbq_destroy_filters(cl); + tcf_destroy_chain(cl->filter_list); qdisc_destroy(cl->q); qdisc_put_rtab(cl->R_tab); #ifdef CONFIG_NET_ESTIMATOR @@ -1760,7 +1750,7 @@ cbq_destroy(struct Qdisc* sch) */ for (h = 0; h < 16; h++) for (cl = q->classes[h]; cl; cl = cl->next) - cbq_destroy_filters(cl); + tcf_destroy_chain(cl->filter_list); for (h = 0; h < 16; h++) { struct cbq_class *next; diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 2c857af79a1e..e38e0d00d1e6 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -412,16 +412,10 @@ static void dsmark_reset(struct Qdisc *sch) static void dsmark_destroy(struct Qdisc *sch) { struct dsmark_qdisc_data *p = PRIV(sch); - struct tcf_proto *tp; DPRINTK("dsmark_destroy(sch %p,[qdisc %p])\n", sch, p); - while (p->filter_list) { - tp = p->filter_list; - p->filter_list = tp->next; - tcf_destroy(tp); - } - + tcf_destroy_chain(p->filter_list); qdisc_destroy(p->q); kfree(p->mask); } diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 7d51d0d6a70e..9d124c4ee3a7 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1121,23 +1121,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return 0; } -static void -hfsc_destroy_filters(struct tcf_proto **fl) -{ - struct tcf_proto *tp; - - while ((tp = *fl) != NULL) { - *fl = tp->next; - tcf_destroy(tp); - } -} - static void hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl) { struct hfsc_sched *q = qdisc_priv(sch); - hfsc_destroy_filters(&cl->filter_list); + tcf_destroy_chain(cl->filter_list); qdisc_destroy(cl->qdisc); #ifdef CONFIG_NET_ESTIMATOR gen_kill_estimator(&cl->bstats, &cl->rate_est); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 3f528554b0d4..99bcec8dd04c 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1236,16 +1236,6 @@ static unsigned long htb_get(struct Qdisc *sch, u32 classid) return (unsigned long)cl; } -static void htb_destroy_filters(struct tcf_proto **fl) -{ - struct tcf_proto *tp; - - while ((tp = *fl) != NULL) { - *fl = tp->next; - tcf_destroy(tp); - } -} - static inline int htb_parent_last_child(struct htb_class *cl) { if (!cl->parent) @@ -1289,7 +1279,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) qdisc_put_rtab(cl->rate); qdisc_put_rtab(cl->ceil); - htb_destroy_filters(&cl->filter_list); + tcf_destroy_chain(cl->filter_list); while (!list_empty(&cl->children)) htb_destroy_class(sch, list_entry(cl->children.next, @@ -1321,7 +1311,7 @@ static void htb_destroy(struct Qdisc *sch) and surprisingly it worked in 2.4. But it must precede it because filter need its target class alive to be able to call unbind_filter on it (without Oops). 
*/ - htb_destroy_filters(&q->filter_list); + tcf_destroy_chain(q->filter_list); while (!list_empty(&q->root)) htb_destroy_class(sch, list_entry(q->root.next, diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index f63d5c6eb302..1fb60aba1e6c 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -346,14 +346,9 @@ static void ingress_reset(struct Qdisc *sch) static void ingress_destroy(struct Qdisc *sch) { struct ingress_qdisc_data *p = PRIV(sch); - struct tcf_proto *tp; DPRINTK("ingress_destroy(sch %p,[qdisc %p])\n", sch, p); - while (p->filter_list) { - tp = p->filter_list; - p->filter_list = tp->next; - tcf_destroy(tp); - } + tcf_destroy_chain(p->filter_list); #if 0 /* for future use */ qdisc_destroy(p->q); diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index f13996348dda..5cfe60bf6e25 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -189,13 +189,8 @@ prio_destroy(struct Qdisc* sch) { int prio; struct prio_sched_data *q = qdisc_priv(sch); - struct tcf_proto *tp; - - while ((tp = q->filter_list) != NULL) { - q->filter_list = tp->next; - tcf_destroy(tp); - } + tcf_destroy_chain(q->filter_list); for (prio=0; priobands; prio++) qdisc_destroy(q->queues[prio]); } -- cgit v1.2.3-59-g8ed1b From b6e1331f3ce25a56edb956054eaf8011654686cb Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 20 Apr 2007 12:23:15 -0700 Subject: [SCTP]: Implement SCTP_FRAGMENT_INTERLEAVE socket option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This option was introduced in draft-ietf-tsvwg-sctpsocket-13. It prevents head-of-line blocking in the case of one-to-many endpoint. Applications enabling this option really must enable SCTP_SNDRCV event so that they would know where the data belongs. Based on an earlier patch by Ivan Skytte Jørgensen. Additionally, this functionality now permits multiple associations on the same endpoint to enter Partial Delivery. Applications should be extra careful, when using this functionality, to track EOR indicators. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 3 +- include/net/sctp/ulpqueue.h | 2 +- include/net/sctp/user.h | 4 +- net/sctp/socket.c | 84 +++++++++++++++++++++++++++++++++--- net/sctp/ulpqueue.c | 103 +++++++++++++++++++++++++++++++------------- 5 files changed, 157 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index f431acf3dcea..fe7f5ae1c513 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -304,10 +304,11 @@ struct sctp_sock { __u32 autoclose; __u8 nodelay; __u8 disable_fragments; - __u8 pd_mode; __u8 v4mapped; + __u8 frag_interleave; __u32 adaptation_ind; + atomic_t pd_mode; /* Receive to here while partial delivery is in effect. */ struct sk_buff_head pd_lobby; }; diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h index ab26ab3adae1..39ea3f442b47 100644 --- a/include/net/sctp/ulpqueue.h +++ b/include/net/sctp/ulpqueue.h @@ -78,7 +78,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t); /* Clear the partial data delivery condition on this socket. */ -int sctp_clear_pd(struct sock *sk); +int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc); /* Skip over an SSN. 
*/ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn); diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 67a30eb2b3a4..e77316088dc7 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -97,6 +97,8 @@ enum sctp_optname { #define SCTP_DELAYED_ACK_TIME SCTP_DELAYED_ACK_TIME SCTP_CONTEXT, /* Receive Context */ #define SCTP_CONTEXT SCTP_CONTEXT + SCTP_FRAGMENT_INTERLEAVE, +#define SCTP_FRAGMENT_INTERLEAVE SCTP_FRAGMENT_INTERLEAVE /* Internal Socket Options. Some of the sctp library functions are * implemented using these socket options. @@ -530,7 +532,7 @@ struct sctp_paddrparams { __u32 spp_flags; } __attribute__((packed, aligned(4))); -/* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) +/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) * * This options will get or set the delayed ack timer. The time is set * in milliseconds. If the assoc_id is 0, then this sets or gets the diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a1d026f12b0e..b4be473c68b0 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2255,7 +2255,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk, return 0; } -/* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) +/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) * * This options will get or set the delayed ack timer. The time is set * in milliseconds. If the assoc_id is 0, then this sets or gets the @@ -2792,6 +2792,46 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval, return 0; } +/* + * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) + * + * This options will at a minimum specify if the implementation is doing + * fragmented interleave. Fragmented interleave, for a one to many + * socket, is when subsequent calls to receive a message may return + * parts of messages from different associations. Some implementations + * may allow you to turn this value on or off. If so, when turned off, + * no fragment interleave will occur (which will cause a head of line + * blocking amongst multiple associations sharing the same one to many + * socket). When this option is turned on, then each receive call may + * come from a different association (thus the user must receive data + * with the extended calls (e.g. sctp_recvmsg) to keep track of which + * association each receive belongs to. + * + * This option takes a boolean value. A non-zero value indicates that + * fragmented interleave is on. A value of zero indicates that + * fragmented interleave is off. + * + * Note that it is important that an implementation that allows this + * option to be turned on, have it off by default. Otherwise an unaware + * application using the one to many model may become confused and act + * incorrectly. + */ +static int sctp_setsockopt_fragment_interleave(struct sock *sk, + char __user *optval, + int optlen) +{ + int val; + + if (optlen != sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + sctp_sk(sk)->frag_interleave = (val == 0) ? 
0 : 1; + + return 0; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -2906,7 +2946,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_CONTEXT: retval = sctp_setsockopt_context(sk, optval, optlen); break; - + case SCTP_FRAGMENT_INTERLEAVE: + retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -3134,8 +3176,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) sp->pf = sctp_get_pf_specific(sk->sk_family); /* Control variables for partial data delivery. */ - sp->pd_mode = 0; + atomic_set(&sp->pd_mode, 0); skb_queue_head_init(&sp->pd_lobby); + sp->frag_interleave = 0; /* Create a per socket endpoint structure. Even if we * change the data structure relationships, this may still @@ -3642,7 +3685,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, return 0; } -/* 7.1.24. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) +/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) * * This options will get or set the delayed ack timer. The time is set * in milliseconds. If the assoc_id is 0, then this sets or gets the @@ -4536,6 +4579,29 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len, return 0; } +/* + * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) + * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) + */ +static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + int val; + + if (len < sizeof(int)) + return -EINVAL; + + len = sizeof(int); + + val = sctp_sk(sk)->frag_interleave; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -4648,6 +4714,10 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, case SCTP_CONTEXT: retval = sctp_getsockopt_context(sk, len, optval, optlen); break; + case SCTP_FRAGMENT_INTERLEAVE: + retval = sctp_getsockopt_fragment_interleave(sk, len, optval, + optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -5742,9 +5812,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. */ skb_queue_head_init(&newsp->pd_lobby); - sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode; + atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); - if (sctp_sk(oldsk)->pd_mode) { + if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { struct sk_buff_head *queue; /* Decide which queue to move pd_lobby skbs to. */ @@ -5770,7 +5840,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, * delivery to finish. */ if (assoc->ulpq.pd_mode) - sctp_clear_pd(oldsk); + sctp_clear_pd(oldsk, NULL); } diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index b29e3e4b72c9..ac80c34f6c2c 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -138,18 +138,42 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, /* Clear the partial delivery mode for this socket. Note: This * assumes that no association is currently in partial delivery mode. 
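From userspace, SCTP_FRAGMENT_INTERLEAVE is a boolean socket option on a one-to-many socket, as the comment block in the setsockopt handler above describes. A minimal usage sketch, assuming SCTP headers new enough to define the option; error handling is trimmed and the helper name is hypothetical:

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>
#include <unistd.h>

/* Open a one-to-many SCTP socket with fragment interleave enabled. */
int open_interleaved_sctp(void)
{
	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
	int on = 1;

	if (fd < 0)
		return -1;
	/* Non-zero turns interleaving on; the application must then use
	 * sctp_recvmsg()/SCTP_SNDRCV events to tell which association a
	 * partially delivered message belongs to, as the patch warns. */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
		       &on, sizeof(on)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}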
*/ -int sctp_clear_pd(struct sock *sk) +int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc) { struct sctp_sock *sp = sctp_sk(sk); - sp->pd_mode = 0; - if (!skb_queue_empty(&sp->pd_lobby)) { - struct list_head *list; - sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); - list = (struct list_head *)&sctp_sk(sk)->pd_lobby; - INIT_LIST_HEAD(list); - return 1; + if (atomic_dec_and_test(&sp->pd_mode)) { + /* This means there are no other associations in PD, so + * we can go ahead and clear out the lobby in one shot + */ + if (!skb_queue_empty(&sp->pd_lobby)) { + struct list_head *list; + sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); + list = (struct list_head *)&sctp_sk(sk)->pd_lobby; + INIT_LIST_HEAD(list); + return 1; + } + } else { + /* There are other associations in PD, so we only need to + * pull stuff out of the lobby that belongs to the + * associations that is exiting PD (all of its notifications + * are posted here). + */ + if (!skb_queue_empty(&sp->pd_lobby) && asoc) { + struct sk_buff *skb, *tmp; + struct sctp_ulpevent *event; + + sctp_skb_for_each(skb, &sp->pd_lobby, tmp) { + event = sctp_skb2event(skb); + if (event->asoc == asoc) { + __skb_unlink(skb, &sp->pd_lobby); + __skb_queue_tail(&sk->sk_receive_queue, + skb); + } + } + } } + return 0; } @@ -157,7 +181,7 @@ int sctp_clear_pd(struct sock *sk) static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) { ulpq->pd_mode = 0; - return sctp_clear_pd(ulpq->asoc->base.sk); + return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); } /* If the SKB of 'event' is on a list, it is the first such member @@ -187,25 +211,35 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) * the association the cause of the partial delivery. */ - if (!sctp_sk(sk)->pd_mode) { + if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) { queue = &sk->sk_receive_queue; - } else if (ulpq->pd_mode) { - /* If the association is in partial delivery, we - * need to finish delivering the partially processed - * packet before passing any other data. This is - * because we don't truly support stream interleaving. - */ - if ((event->msg_flags & MSG_NOTIFICATION) || - (SCTP_DATA_NOT_FRAG == - (event->msg_flags & SCTP_DATA_FRAG_MASK))) - queue = &sctp_sk(sk)->pd_lobby; - else { - clear_pd = event->msg_flags & MSG_EOR; - queue = &sk->sk_receive_queue; + } else { + if (ulpq->pd_mode) { + /* If the association is in partial delivery, we + * need to finish delivering the partially processed + * packet before passing any other data. This is + * because we don't truly support stream interleaving. + */ + if ((event->msg_flags & MSG_NOTIFICATION) || + (SCTP_DATA_NOT_FRAG == + (event->msg_flags & SCTP_DATA_FRAG_MASK))) + queue = &sctp_sk(sk)->pd_lobby; + else { + clear_pd = event->msg_flags & MSG_EOR; + queue = &sk->sk_receive_queue; + } + } else { + /* + * If fragment interleave is enabled, we + * can queue this to the recieve queue instead + * of the lobby. + */ + if (sctp_sk(sk)->frag_interleave) + queue = &sk->sk_receive_queue; + else + queue = &sctp_sk(sk)->pd_lobby; } - } else - queue = &sctp_sk(sk)->pd_lobby; - + } /* If we are harvesting multiple skbs they will be * collected on a list. @@ -826,18 +860,29 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, { struct sctp_ulpevent *event; struct sctp_association *asoc; + struct sctp_sock *sp; asoc = ulpq->asoc; + sp = sctp_sk(asoc->base.sk); - /* Are we already in partial delivery mode? 
*/ - if (!sctp_sk(asoc->base.sk)->pd_mode) { + /* If the association is already in Partial Delivery mode + * we have noting to do. + */ + if (ulpq->pd_mode) + return; + /* If the user enabled fragment interleave socket option, + * multiple associations can enter partial delivery. + * Otherwise, we can only enter partial delivery if the + * socket is not in partial deliver mode. + */ + if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) { /* Is partial delivery possible? */ event = sctp_ulpq_retrieve_first(ulpq); /* Send event to the ULP. */ if (event) { sctp_ulpq_tail_event(ulpq, event); - sctp_sk(asoc->base.sk)->pd_mode = 1; + atomic_inc(&sp->pd_mode); ulpq->pd_mode = 1; return; } -- cgit v1.2.3-59-g8ed1b From d49d91d79a8dc5e85108a5ae1c8eef23dec135c1 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:32:00 -0700 Subject: [SCTP]: Implement SCTP_PARTIAL_DELIVERY_POINT option. This option induces partial delivery to run as soon as the specified amount of data has been accumulated on the association. However, we give preference to fully reassembled messages over PD messages. In any case, window and buffer is freed up. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 1 + include/net/sctp/user.h | 2 ++ net/sctp/socket.c | 57 +++++++++++++++++++++++++++++++++++++++++ net/sctp/ulpqueue.c | 64 +++++++++++++++++++++++++++++++++++++++++++--- 4 files changed, 120 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index fe7f5ae1c513..37b4a24e589b 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -307,6 +307,7 @@ struct sctp_sock { __u8 v4mapped; __u8 frag_interleave; __u32 adaptation_ind; + __u32 pd_point; atomic_t pd_mode; /* Receive to here while partial delivery is in effect. */ diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index e77316088dc7..9a8352710631 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -99,6 +99,8 @@ enum sctp_optname { #define SCTP_CONTEXT SCTP_CONTEXT SCTP_FRAGMENT_INTERLEAVE, #define SCTP_FRAGMENT_INTERLEAVE SCTP_FRAGMENT_INTERLEAVE + SCTP_PARTIAL_DELIVERY_POINT, /* Set/Get partial delivery point */ +#define SCTP_PARTIAL_DELIVERY_POINT SCTP_PARTIAL_DELIVERY_POINT /* Internal Socket Options. Some of the sctp library functions are * implemented using these socket options. diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b4be473c68b0..1e787a2d0b5f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2832,6 +2832,32 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk, return 0; } +/* + * 7.1.25. Set or Get the sctp partial delivery point + * (SCTP_PARTIAL_DELIVERY_POINT) + * This option will set or get the SCTP partial delivery point. This + * point is the size of a message where the partial delivery API will be + * invoked to help free up rwnd space for the peer. Setting this to a + * lower value will cause partial delivery's to happen more often. The + * calls argument is an integer that sets or gets the partial delivery + * point. + */ +static int sctp_setsockopt_partial_delivery_point(struct sock *sk, + char __user *optval, + int optlen) +{ + u32 val; + + if (optlen != sizeof(u32)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + sctp_sk(sk)->pd_point = val; + + return 0; /* is this the right error code? 
*/ +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -2911,6 +2937,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_DELAYED_ACK_TIME: retval = sctp_setsockopt_delayed_ack_time(sk, optval, optlen); break; + case SCTP_PARTIAL_DELIVERY_POINT: + retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); + break; case SCTP_INITMSG: retval = sctp_setsockopt_initmsg(sk, optval, optlen); @@ -4602,6 +4631,30 @@ static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, return 0; } +/* + * 7.1.25. Set or Get the sctp partial delivery point + * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) + */ +static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + u32 val; + + if (len < sizeof(u32)) + return -EINVAL; + + len = sizeof(u32); + + val = sctp_sk(sk)->pd_point; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return -ENOTSUPP; +} + SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -4718,6 +4771,10 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_fragment_interleave(sk, len, optval, optlen); break; + case SCTP_PARTIAL_DELIVERY_POINT: + retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, + optlen); + break; default: retval = -ENOPROTOOPT; break; diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index ac80c34f6c2c..0fa4d4d4df17 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -177,6 +177,15 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc) return 0; } +/* Set the pd_mode on the socket and ulpq */ +static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq) +{ + struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); + + atomic_inc(&sp->pd_mode); + ulpq->pd_mode = 1; +} + /* Clear the pd_mode and restart any pending messages waiting for delivery. */ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) { @@ -401,6 +410,11 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u struct sk_buff *first_frag = NULL; __u32 ctsn, next_tsn; struct sctp_ulpevent *retval = NULL; + struct sk_buff *pd_first = NULL; + struct sk_buff *pd_last = NULL; + size_t pd_len = 0; + struct sctp_association *asoc; + u32 pd_point; /* Initialized to 0 just to avoid compiler warning message. Will * never be used with this value. It is referenced only after it @@ -416,6 +430,10 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u * we expect to find the remaining middle fragments and the last * fragment in order. If not, first_frag is reset to NULL and we * start the next pass when we find another first fragment. + * + * There is a potential to do partial delivery if user sets + * SCTP_PARTIAL_DELIVERY_POINT option. Lets count some things here + * to see if can do PD. */ skb_queue_walk(&ulpq->reasm, pos) { cevent = sctp_skb2event(pos); @@ -423,14 +441,32 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { case SCTP_DATA_FIRST_FRAG: + /* If this "FIRST_FRAG" is the first + * element in the queue, then count it towards + * possible PD. 
+ */ + if (pos == ulpq->reasm.next) { + pd_first = pos; + pd_last = pos; + pd_len = pos->len; + } else { + pd_first = NULL; + pd_last = NULL; + pd_len = 0; + } + first_frag = pos; next_tsn = ctsn + 1; break; case SCTP_DATA_MIDDLE_FRAG: - if ((first_frag) && (ctsn == next_tsn)) + if ((first_frag) && (ctsn == next_tsn)) { next_tsn++; - else + if (pd_first) { + pd_last = pos; + pd_len += pos->len; + } + } else first_frag = NULL; break; @@ -441,7 +477,28 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u first_frag = NULL; break; }; + } + + asoc = ulpq->asoc; + if (pd_first) { + /* Make sure we can enter partial deliver. + * We can trigger partial delivery only if framgent + * interleave is set, or the socket is not already + * in partial delivery. + */ + if (!sctp_sk(asoc->base.sk)->frag_interleave && + atomic_read(&sctp_sk(asoc->base.sk)->pd_mode)) + goto done; + cevent = sctp_skb2event(pd_first); + pd_point = sctp_sk(asoc->base.sk)->pd_point; + if (pd_point && pd_point <= pd_len) { + retval = sctp_make_reassembled_event(&ulpq->reasm, + pd_first, + pd_last); + if (retval) + sctp_ulpq_set_pd(ulpq); + } } done: return retval; @@ -882,8 +939,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, /* Send event to the ULP. */ if (event) { sctp_ulpq_tail_event(ulpq, event); - atomic_inc(&sp->pd_mode); - ulpq->pd_mode = 1; + sctp_ulpq_set_pd(ulpq); return; } } -- cgit v1.2.3-59-g8ed1b From 1ae4114dce35dd1d32ed847f60b599dbbdfd5829 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:32:26 -0700 Subject: [SCTP]: Implement SCTP_ADDR_CONFIRMED state for ADDR_CHNAGE event Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/user.h | 1 + net/sctp/associola.c | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 9a8352710631..4ed752119bbc 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -265,6 +265,7 @@ enum sctp_spc_state { SCTP_ADDR_REMOVED, SCTP_ADDR_ADDED, SCTP_ADDR_MADE_PRIM, + SCTP_ADDR_CONFIRMED, }; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 78d2ddb5ca18..85af1cb70fe8 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -714,8 +714,16 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, /* Record the transition on the transport. */ switch (command) { case SCTP_TRANSPORT_UP: + /* If we are moving from UNCONFIRMED state due + * to heartbeat success, report the SCTP_ADDR_CONFIRMED + * state to the user, otherwise report SCTP_ADDR_AVAILABLE. + */ + if (SCTP_UNCONFIRMED == transport->state && + SCTP_HEARTBEAT_SUCCESS == error) + spc_state = SCTP_ADDR_CONFIRMED; + else + spc_state = SCTP_ADDR_AVAILABLE; transport->state = SCTP_ACTIVE; - spc_state = SCTP_ADDR_AVAILABLE; break; case SCTP_TRANSPORT_DOWN: -- cgit v1.2.3-59-g8ed1b From bdf3092af601ccad765974652ab103162fbe14f4 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:33:12 -0700 Subject: [SCTP]: Honor flags when setting peer address parameters Parameters only take effect when a corresponding flag bit is set and a value is specified. This means we need to check the flags in addition to checking for non-zero value. Signed-off-by: Vlad Yasevich Signed-off-by: David S. 
Miller --- include/net/sctp/user.h | 15 +++++++------- net/sctp/socket.c | 54 ++++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 52 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 4ed752119bbc..80b7afea17fc 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -513,16 +513,17 @@ struct sctp_setadaptation { * address's parameters: */ enum sctp_spp_flags { - SPP_HB_ENABLE = 1, /*Enable heartbeats*/ - SPP_HB_DISABLE = 2, /*Disable heartbeats*/ + SPP_HB_ENABLE = 1<<0, /*Enable heartbeats*/ + SPP_HB_DISABLE = 1<<1, /*Disable heartbeats*/ SPP_HB = SPP_HB_ENABLE | SPP_HB_DISABLE, - SPP_HB_DEMAND = 4, /*Send heartbeat immediately*/ - SPP_PMTUD_ENABLE = 8, /*Enable PMTU discovery*/ - SPP_PMTUD_DISABLE = 16, /*Disable PMTU discovery*/ + SPP_HB_DEMAND = 1<<2, /*Send heartbeat immediately*/ + SPP_PMTUD_ENABLE = 1<<3, /*Enable PMTU discovery*/ + SPP_PMTUD_DISABLE = 1<<4, /*Disable PMTU discovery*/ SPP_PMTUD = SPP_PMTUD_ENABLE | SPP_PMTUD_DISABLE, - SPP_SACKDELAY_ENABLE = 32, /*Enable SACK*/ - SPP_SACKDELAY_DISABLE = 64, /*Disable SACK*/ + SPP_SACKDELAY_ENABLE = 1<<5, /*Enable SACK*/ + SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/ SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE, + SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */ }; struct sctp_paddrparams { diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1e787a2d0b5f..dda2f6700f5b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2039,6 +2039,10 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, * SPP_HB_DEMAND - Request a user initiated heartbeat * to be made immediately. * + * SPP_HB_TIME_IS_ZERO - Specify's that the time for + * heartbeat delayis to be set to the value of 0 + * milliseconds. + * * SPP_PMTUD_ENABLE - This field will enable PMTU * discovery upon the specified address. Note that * if the address feild is empty then all addresses @@ -2081,13 +2085,30 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, return error; } - if (params->spp_hbinterval) { - if (trans) { - trans->hbinterval = msecs_to_jiffies(params->spp_hbinterval); - } else if (asoc) { - asoc->hbinterval = msecs_to_jiffies(params->spp_hbinterval); - } else { - sp->hbinterval = params->spp_hbinterval; + /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of + * this field is ignored. Note also that a value of zero indicates + * the current setting should be left unchanged. + */ + if (params->spp_flags & SPP_HB_ENABLE) { + + /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is + * set. This lets us use 0 value when this flag + * is set. + */ + if (params->spp_flags & SPP_HB_TIME_IS_ZERO) + params->spp_hbinterval = 0; + + if (params->spp_hbinterval || + (params->spp_flags & SPP_HB_TIME_IS_ZERO)) { + if (trans) { + trans->hbinterval = + msecs_to_jiffies(params->spp_hbinterval); + } else if (asoc) { + asoc->hbinterval = + msecs_to_jiffies(params->spp_hbinterval); + } else { + sp->hbinterval = params->spp_hbinterval; + } } } @@ -2104,7 +2125,12 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } } - if (params->spp_pathmtu) { + /* When Path MTU discovery is disabled the value specified here will + * be the "fixed" path mtu (i.e. the value of the spp_flags field must + * include the flag SPP_PMTUD_DISABLE for this field to have any + * effect). 
+ */ + if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { if (trans) { trans->pathmtu = params->spp_pathmtu; sctp_assoc_sync_pmtu(asoc); @@ -2135,7 +2161,11 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } } - if (params->spp_sackdelay) { + /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the + * value of this field is ignored. Note also that a value of zero + * indicates the current setting should be left unchanged. + */ + if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { if (trans) { trans->sackdelay = msecs_to_jiffies(params->spp_sackdelay); @@ -2163,7 +2193,11 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } } - if (params->spp_pathmaxrxt) { + /* Note that unless the spp_flag is set to SPP_PMTUD_ENABLE the value + * of this field is ignored. Note also that a value of zero + * indicates the current setting should be left unchanged. + */ + if ((params->spp_flags & SPP_PMTUD_ENABLE) && params->spp_pathmaxrxt) { if (trans) { trans->pathmaxrxt = params->spp_pathmaxrxt; } else if (asoc) { -- cgit v1.2.3-59-g8ed1b From a5a35e76753d27e782028843a5186f176b50dd16 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:34:08 -0700 Subject: [SCTP]: Implement sac_info field in SCTP_ASSOC_CHANGE notification. As stated in the sctp socket api draft: sac_info: variable If the sac_state is SCTP_COMM_LOST and an ABORT chunk was received for this association, sac_info[] contains the complete ABORT chunk as defined in the SCTP specification RFC2960 [RFC2960] section 3.3.7. We now save received ABORT chunks into the sac_info field and pass that to the user. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/ulpevent.h | 1 + include/net/sctp/user.h | 1 + net/sctp/sm_sideeffect.c | 11 +++++++--- net/sctp/sm_statefuns.c | 14 ++++++------- net/sctp/ulpevent.c | 49 ++++++++++++++++++++++++++++++++++++++------- 5 files changed, 59 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 2923e3d31a08..de88ed5b0ba6 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -89,6 +89,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( __u16 error, __u16 outbound, __u16 inbound, + struct sctp_chunk *chunk, gfp_t gfp); struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 80b7afea17fc..1b3153c2cdf0 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -217,6 +217,7 @@ struct sctp_assoc_change { __u16 sac_outbound_streams; __u16 sac_inbound_streams; sctp_assoc_t sac_assoc_id; + __u8 sac_info[0]; }; /* diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 135567493119..0a1a197193a2 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -464,7 +464,7 @@ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, struct sctp_ulpevent *event; event = sctp_ulpevent_make_assoc_change(asoc,0, SCTP_CANT_STR_ASSOC, - (__u16)error, 0, 0, + (__u16)error, 0, 0, NULL, GFP_ATOMIC); if (event) @@ -492,8 +492,13 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, /* Cancel any partial delivery in progress. 
*/ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); - event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, - (__u16)error, 0, 0, + if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT) + event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, + (__u16)error, 0, 0, chunk, + GFP_ATOMIC); + else + event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, + (__u16)error, 0, 0, NULL, GFP_ATOMIC); if (event) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 438e5dc5c714..e0ec16dd678a 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -186,7 +186,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep, * notification is passed to the upper layer. */ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, - 0, 0, 0, GFP_ATOMIC); + 0, 0, 0, NULL, GFP_ATOMIC); if (ev) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); @@ -661,7 +661,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0, new_asoc->c.sinit_num_ostreams, new_asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem_ev; @@ -790,7 +790,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep, ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0, asoc->c.sinit_num_ostreams, asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem; @@ -1625,7 +1625,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep, ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0, new_asoc->c.sinit_num_ostreams, new_asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem_ev; @@ -1691,7 +1691,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep, ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0, new_asoc->c.sinit_num_ostreams, new_asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem_ev; @@ -1786,7 +1786,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep, SCTP_COMM_UP, 0, asoc->c.sinit_num_ostreams, asoc->c.sinit_max_instreams, - GFP_ATOMIC); + NULL, GFP_ATOMIC); if (!ev) goto nomem; @@ -3035,7 +3035,7 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep, * notification is passed to the upper layer. */ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, - 0, 0, 0, GFP_ATOMIC); + 0, 0, 0, NULL, GFP_ATOMIC); if (!ev) goto nomem; diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 2e11bc8d5d35..661ea2dd78ba 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -131,19 +131,54 @@ static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( const struct sctp_association *asoc, __u16 flags, __u16 state, __u16 error, __u16 outbound, - __u16 inbound, gfp_t gfp) + __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_assoc_change *sac; struct sk_buff *skb; - event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), + /* If the lower layer passed in the chunk, it will be + * an ABORT, so we need to include it in the sac_info. + */ + if (chunk) { + /* sctp_inqu_pop() has allready pulled off the chunk + * header. 
We need to put it back temporarily + */ + skb_push(chunk->skb, sizeof(sctp_chunkhdr_t)); + + /* Copy the chunk data to a new skb and reserve enough + * head room to use as notification. + */ + skb = skb_copy_expand(chunk->skb, + sizeof(struct sctp_assoc_change), 0, gfp); + + if (!skb) + goto fail; + + /* put back the chunk header now that we have a copy */ + skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); + + /* Embed the event fields inside the cloned skb. */ + event = sctp_skb2event(skb); + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); + + /* Include the notification structure */ + sac = (struct sctp_assoc_change *) + skb_push(skb, sizeof(struct sctp_assoc_change)); + + /* Trim the buffer to the right length. */ + skb_trim(skb, sizeof(struct sctp_assoc_change) + + ntohs(chunk->chunk_hdr->length)); + } else { + event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), MSG_NOTIFICATION, gfp); - if (!event) - goto fail; - skb = sctp_event2skb(event); - sac = (struct sctp_assoc_change *) - skb_put(skb, sizeof(struct sctp_assoc_change)); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + sac = (struct sctp_assoc_change *) skb_put(skb, + sizeof(struct sctp_assoc_change)); + } /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE -- cgit v1.2.3-59-g8ed1b From 703315712cfccfe0b45ef4aa6994527d8ee95e33 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Fri, 23 Mar 2007 11:34:36 -0700 Subject: [SCTP]: Implement SCTP_MAX_BURST socket option. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 2 +- include/net/sctp/structs.h | 1 + include/net/sctp/user.h | 2 ++ net/sctp/associola.c | 2 +- net/sctp/protocol.c | 2 +- net/sctp/socket.c | 61 ++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 67 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 5ddb85599863..bb37724495a5 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -283,7 +283,7 @@ enum { SCTP_MAX_GABS = 16 }; #define SCTP_RTO_BETA 2 /* 1/4 when converted to right shifts. */ /* Maximum number of new data packets that can be sent in a burst. */ -#define SCTP_MAX_BURST 4 +#define SCTP_DEFAULT_MAX_BURST 4 #define SCTP_CLOCK_GRANULARITY 1 /* 1 jiffy */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 37b4a24e589b..7b4fff93ba7f 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -276,6 +276,7 @@ struct sctp_sock { __u32 default_context; __u32 default_timetolive; __u32 default_rcv_context; + int max_burst; /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to * the destination address every heartbeat interval. This value diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 1b3153c2cdf0..6d2b57758cca 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -101,6 +101,8 @@ enum sctp_optname { #define SCTP_FRAGMENT_INTERLEAVE SCTP_FRAGMENT_INTERLEAVE SCTP_PARTIAL_DELIVERY_POINT, /* Set/Get partial delivery point */ #define SCTP_PARTIAL_DELIVERY_POINT SCTP_PARTIAL_DELIVERY_POINT + SCTP_MAX_BURST, /* Set/Get max burst */ +#define SCTP_MAX_BURST SCTP_MAX_BURST /* Internal Socket Options. Some of the sctp library functions are * implemented using these socket options. 
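As a usage illustration of the two new socket options added above, here is a minimal userspace sketch. It assumes the SCTP_PARTIAL_DELIVERY_POINT and SCTP_MAX_BURST names are exported to applications (for example through an updated <netinet/sctp.h>); the option level and the value types (a u32 and an int) follow the setsockopt handlers in the patches above, while the descriptor name and the chosen values are made up for the example.

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* assumed to carry the new option names */

static int tune_sctp_socket(int sd)
{
	uint32_t pd_point = 8192;	/* invoke partial delivery once 8KB is buffered */
	int max_burst = 2;		/* lower the default burst of 4 packets */

	if (setsockopt(sd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
		       &pd_point, sizeof(pd_point)) < 0) {
		perror("SCTP_PARTIAL_DELIVERY_POINT");
		return -1;
	}
	if (setsockopt(sd, IPPROTO_SCTP, SCTP_MAX_BURST,
		       &max_burst, sizeof(max_burst)) < 0) {
		perror("SCTP_MAX_BURST");
		return -1;
	}
	return 0;
}
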
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 85af1cb70fe8..37a343e1ebb7 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -143,7 +143,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a /* Initialize the maximum mumber of new data packets that can be sent * in a burst. */ - asoc->max_burst = sctp_max_burst; + asoc->max_burst = sp->max_burst; /* initialize association timers */ asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 7c28c9b959e2..c361deb6cea9 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1042,7 +1042,7 @@ SCTP_STATIC __init int sctp_init(void) sctp_cookie_preserve_enable = 1; /* Max.Burst - 4 */ - sctp_max_burst = SCTP_MAX_BURST; + sctp_max_burst = SCTP_DEFAULT_MAX_BURST; /* Association.Max.Retrans - 10 attempts * Path.Max.Retrans - 5 attempts (per destination address) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index dda2f6700f5b..f904f2bc0f2c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2892,6 +2892,36 @@ static int sctp_setsockopt_partial_delivery_point(struct sock *sk, return 0; /* is this the right error code? */ } +/* + * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) + * + * This option will allow a user to change the maximum burst of packets + * that can be emitted by this association. Note that the default value + * is 4, and some implementations may restrict this setting so that it + * can only be lowered. + * + * NOTE: This text doesn't seem right. Do this on a socket basis with + * future associations inheriting the socket value. + */ +static int sctp_setsockopt_maxburst(struct sock *sk, + char __user *optval, + int optlen) +{ + int val; + + if (optlen != sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + if (val < 0) + return -EINVAL; + + sctp_sk(sk)->max_burst = val; + + return 0; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -3012,6 +3042,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); break; + case SCTP_MAX_BURST: + retval = sctp_setsockopt_maxburst(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -3171,6 +3204,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) sp->default_timetolive = 0; sp->default_rcv_context = 0; + sp->max_burst = sctp_max_burst; /* Initialize default setup parameters. These parameters * can be modified with the SCTP_INITMSG socket option or @@ -4689,6 +4723,30 @@ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, return -ENOTSUPP; } +/* + * 7.1.28. 
Set or Get the maximum burst (SCTP_MAX_BURST) + * (chapter and verse is quoted at sctp_setsockopt_maxburst()) + */ +static int sctp_getsockopt_maxburst(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + int val; + + if (len < sizeof(int)) + return -EINVAL; + + len = sizeof(int); + + val = sctp_sk(sk)->max_burst; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return -ENOTSUPP; +} + SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -4809,6 +4867,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, optlen); break; + case SCTP_MAX_BURST: + retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; -- cgit v1.2.3-59-g8ed1b From d30045a0bcf144753869175dd9d840f7ceaf4aba Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 23 Mar 2007 11:37:48 -0700 Subject: [NETLINK]: introduce NLA_BINARY type This patch introduces a new NLA_BINARY attribute policy type with the verification of simply checking the maximum length of the payload. It also fixes a small typo in the example. Signed-off-by: Johannes Berg Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/net/netlink.h | 4 +++- net/netlink/attr.c | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/netlink.h b/include/net/netlink.h index 1c11518fc822..2e4c90a98a7f 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -171,6 +171,7 @@ enum { NLA_MSECS, NLA_NESTED, NLA_NUL_STRING, + NLA_BINARY, __NLA_TYPE_MAX, }; @@ -188,12 +189,13 @@ enum { * NLA_STRING Maximum length of string * NLA_NUL_STRING Maximum length of string (excluding NUL) * NLA_FLAG Unused + * NLA_BINARY Maximum length of attribute payload * All other Exact length of attribute payload * * Example: * static struct nla_policy my_policy[ATTR_MAX+1] __read_mostly = { * [ATTR_FOO] = { .type = NLA_U16 }, - * [ATTR_BAR] = { .type = NLA_STRING, len = BARSIZ }, + * [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ }, * [ATTR_BAZ] = { .len = sizeof(struct mystruct) }, * }; */ diff --git a/net/netlink/attr.c b/net/netlink/attr.c index 004139557e09..df5f820a4c32 100644 --- a/net/netlink/attr.c +++ b/net/netlink/attr.c @@ -67,6 +67,11 @@ static int validate_nla(struct nlattr *nla, int maxtype, } break; + case NLA_BINARY: + if (pt->len && attrlen > pt->len) + return -ERANGE; + break; + default: if (pt->len) minlen = pt->len; -- cgit v1.2.3-59-g8ed1b From b3da2cf37c5c6e47698957a25ab43a7223dbb90f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 23 Mar 2007 11:40:27 -0700 Subject: [INET]: Use jhash + random secret for ehash. The days are gone when this was not an issue, there are folks out there with huge bot networks that can be used to attack the established hash tables on remote systems. So just like the routing cache and connection tracking hash, use Jenkins hash with random secret input. Signed-off-by: David S. 
Miller --- include/net/inet6_hashtables.h | 12 +++++++----- include/net/inet_sock.h | 11 +++++++---- net/ipv4/af_inet.c | 16 ++++++++++++++++ net/ipv6/af_inet6.c | 5 +++++ 4 files changed, 35 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index c28e424f53d9..668056b4bb0b 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -19,6 +19,9 @@ #include #include #include +#include + +#include #include @@ -28,12 +31,11 @@ struct inet_hashinfo; static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport, const struct in6_addr *faddr, const __be16 fport) { - unsigned int hashent = (lport ^ (__force u16)fport); + u32 ports = (lport ^ (__force u16)fport); - hashent ^= (__force u32)(laddr->s6_addr32[3] ^ faddr->s6_addr32[3]); - hashent ^= hashent >> 16; - hashent ^= hashent >> 8; - return hashent; + return jhash_3words((__force u32)laddr->s6_addr32[3], + (__force u32)faddr->s6_addr32[3], + ports, inet_ehash_secret); } static inline int inet6_sk_ehashfn(const struct sock *sk) diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index ce6da97bc848..62daf214931f 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -167,13 +168,15 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to, extern int inet_sk_rebuild_header(struct sock *sk); +extern u32 inet_ehash_secret; +extern void build_ehash_secret(void); + static inline unsigned int inet_ehashfn(const __be32 laddr, const __u16 lport, const __be32 faddr, const __be16 fport) { - unsigned int h = ((__force __u32)laddr ^ lport) ^ ((__force __u32)faddr ^ (__force __u32)fport); - h ^= h >> 16; - h ^= h >> 8; - return h; + return jhash_2words((__force __u32) laddr ^ (__force __u32) faddr, + ((__u32) lport) << 16 | (__force __u32)fport, + inet_ehash_secret); } static inline int inet_sk_ehashfn(const struct sock *sk) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f011390f19c9..b7b7278d801c 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -87,6 +87,7 @@ #include #include #include +#include #include #include @@ -217,6 +218,16 @@ out: return err; } +u32 inet_ehash_secret; +EXPORT_SYMBOL(inet_ehash_secret); + +void build_ehash_secret(void) +{ + while (!inet_ehash_secret) + get_random_bytes(&inet_ehash_secret, 4); +} +EXPORT_SYMBOL(build_ehash_secret); + /* * Create an inet socket. */ @@ -233,6 +244,11 @@ static int inet_create(struct socket *sock, int protocol) int try_loading_module = 0; int err; + if (sock->type != SOCK_RAW && + sock->type != SOCK_DGRAM && + !inet_ehash_secret) + build_ehash_secret(); + sock->state = SS_UNCONNECTED; /* Look for the requested type/protocol pair. */ diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 82572b507547..df31cdd33cda 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -98,6 +98,11 @@ static int inet6_create(struct socket *sock, int protocol) int try_loading_module = 0; int err; + if (sock->type != SOCK_RAW && + sock->type != SOCK_DGRAM && + !inet_ehash_secret) + build_ehash_secret(); + /* Look for the requested type/protocol pair. 
*/ answer = NULL; lookup_protocol: -- cgit v1.2.3-59-g8ed1b From 0947c9fe56d9cf7ad0bc3a03ccd30446cde698e4 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Mon, 26 Mar 2007 17:14:15 -0700 Subject: [NET] fib_rules: goto rule action This patch adds a new rule action FR_ACT_GOTO which allows to skip a set of rules by jumping to another rule. The rule to jump to is specified via the FRA_GOTO attribute which carries a rule preference. Referring to a rule which doesn't exists is explicitely allowed. Such goto rules are marked with the flag FIB_RULE_UNRESOLVED and will act like a rule with a non-matching selector. The rule will become functional as soon as its target is present. The goto action enables performance optimizations by reducing the average number of rules that have to be passed per lookup. Example: 0: from all lookup local 40: not from all to 192.168.23.128 goto 32766 41: from all fwmark 0xa blackhole 42: from all fwmark 0xff blackhole 32766: from all lookup main Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/fib_rules.h | 5 +-- include/net/fib_rules.h | 7 +++- net/core/fib_rules.c | 88 +++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 94 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h index 8270aac2aa5d..ec9c7b1d3e91 100644 --- a/include/linux/fib_rules.h +++ b/include/linux/fib_rules.h @@ -7,6 +7,7 @@ /* rule is permanent, and cannot be deleted */ #define FIB_RULE_PERMANENT 1 #define FIB_RULE_INVERT 2 +#define FIB_RULE_UNRESOLVED 4 struct fib_rule_hdr { @@ -29,7 +30,7 @@ enum FRA_DST, /* destination address */ FRA_SRC, /* source address */ FRA_IFNAME, /* interface name */ - FRA_UNUSED1, + FRA_GOTO, /* target to jump to (FR_ACT_GOTO) */ FRA_UNUSED2, FRA_PRIORITY, /* priority/preference */ FRA_UNUSED3, @@ -51,7 +52,7 @@ enum { FR_ACT_UNSPEC, FR_ACT_TO_TBL, /* Pass to fixed table */ - FR_ACT_RES1, + FR_ACT_GOTO, /* Jump to another rule */ FR_ACT_RES2, FR_ACT_RES3, FR_ACT_RES4, diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index ff3029fe9656..08bab8b6e575 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -19,6 +19,8 @@ struct fib_rule u32 flags; u32 table; u8 action; + u32 target; + struct fib_rule * ctarget; struct rcu_head rcu; }; @@ -35,6 +37,8 @@ struct fib_rules_ops struct list_head list; int rule_size; int addr_size; + int unresolved_rules; + int nr_goto_rules; int (*action)(struct fib_rule *, struct flowi *, int, @@ -66,7 +70,8 @@ struct fib_rules_ops [FRA_PRIORITY] = { .type = NLA_U32 }, \ [FRA_FWMARK] = { .type = NLA_U32 }, \ [FRA_FWMASK] = { .type = NLA_U32 }, \ - [FRA_TABLE] = { .type = NLA_U32 } + [FRA_TABLE] = { .type = NLA_U32 }, \ + [FRA_GOTO] = { .type = NLA_U32 } static inline void fib_rule_get(struct fib_rule *rule) { diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index fdf05af16ba5..0d8bb2efb0c1 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -132,10 +132,23 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, rcu_read_lock(); list_for_each_entry_rcu(rule, ops->rules_list, list) { +jumped: if (!fib_rule_match(rule, ops, fl, flags)) continue; - err = ops->action(rule, fl, flags, arg); + if (rule->action == FR_ACT_GOTO) { + struct fib_rule *target; + + target = rcu_dereference(rule->ctarget); + if (target == NULL) { + continue; + } else { + rule = target; + goto jumped; + } + } else + err = ops->action(rule, fl, flags, arg); + if (err != -EAGAIN) { fib_rule_get(rule); arg->rule = 
rule; @@ -180,7 +193,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) struct fib_rules_ops *ops = NULL; struct fib_rule *rule, *r, *last = NULL; struct nlattr *tb[FRA_MAX+1]; - int err = -EINVAL; + int err = -EINVAL, unresolved = 0; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) goto errout; @@ -237,6 +250,28 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) if (!rule->pref && ops->default_pref) rule->pref = ops->default_pref(); + err = -EINVAL; + if (tb[FRA_GOTO]) { + if (rule->action != FR_ACT_GOTO) + goto errout_free; + + rule->target = nla_get_u32(tb[FRA_GOTO]); + /* Backward jumps are prohibited to avoid endless loops */ + if (rule->target <= rule->pref) + goto errout_free; + + list_for_each_entry(r, ops->rules_list, list) { + if (r->pref == rule->target) { + rule->ctarget = r; + break; + } + } + + if (rule->ctarget == NULL) + unresolved = 1; + } else if (rule->action == FR_ACT_GOTO) + goto errout_free; + err = ops->configure(rule, skb, nlh, frh, tb); if (err < 0) goto errout_free; @@ -249,6 +284,28 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) fib_rule_get(rule); + if (ops->unresolved_rules) { + /* + * There are unresolved goto rules in the list, check if + * any of them are pointing to this new rule. + */ + list_for_each_entry(r, ops->rules_list, list) { + if (r->action == FR_ACT_GOTO && + r->target == rule->pref) { + BUG_ON(r->ctarget != NULL); + rcu_assign_pointer(r->ctarget, rule); + if (--ops->unresolved_rules == 0) + break; + } + } + } + + if (rule->action == FR_ACT_GOTO) + ops->nr_goto_rules++; + + if (unresolved) + ops->unresolved_rules++; + if (last) list_add_rcu(&rule->list, &last->list); else @@ -269,7 +326,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rules_ops *ops = NULL; - struct fib_rule *rule; + struct fib_rule *rule, *tmp; struct nlattr *tb[FRA_MAX+1]; int err = -EINVAL; @@ -322,6 +379,25 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) } list_del_rcu(&rule->list); + + if (rule->action == FR_ACT_GOTO) + ops->nr_goto_rules--; + + /* + * Check if this rule is a target to any of them. If so, + * disable them. As this operation is eventually very + * expensive, it is only performed if goto rules have + * actually been added. 
+ */ + if (ops->nr_goto_rules > 0) { + list_for_each_entry(tmp, ops->rules_list, list) { + if (tmp->ctarget == rule) { + rcu_assign_pointer(tmp->ctarget, NULL); + ops->unresolved_rules++; + } + } + } + synchronize_rcu(); notify_rule_change(RTM_DELRULE, rule, ops, nlh, NETLINK_CB(skb).pid); @@ -371,6 +447,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, frh->action = rule->action; frh->flags = rule->flags; + if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL) + frh->flags |= FIB_RULE_UNRESOLVED; + if (rule->ifname[0]) NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname); @@ -383,6 +462,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, if (rule->mark_mask || rule->mark) NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask); + if (rule->target) + NLA_PUT_U32(skb, FRA_GOTO, rule->target); + if (ops->fill(rule, skb, nlh, frh) < 0) goto nla_put_failure; -- cgit v1.2.3-59-g8ed1b From 2b44368307cd06c5614d7b53801f516c0654020b Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Mon, 26 Mar 2007 17:37:59 -0700 Subject: [NET] fib_rules: Mark rules detached from the device Rules which match against device names in their selector can remain while the device itself disappears, in fact the device doesn't have to present when the rule is added in the first place. The device name is resolved by trying when the rule is added and later by listening to NETDEV_REGISTER/UNREGISTER notifications. This patch adds the flag FIB_RULE_DEV_DETACHED which is set towards userspace when a rule contains a device match which is unresolved at the moment. This eases spotting the reason why certain rules seem not to function properly. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/linux/fib_rules.h | 1 + net/core/fib_rules.c | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h index ec9c7b1d3e91..c151273293e2 100644 --- a/include/linux/fib_rules.h +++ b/include/linux/fib_rules.h @@ -8,6 +8,7 @@ #define FIB_RULE_PERMANENT 1 #define FIB_RULE_INVERT 2 #define FIB_RULE_UNRESOLVED 4 +#define FIB_RULE_DEV_DETACHED 8 struct fib_rule_hdr { diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 0d8bb2efb0c1..7ac602cc8c85 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -450,9 +450,13 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL) frh->flags |= FIB_RULE_UNRESOLVED; - if (rule->ifname[0]) + if (rule->ifname[0]) { NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname); + if (rule->ifindex == -1) + frh->flags |= FIB_RULE_DEV_DETACHED; + } + if (rule->pref) NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref); -- cgit v1.2.3-59-g8ed1b From fa0b2d1d2196dd46527a8d028797e2bca5930a92 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Mon, 26 Mar 2007 17:38:53 -0700 Subject: [NET] fib_rules: Add no-operation action The use of nop rules simplifies the usage of goto rules and adds more flexibility as they allow targets to remain while the actual content of the branches can change easly. Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- include/linux/fib_rules.h | 2 +- net/core/fib_rules.c | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h index c151273293e2..f278ba781d09 100644 --- a/include/linux/fib_rules.h +++ b/include/linux/fib_rules.h @@ -54,7 +54,7 @@ enum FR_ACT_UNSPEC, FR_ACT_TO_TBL, /* Pass to fixed table */ FR_ACT_GOTO, /* Jump to another rule */ - FR_ACT_RES2, + FR_ACT_NOP, /* No operation */ FR_ACT_RES3, FR_ACT_RES4, FR_ACT_BLACKHOLE, /* Drop without notification */ diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 7ac602cc8c85..5824b2644f26 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -146,7 +146,9 @@ jumped: rule = target; goto jumped; } - } else + } else if (rule->action == FR_ACT_NOP) + continue; + else err = ops->action(rule, fl, flags, arg); if (err != -EAGAIN) { -- cgit v1.2.3-59-g8ed1b From 35fc92a9deee0da6e35fdc3150bb134e58f2fd63 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 26 Mar 2007 23:22:20 -0700 Subject: [NET]: Allow forwarding of ip_summed except CHECKSUM_COMPLETE Right now Xen has a horrible hack that lets it forward packets with partial checksums. One of the reasons that CHECKSUM_PARTIAL and CHECKSUM_COMPLETE were added is so that we can get rid of this hack (where it creates two extra bits in the skbuff to essentially mirror ip_summed without being destroyed by the forwarding code). I had forgotten that I've already gone through all the deivce drivers last time around to make sure that they're looking at ip_summed == CHECKSUM_PARTIAL rather than ip_summed != 0 on transmit. In any case, I've now done that again so it should definitely be safe. Unfortunately nobody has yet added any code to update CHECKSUM_COMPLETE values on forward so we I'm setting that to CHECKSUM_NONE. This should be safe to remove for bridging but I'd like to check that code path first. So here is the patch that lets us get rid of the hack by preserving ip_summed (mostly) on forwarded packets. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- include/linux/skbuff.h | 7 +++++++ net/bridge/br_forward.c | 2 +- net/ipv4/ip_forward.c | 2 +- net/ipv6/ip6_output.c | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 37247901ebd2..1c19b2d55c2b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1654,5 +1654,12 @@ static inline int skb_is_gso(const struct sk_buff *skb) return skb_shinfo(skb)->gso_size; } +static inline void skb_forward_csum(struct sk_buff *skb) +{ + /* Unfortunately we don't support this one. Any brave souls? 
*/ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 3e45c1a1aa96..ada7f495445c 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -71,7 +71,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) indev = skb->dev; skb->dev = to->dev; - skb->ip_summed = CHECKSUM_NONE; + skb_forward_csum(skb); NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev, br_forward_finish); diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 61b30d100676..9cb04df0054b 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -67,7 +67,7 @@ int ip_forward(struct sk_buff *skb) if (skb->pkt_type != PACKET_HOST) goto drop; - skb->ip_summed = CHECKSUM_NONE; + skb_forward_csum(skb); /* * According to the RFC, we must first decrease the TTL field. If diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index e2b8db6b9aef..be3f082a87ed 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -378,7 +378,7 @@ int ip6_forward(struct sk_buff *skb) goto drop; } - skb->ip_summed = CHECKSUM_NONE; + skb_forward_csum(skb); /* * We DO NOT make any processing on -- cgit v1.2.3-59-g8ed1b From 73417f617a93cf30342c3ea41abc38927bd467aa Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Tue, 27 Mar 2007 13:56:52 -0700 Subject: [NET] fib_rules: Flush route cache after rule modifications The results of FIB rules lookups are cached in the routing cache except for IPv6 as no such cache exists. So far, it was the responsibility of the user to flush the cache after modifying any rules. This lead to many false bug reports due to misunderstanding of this concept. This patch automatically flushes the route cache after inserting or deleting a rule. Thanks to Muli Ben-Yehuda for catching a bug in the previous patch. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- include/net/fib_rules.h | 4 ++++ net/core/fib_rules.c | 8 ++++++++ net/decnet/dn_rules.c | 7 +++++++ net/ipv4/fib_rules.c | 6 ++++++ 4 files changed, 25 insertions(+) (limited to 'include') diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 08bab8b6e575..ed3a8872c6ca 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -59,6 +59,10 @@ struct fib_rules_ops u32 (*default_pref)(void); size_t (*nlmsg_payload)(struct fib_rule *); + /* Called after modifications to the rules set, must flush + * the route cache if one exists. 
*/ + void (*flush_cache)(void); + int nlgroup; struct nla_policy *policy; struct list_head *rules_list; diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 5824b2644f26..cb2dae19531b 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -44,6 +44,12 @@ static void rules_ops_put(struct fib_rules_ops *ops) module_put(ops->owner); } +static void flush_route_cache(struct fib_rules_ops *ops) +{ + if (ops->flush_cache) + ops->flush_cache(); +} + int fib_rules_register(struct fib_rules_ops *ops) { int err = -EEXIST; @@ -314,6 +320,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) list_add_rcu(&rule->list, ops->rules_list); notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid); + flush_route_cache(ops); rules_ops_put(ops); return 0; @@ -404,6 +411,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) notify_rule_change(RTM_DELRULE, rule, ops, nlh, NETLINK_CB(skb).pid); fib_rule_put(rule); + flush_route_cache(ops); rules_ops_put(ops); return 0; } diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index fd0cc2aa316c..7f58b95b27d1 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -31,6 +31,7 @@ #include #include #include +#include static struct fib_rules_ops dn_fib_rules_ops; @@ -239,6 +240,11 @@ static u32 dn_fib_rule_default_pref(void) return 0; } +static void dn_fib_rule_flush_cache(void) +{ + dn_rt_cache_flush(0); +} + static struct fib_rules_ops dn_fib_rules_ops = { .family = AF_DECnet, .rule_size = sizeof(struct dn_fib_rule), @@ -249,6 +255,7 @@ static struct fib_rules_ops dn_fib_rules_ops = { .compare = dn_fib_rule_compare, .fill = dn_fib_rule_fill, .default_pref = dn_fib_rule_default_pref, + .flush_cache = dn_fib_rule_flush_cache, .nlgroup = RTNLGRP_DECnet_RULE, .policy = dn_fib_rule_policy, .rules_list = &dn_fib_rules, diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index b021b3440ca3..fe29b98d6c8f 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -298,6 +298,11 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) + nla_total_size(4); /* flow */ } +static void fib4_rule_flush_cache(void) +{ + rt_cache_flush(0); +} + static struct fib_rules_ops fib4_rules_ops = { .family = AF_INET, .rule_size = sizeof(struct fib4_rule), @@ -309,6 +314,7 @@ static struct fib_rules_ops fib4_rules_ops = { .fill = fib4_rule_fill, .default_pref = fib4_rule_default_pref, .nlmsg_payload = fib4_rule_nlmsg_payload, + .flush_cache = fib4_rule_flush_cache, .nlgroup = RTNLGRP_IPV4_RULE, .policy = fib4_rule_policy, .rules_list = &fib4_rules, -- cgit v1.2.3-59-g8ed1b From 2a123b86e2b242a4a6db990d2851d45e192f88e5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 27 Mar 2007 18:38:07 -0300 Subject: [BLUETOOTH]: Introduce skb->data accessor methods for hci_{acl,event,sco}_hdr For consistency with other skb data accessors, reducing the number of direct accesses to skb->data. 
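A minimal sketch of the call-site pattern this introduces follows; the wrapper function name is made up for illustration, only the hci_event_hdr() accessor and the BT_DBG() output come from the surrounding patches.

#include <net/bluetooth/bluetooth.h>	/* BT_DBG() */
#include <net/bluetooth/hci.h>		/* struct hci_event_hdr, hci_event_hdr() */

static void example_rx_event(struct sk_buff *skb)
{
	/* previously: eh = (struct hci_event_hdr *)(skb->data); */
	struct hci_event_hdr *eh = hci_event_hdr(skb);

	BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
}
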
Signed-off-by: Arnaldo Carvalho de Melo --- drivers/bluetooth/bluecard_cs.c | 6 +++--- drivers/bluetooth/bt3c_cs.c | 6 +++--- drivers/bluetooth/btuart_cs.c | 6 +++--- drivers/bluetooth/hci_h4.c | 6 +++--- include/net/bluetooth/hci.h | 18 ++++++++++++++++++ 5 files changed, 30 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index acfb6a430dcc..851de4d5b7de 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -461,20 +461,20 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset) switch (info->rx_state) { case RECV_WAIT_EVENT_HEADER: - eh = (struct hci_event_hdr *)(info->rx_skb->data); + eh = hci_event_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = eh->plen; break; case RECV_WAIT_ACL_HEADER: - ah = (struct hci_acl_hdr *)(info->rx_skb->data); + ah = hci_acl_hdr(info->rx_skb); dlen = __le16_to_cpu(ah->dlen); info->rx_state = RECV_WAIT_DATA; info->rx_count = dlen; break; case RECV_WAIT_SCO_HEADER: - sh = (struct hci_sco_hdr *)(info->rx_skb->data); + sh = hci_sco_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = sh->dlen; break; diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 18b0f3992c5b..39516074636b 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -303,20 +303,20 @@ static void bt3c_receive(bt3c_info_t *info) switch (info->rx_state) { case RECV_WAIT_EVENT_HEADER: - eh = (struct hci_event_hdr *)(info->rx_skb->data); + eh = hci_event_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = eh->plen; break; case RECV_WAIT_ACL_HEADER: - ah = (struct hci_acl_hdr *)(info->rx_skb->data); + ah = hci_acl_hdr(info->rx_skb); dlen = __le16_to_cpu(ah->dlen); info->rx_state = RECV_WAIT_DATA; info->rx_count = dlen; break; case RECV_WAIT_SCO_HEADER: - sh = (struct hci_sco_hdr *)(info->rx_skb->data); + sh = hci_sco_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = sh->dlen; break; diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index c1bce75148fe..d7d2ea0d86a1 100644 --- a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c @@ -250,20 +250,20 @@ static void btuart_receive(btuart_info_t *info) switch (info->rx_state) { case RECV_WAIT_EVENT_HEADER: - eh = (struct hci_event_hdr *)(info->rx_skb->data); + eh = hci_event_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = eh->plen; break; case RECV_WAIT_ACL_HEADER: - ah = (struct hci_acl_hdr *)(info->rx_skb->data); + ah = hci_acl_hdr(info->rx_skb); dlen = __le16_to_cpu(ah->dlen); info->rx_state = RECV_WAIT_DATA; info->rx_count = dlen; break; case RECV_WAIT_SCO_HEADER: - sh = (struct hci_sco_hdr *)(info->rx_skb->data); + sh = hci_sco_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = sh->dlen; break; diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index 34f0afc42407..bfbae14cf93d 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -188,7 +188,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count) continue; case H4_W4_EVENT_HDR: - eh = (struct hci_event_hdr *) h4->rx_skb->data; + eh = hci_event_hdr(h4->rx_skb); BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen); @@ -196,7 +196,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count) continue; case H4_W4_ACL_HDR: - ah = (struct hci_acl_hdr *) h4->rx_skb->data; + ah = hci_acl_hdr(h4->rx_skb); dlen = 
__le16_to_cpu(ah->dlen); BT_DBG("ACL header: dlen %d", dlen); @@ -205,7 +205,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count) continue; case H4_W4_SCO_HDR: - sh = (struct hci_sco_hdr *) h4->rx_skb->data; + sh = hci_sco_hdr(h4->rx_skb); BT_DBG("SCO header: dlen %d", sh->dlen); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 41456c148842..93ce272a5d27 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -709,6 +709,24 @@ struct hci_sco_hdr { __u8 dlen; } __attribute__ ((packed)); +#ifdef __KERNEL__ +#include +static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) +{ + return (struct hci_event_hdr *)skb->data; +} + +static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb) +{ + return (struct hci_acl_hdr *)skb->data; +} + +static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) +{ + return (struct hci_sco_hdr *)skb->data; +} +#endif + /* Command opcode pack/unpack */ #define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10)) #define hci_opcode_ogf(op) (op >> 10) -- cgit v1.2.3-59-g8ed1b From d626f62b11e00c16e81e4308ab93d3f13551812a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 27 Mar 2007 18:55:52 -0300 Subject: [SK_BUFF]: Introduce skb_copy_from_linear_data{_offset} To clearly state the intent of copying from linear sk_buffs, _offset being a overly long variant but interesting for the sake of saving some bytes. Signed-off-by: Arnaldo Carvalho de Melo --- arch/ia64/sn/kernel/xpnet.c | 3 ++- drivers/atm/atmtcp.c | 4 ++-- drivers/atm/nicstar.c | 6 +++--- drivers/bluetooth/bfusb.c | 2 +- drivers/bluetooth/bpa10x.c | 4 ++-- drivers/bluetooth/dtl1_cs.c | 2 +- drivers/char/pcmcia/synclink_cs.c | 2 +- drivers/infiniband/hw/cxgb3/iwch_cm.c | 9 ++++++--- drivers/isdn/act2000/module.c | 2 +- drivers/isdn/gigaset/usb-gigaset.c | 2 +- drivers/isdn/hardware/avm/b1dma.c | 3 ++- drivers/isdn/hardware/avm/c4.c | 3 ++- drivers/isdn/hisax/elsa_ser.c | 6 ++++-- drivers/isdn/hisax/isdnl2.c | 3 ++- drivers/isdn/hysdn/hycapi.c | 2 +- drivers/isdn/hysdn/hysdn_sched.c | 5 +++-- drivers/isdn/i4l/isdn_common.c | 2 +- drivers/isdn/i4l/isdn_ppp.c | 7 +++++-- drivers/isdn/isdnloop/isdnloop.c | 3 ++- drivers/isdn/pcbit/capi.c | 12 +++++++----- drivers/media/dvb/dvb-core/dvb_net.c | 4 +++- drivers/message/fusion/mptlan.c | 6 +++--- drivers/net/3c505.c | 2 +- drivers/net/3c523.c | 2 +- drivers/net/7990.c | 2 +- drivers/net/a2065.c | 2 +- drivers/net/arcnet/capmode.c | 3 ++- drivers/net/atari_bionet.c | 3 ++- drivers/net/atari_pamsnet.c | 3 ++- drivers/net/au1000_eth.c | 2 +- drivers/net/b44.c | 7 ++++--- drivers/net/bnx2.c | 6 ++---- drivers/net/cassini.c | 4 ++-- drivers/net/chelsio/sge.c | 2 +- drivers/net/cxgb3/sge.c | 5 +++-- drivers/net/dgrs.c | 2 +- drivers/net/eepro100.c | 5 +++-- drivers/net/ehea/ehea_main.c | 11 ++++++----- drivers/net/fec_8xx/fec_main.c | 4 +++- drivers/net/fs_enet/fs_enet-main.c | 6 ++++-- drivers/net/hamradio/dmascc.c | 2 +- drivers/net/hamradio/hdlcdrv.c | 4 +++- drivers/net/hamradio/yam.c | 4 +++- drivers/net/ioc3-eth.c | 2 +- drivers/net/irda/ali-ircc.c | 5 ++--- drivers/net/irda/au1k_ir.c | 2 +- drivers/net/irda/donauboe.c | 2 +- drivers/net/irda/irda-usb.c | 4 ++-- drivers/net/irda/mcs7780.c | 4 ++-- drivers/net/irda/nsc-ircc.c | 5 ++--- drivers/net/irda/pxaficp_ir.c | 2 +- drivers/net/irda/smsc-ircc2.c | 2 +- drivers/net/irda/via-ircc.c | 4 ++-- drivers/net/irda/vlsi_ir.c | 2 +- drivers/net/irda/w83977af_ir.c | 2 +- 
drivers/net/lance.c | 2 +- drivers/net/macmace.c | 3 +-- drivers/net/meth.c | 10 +++++----- drivers/net/myri_sbus.c | 2 +- drivers/net/netxen/netxen_nic_main.c | 6 ++++-- drivers/net/ni52.c | 2 +- drivers/net/ni65.c | 5 +++-- drivers/net/pci-skeleton.c | 2 +- drivers/net/pcmcia/axnet_cs.c | 2 +- drivers/net/ppp_synctty.c | 3 ++- drivers/net/pppoe.c | 3 ++- drivers/net/qla3xxx.c | 3 ++- drivers/net/rrunner.c | 2 +- drivers/net/sgiseeq.c | 2 +- drivers/net/skge.c | 2 +- drivers/net/sky2.c | 2 +- drivers/net/sun3_82586.c | 2 +- drivers/net/sun3lance.c | 2 +- drivers/net/sungem.c | 2 +- drivers/net/sunhme.c | 2 +- drivers/net/sunlance.c | 2 +- drivers/net/sunqe.c | 2 +- drivers/net/tg3.c | 2 +- drivers/net/tlan.c | 2 +- drivers/net/tokenring/3c359.c | 7 +++++-- drivers/net/tokenring/olympic.c | 8 ++++++-- drivers/net/tokenring/tms380tr.c | 2 +- drivers/net/tulip/de2104x.c | 4 ++-- drivers/net/tulip/dmfe.c | 6 ++++-- drivers/net/tulip/uli526x.c | 2 +- drivers/net/tulip/xircom_cb.c | 6 +++--- drivers/net/tulip/xircom_tulip_cb.c | 4 +++- drivers/net/tun.c | 4 ++-- drivers/net/via-velocity.c | 7 ++++--- drivers/net/wan/lmc/lmc_main.c | 2 +- drivers/net/wan/pc300_drv.c | 2 +- drivers/net/wan/z85230.c | 2 +- drivers/net/wireless/atmel.c | 4 ++-- drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 3 ++- drivers/net/wireless/hostap/hostap_80211_rx.c | 13 ++++++++----- drivers/net/wireless/hostap/hostap_80211_tx.c | 23 ++++++++++++++--------- drivers/net/wireless/hostap/hostap_ap.c | 4 ++-- drivers/net/wireless/hostap/hostap_hw.c | 5 +++-- drivers/net/wireless/ipw2100.c | 5 +++-- drivers/net/wireless/ipw2200.c | 2 +- drivers/net/wireless/prism54/islpci_eth.c | 13 +++++++++---- drivers/net/wireless/ray_cs.c | 3 ++- drivers/net/wireless/wavelan.c | 2 +- drivers/net/wireless/zd1201.c | 4 ++-- drivers/s390/net/ctcmain.c | 13 ++++++++----- drivers/s390/net/lcs.c | 2 +- drivers/s390/net/netiucv.c | 7 +++++-- drivers/s390/net/qeth_eddp.c | 3 ++- drivers/usb/atm/usbatm.c | 2 +- drivers/usb/net/catc.c | 2 +- drivers/usb/net/pegasus.c | 2 +- include/linux/skbuff.h | 14 ++++++++++++++ net/ax25/ax25_out.c | 4 ++-- net/bluetooth/bnep/core.c | 2 +- net/bluetooth/cmtp/core.c | 4 ++-- net/bluetooth/l2cap.c | 6 ++++-- net/bridge/br_netfilter.c | 3 ++- net/core/skbuff.c | 17 +++++++++-------- net/decnet/dn_nsp_in.c | 5 +++-- net/ieee80211/ieee80211_crypt_wep.c | 2 +- net/ieee80211/ieee80211_rx.c | 6 +++--- net/ieee80211/ieee80211_tx.c | 8 ++++---- net/ipv4/ip_output.c | 2 +- net/ipv6/ip6_output.c | 2 +- net/irda/irttp.c | 4 ++-- net/netrom/af_netrom.c | 3 ++- net/netrom/nr_loopback.c | 2 +- net/netrom/nr_out.c | 4 ++-- net/netrom/nr_subr.c | 4 ++-- net/rose/af_rose.c | 4 ++-- net/x25/af_x25.c | 2 +- net/x25/x25_in.c | 5 +++-- net/x25/x25_out.c | 4 ++-- 133 files changed, 321 insertions(+), 230 deletions(-) (limited to 'include') diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c index 98d79142f32b..9fc02654f0f5 100644 --- a/arch/ia64/sn/kernel/xpnet.c +++ b/arch/ia64/sn/kernel/xpnet.c @@ -566,7 +566,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) msg->version = XPNET_VERSION_EMBED; dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", &msg->data, skb->data, (size_t) embedded_bytes); - memcpy(&msg->data, skb->data, (size_t) embedded_bytes); + skb_copy_from_linear_data(skb, &msg->data, + (size_t)embedded_bytes); } else { msg->version = XPNET_VERSION; } diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index fc518d85543d..1b9493a16aca 100644 --- a/drivers/atm/atmtcp.c +++ 
b/drivers/atm/atmtcp.c @@ -221,7 +221,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb) hdr->vpi = htons(vcc->vpi); hdr->vci = htons(vcc->vci); hdr->length = htonl(skb->len); - memcpy(skb_put(new_skb,skb->len),skb->data,skb->len); + skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); out_vcc->push(out_vcc,new_skb); @@ -310,7 +310,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) goto done; } __net_timestamp(new_skb); - memcpy(skb_put(new_skb,skb->len),skb->data,skb->len); + skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); out_vcc->push(out_vcc,new_skb); atomic_inc(&vcc->stats->tx); atomic_inc(&out_vcc->stats->rx); diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index 26f4b7033494..14ced85b3f54 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -2395,7 +2395,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) skb->destructor = ns_lb_destructor; #endif /* NS_USE_DESTRUCTORS */ skb_push(skb, NS_SMBUFSIZE); - memcpy(skb->data, sb->data, NS_SMBUFSIZE); + skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE); skb_put(skb, len - NS_SMBUFSIZE); ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); @@ -2479,7 +2479,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) { /* Copy the small buffer to the huge buffer */ sb = (struct sk_buff *) iov->iov_base; - memcpy(hb->data, sb->data, iov->iov_len); + skb_copy_from_linear_data(sb, hb->data, iov->iov_len); skb_put(hb, iov->iov_len); remaining = len - iov->iov_len; iov++; @@ -2491,7 +2491,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) { lb = (struct sk_buff *) iov->iov_base; tocopy = min_t(int, remaining, iov->iov_len); - memcpy(skb_tail_pointer(hb), lb->data, tocopy); + skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy); skb_put(hb, tocopy); iov++; remaining -= tocopy; diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c index 4c766f36d884..b990805806af 100644 --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c @@ -527,7 +527,7 @@ static int bfusb_send_frame(struct sk_buff *skb) buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 
0 : size; memcpy(skb_put(nskb, 3), buf, 3); - memcpy(skb_put(nskb, size), skb->data + sent, size); + skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size); sent += size; count -= size; diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c index 9fca6513562d..e8ebd5d3de86 100644 --- a/drivers/bluetooth/bpa10x.c +++ b/drivers/bluetooth/bpa10x.c @@ -231,7 +231,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data) cr = (struct usb_ctrlrequest *) urb->setup_packet; cr->wLength = __cpu_to_le16(skb->len); - memcpy(urb->transfer_buffer, skb->data, skb->len); + skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len); urb->transfer_buffer_length = skb->len; err = usb_submit_urb(urb, GFP_ATOMIC); @@ -250,7 +250,7 @@ static void bpa10x_wakeup(struct bpa10x_data *data) skb = skb_dequeue(&data->tx_queue); if (skb) { - memcpy(urb->transfer_buffer, skb->data, skb->len); + skb_copy_from_linear_data(skb, urb->transfer_buffer, skb->len); urb->transfer_buffer_length = skb->len; err = usb_submit_urb(urb, GFP_ATOMIC); diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c index 459aa97937ab..7f9c54b9964a 100644 --- a/drivers/bluetooth/dtl1_cs.c +++ b/drivers/bluetooth/dtl1_cs.c @@ -425,7 +425,7 @@ static int dtl1_hci_send_frame(struct sk_buff *skb) return -ENOMEM; skb_reserve(s, NSHL); - memcpy(skb_put(s, skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len); if (skb->len & 0x0001) *skb_put(s, 1) = 0; /* PAD */ diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 8d025e9b5bce..157b1d09ab55 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -4169,7 +4169,7 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); /* copy data to device buffers */ - memcpy(info->tx_buf, skb->data, skb->len); + skb_copy_from_linear_data(skb, info->tx_buf, skb->len); info->tx_get = 0; info->tx_put = info->tx_count = skb->len; diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index e842c65a3f4d..3b4b0acd707f 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -821,7 +821,8 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb) /* * copy the new data into our accumulation buffer. */ - memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len); + skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), + skb->len); ep->mpa_pkt_len += skb->len; /* @@ -940,7 +941,8 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb) /* * Copy the new data into our accumulation buffer. 
*/ - memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len); + skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), + skb->len); ep->mpa_pkt_len += skb->len; /* @@ -1619,7 +1621,8 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) PDBG("%s ep %p\n", __FUNCTION__, ep); skb_pull(skb, sizeof(struct cpl_rdma_terminate)); PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len); - memcpy(ep->com.qp->attr.terminate_buffer, skb->data, skb->len); + skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, + skb->len); ep->com.qp->attr.terminate_msg_len = skb->len; ep->com.qp->attr.is_terminate_local = 0; return CPL_RET_BUF_DONE; diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c index e3e5c1399076..ee2b0b9f8f46 100644 --- a/drivers/isdn/act2000/module.c +++ b/drivers/isdn/act2000/module.c @@ -442,7 +442,7 @@ act2000_sendbuf(act2000_card *card, int channel, int ack, struct sk_buff *skb) return 0; } skb_reserve(xmit_skb, 19); - memcpy(skb_put(xmit_skb, len), skb->data, len); + skb_copy_from_linear_data(skb, skb_put(xmit_skb, len), len); } else { xmit_skb = skb_clone(skb, GFP_ATOMIC); if (!xmit_skb) { diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 2baef349c12d..c8e1c357cec8 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -652,7 +652,7 @@ static int write_modem(struct cardstate *cs) * transmit data */ count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); - memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count); + skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count); skb_pull(bcs->tx_skb, count); atomic_set(&ucs->busy, 1); gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count); diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c index 1e2d38e3d68c..428872b653e9 100644 --- a/drivers/isdn/hardware/avm/b1dma.c +++ b/drivers/isdn/hardware/avm/b1dma.c @@ -404,7 +404,8 @@ static void b1dma_dispatch_tx(avmcard *card) printk(KERN_DEBUG "tx: put 0x%x len=%d\n", skb->data[2], txlen); #endif - memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2); + skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf, + skb->len - 2); } txlen = (txlen + 3) & ~3; diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c index 6f5efa8d78cb..d58f927e766a 100644 --- a/drivers/isdn/hardware/avm/c4.c +++ b/drivers/isdn/hardware/avm/c4.c @@ -457,7 +457,8 @@ static void c4_dispatch_tx(avmcard *card) printk(KERN_DEBUG "%s: tx put 0x%x len=%d\n", card->name, skb->data[2], txlen); #endif - memcpy(dma->sendbuf.dmabuf, skb->data+2, skb->len-2); + skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf, + skb->len - 2); } txlen = (txlen + 3) & ~3; diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c index ae377e812775..1642dca988a1 100644 --- a/drivers/isdn/hisax/elsa_ser.c +++ b/drivers/isdn/hisax/elsa_ser.c @@ -254,14 +254,16 @@ write_modem(struct BCState *bcs) { count = len; if (count > MAX_MODEM_BUF - fp) { count = MAX_MODEM_BUF - fp; - memcpy(cs->hw.elsa.transbuf + fp, bcs->tx_skb->data, count); + skb_copy_from_linear_data(bcs->tx_skb, + cs->hw.elsa.transbuf + fp, count); skb_pull(bcs->tx_skb, count); cs->hw.elsa.transcnt += count; ret = count; count = len - count; fp = 0; } - memcpy((cs->hw.elsa.transbuf + fp), bcs->tx_skb->data, count); + skb_copy_from_linear_data(bcs->tx_skb, + cs->hw.elsa.transbuf + fp, count); skb_pull(bcs->tx_skb, count); 
cs->hw.elsa.transcnt += count; ret += count; diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index cd3b5ad53491..3446f249d675 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c @@ -1293,7 +1293,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) oskb = skb; skb = alloc_skb(oskb->len + i, GFP_ATOMIC); memcpy(skb_put(skb, i), header, i); - memcpy(skb_put(skb, oskb->len), oskb->data, oskb->len); + skb_copy_from_linear_data(oskb, + skb_put(skb, oskb->len), oskb->len); dev_kfree_skb(oskb); } st->l2.l2l1(st, PH_PULL | INDICATION, skb); diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c index b2ae4ec1e49e..4433ce0fca55 100644 --- a/drivers/isdn/hysdn/hycapi.c +++ b/drivers/isdn/hysdn/hycapi.c @@ -398,7 +398,7 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) _len = CAPIMSG_LEN(skb->data); if (_len > 22) { _len2 = _len - 22; - memcpy(msghead, skb->data, 22); + skb_copy_from_linear_data(skb, msghead, 22); memcpy(skb->data + _len2, msghead, 22); skb_pull(skb, _len2); CAPIMSG_SETLEN(skb->data, 22); diff --git a/drivers/isdn/hysdn/hysdn_sched.c b/drivers/isdn/hysdn/hysdn_sched.c index b7b5aa4748a0..81db4a190d41 100644 --- a/drivers/isdn/hysdn/hysdn_sched.c +++ b/drivers/isdn/hysdn/hysdn_sched.c @@ -113,7 +113,8 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf, (skb = hysdn_tx_netget(card)) != NULL) { if (skb->len <= maxlen) { - memcpy(buf, skb->data, skb->len); /* copy the packet to the buffer */ + /* copy the packet to the buffer */ + skb_copy_from_linear_data(skb, buf, skb->len); *len = skb->len; *chan = CHAN_NDIS_DATA; card->net_tx_busy = 1; /* we are busy sending network data */ @@ -126,7 +127,7 @@ hysdn_sched_tx(hysdn_card *card, unsigned char *buf, ((skb = hycapi_tx_capiget(card)) != NULL) ) { if (skb->len <= maxlen) { - memcpy(buf, skb->data, skb->len); + skb_copy_from_linear_data(skb, buf, skb->len); *len = skb->len; *chan = CHAN_CAPI; hycapi_tx_capiack(card); diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 9c926e41b114..c97330b19877 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -829,7 +829,7 @@ isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_que dflag = 0; } count_put = count_pull; - memcpy(cp, skb->data, count_put); + skb_copy_from_linear_data(skb, cp, count_put); cp += count_put; len -= count_put; #ifdef CONFIG_ISDN_AUDIO diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index be915051cb2e..387392cb3d68 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -1100,7 +1100,8 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff goto drop_packet; } skb_put(skb, skb_old->len + 128); - memcpy(skb->data, skb_old->data, skb_old->len); + skb_copy_from_linear_data(skb_old, skb->data, + skb_old->len); if (net_dev->local->ppp_slot < 0) { printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n", __FUNCTION__, net_dev->local->ppp_slot); @@ -1902,7 +1903,9 @@ void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, while( from != to ) { unsigned int len = from->len - MP_HEADER_LEN; - memcpy(skb_put(skb,len), from->data+MP_HEADER_LEN, len); + skb_copy_from_linear_data_offset(from, MP_HEADER_LEN, + skb_put(skb,len), + len); frag = from->next; isdn_ppp_mp_free_skb(mp, from); from = frag; diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index e3add27dd0e1..e93ad59f60bf 
100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -415,7 +415,8 @@ isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card) spin_lock_irqsave(&card->isdnloop_lock, flags); nskb = dev_alloc_skb(skb->len); if (nskb) { - memcpy(skb_put(nskb, len), skb->data, len); + skb_copy_from_linear_data(skb, + skb_put(nskb, len), len); skb_queue_tail(&card->bqueue[channel], nskb); dev_kfree_skb(skb); } else diff --git a/drivers/isdn/pcbit/capi.c b/drivers/isdn/pcbit/capi.c index 47c59e95898d..7b55e151f1b0 100644 --- a/drivers/isdn/pcbit/capi.c +++ b/drivers/isdn/pcbit/capi.c @@ -429,8 +429,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan, if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC))) return -1; - memcpy(info->data.setup.CallingPN, skb->data + count + 1, - len - count); + skb_copy_from_linear_data_offset(skb, count + 1, + info->data.setup.CallingPN, + len - count); info->data.setup.CallingPN[len - count] = 0; } @@ -457,8 +458,9 @@ int capi_decode_conn_ind(struct pcbit_chan * chan, if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC))) return -1; - memcpy(info->data.setup.CalledPN, skb->data + count + 1, - len - count); + skb_copy_from_linear_data_offset(skb, count + 1, + info->data.setup.CalledPN, + len - count); info->data.setup.CalledPN[len - count] = 0; } @@ -539,7 +541,7 @@ int capi_decode_conn_actv_ind(struct pcbit_chan * chan, struct sk_buff *skb) #ifdef DEBUG if (len > 1 && len < 31) { - memcpy(str, skb->data + 2, len - 1); + skb_copy_from_linear_data_offset(skb, 2, str, len - 1); str[len] = 0; printk(KERN_DEBUG "Connected Party Number: %s\n", str); } diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 9de177a5b9f1..6a5ab409c4e7 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c @@ -697,7 +697,9 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len ) } else { - memcpy(dest_addr, priv->ule_skb->data, ETH_ALEN); + skb_copy_from_linear_data(priv->ule_skb, + dest_addr, + ETH_ALEN); skb_pull(priv->ule_skb, ETH_ALEN); } } diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index 21fe1b66808c..7dd34bd28efc 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c @@ -932,7 +932,7 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg) pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); - memcpy(skb_put(skb, len), old_skb->data, len); + skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); @@ -1093,7 +1093,7 @@ mpt_lan_receive_post_reply(struct net_device *dev, priv->RcvCtl[ctx].dma, priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); - memcpy(skb_put(skb, l), old_skb->data, l); + skb_copy_from_linear_data(old_skb, skb_put(skb, l), l); pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, @@ -1122,7 +1122,7 @@ mpt_lan_receive_post_reply(struct net_device *dev, priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); - memcpy(skb_put(skb, len), old_skb->data, len); + skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index c693b5a79500..e985a85a5623 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c @@ -1025,7 +1025,7 
@@ static int send_packet(struct net_device *dev, struct sk_buff *skb) adapter->current_dma.start_time = jiffies; if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { - memcpy(adapter->dma_buffer, skb->data, nlen); + skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen); memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); target = isa_virt_to_bus(adapter->dma_buffer); } diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index 6b2036df6856..a384f7d478ab 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c @@ -1145,7 +1145,7 @@ static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev) if (len != skb->len) memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); - memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len); + skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len); #if (NUM_XMIT_BUFFS == 1) #ifdef NO_NOPCOMMANDS diff --git a/drivers/net/7990.c b/drivers/net/7990.c index c50264aea16b..d396f996af57 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c @@ -567,7 +567,7 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) if (skb->len < ETH_ZLEN) memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); - memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); + skb_copy_from_linear_data(skb, &ib->tx_buf[entry][0], skblen); /* Now, give the packet to the lance */ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index b38fc65005eb..1226cbba0450 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c @@ -598,7 +598,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) ib->btx_ring [entry].length = (-len) | 0xf000; ib->btx_ring [entry].misc = 0; - memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); + skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); /* Clear the slack of the packet, do I need this? 
*/ if (len != skblen) diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index f6a87bd20ff2..cc4610db6395 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -273,7 +273,8 @@ static int ack_tx(struct net_device *dev, int acked) /* skb_pull(ackskb, ARC_HDR_SIZE); */ - memcpy(ackpkt, lp->outgoing.skb->data, ARC_HDR_SIZE+sizeof(struct arc_cap)); + skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, + ARC_HDR_SIZE + sizeof(struct arc_cap)); ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */ ackpkt->soft.cap.mes.ack=acked; diff --git a/drivers/net/atari_bionet.c b/drivers/net/atari_bionet.c index f52e7f22f63d..13dbed368d6a 100644 --- a/drivers/net/atari_bionet.c +++ b/drivers/net/atari_bionet.c @@ -453,7 +453,8 @@ bionet_send_packet(struct sk_buff *skb, struct net_device *dev) { stdma_lock(bionet_intr, NULL); local_irq_restore(flags); if( !STRAM_ADDR(buf+length-1) ) { - memcpy(nic_packet->buffer, skb->data, length); + skb_copy_from_linear_data(skb, nic_packet->buffer, + length); buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer; } diff --git a/drivers/net/atari_pamsnet.c b/drivers/net/atari_pamsnet.c index 3b5436149286..745101d7451b 100644 --- a/drivers/net/atari_pamsnet.c +++ b/drivers/net/atari_pamsnet.c @@ -717,7 +717,8 @@ pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev) { local_irq_restore(flags); if( !STRAM_ADDR(buf+length-1) ) { - memcpy(nic_packet->buffer, skb->data, length); + skb_copy_from_linear_data(skb, nic_packet->buffer, + length); buf = (unsigned long)phys_nic_packet; } diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 97b55f2546c5..d10fb80e9a63 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -1125,7 +1125,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) } pDB = aup->tx_db_inuse[aup->tx_head]; - memcpy((void *)pDB->vaddr, skb->data, skb->len); + skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); if (skb->len < ETH_ZLEN) { for (i=skb->len; ivaddr)[i] = 0; diff --git a/drivers/net/b44.c b/drivers/net/b44.c index f67d97de97ff..879a2fff474e 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -828,8 +828,8 @@ static int b44_rx(struct b44 *bp, int budget) skb_reserve(copy_skb, 2); skb_put(copy_skb, len); /* DMA sync done above, copy just the actual packet */ - memcpy(copy_skb->data, skb->data+bp->rx_offset, len); - + skb_copy_from_linear_data_offset(skb, bp->rx_offset, + copy_skb->data, len); skb = copy_skb; } skb->ip_summed = CHECKSUM_NONE; @@ -1006,7 +1006,8 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) goto err_out; } - memcpy(skb_put(bounce_skb, len), skb->data, skb->len); + skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), + skb->len); dev_kfree_skb_any(skb); skb = bounce_skb; } diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 7e7b5f344030..f98a2205a090 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -1884,10 +1884,8 @@ bnx2_rx_int(struct bnx2 *bp, int budget) goto reuse_rx; /* aligned copy */ - memcpy(new_skb->data, - skb->data + bp->rx_offset - 2, - len + 2); - + skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2, + new_skb->data, len + 2); skb_reserve(new_skb, 2); skb_put(new_skb, len); diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index bd3ab6493e39..4aec747d9e43 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -2846,8 +2846,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, ctrl | TX_DESC_SOF, 0); 
entry = TX_DESC_NEXT(ring, entry); - memcpy(tx_tiny_buf(cp, ring, entry), skb->data + - len - tabort, tabort); + skb_copy_from_linear_data_offset(skb, len - tabort, + tx_tiny_buf(cp, ring, entry), tabort); mapping = tx_tiny_map(cp, ring, entry, tentry); cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, (nr_frags == 0)); diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 43e92f9f0bcd..1be1bbd16164 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1062,7 +1062,7 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev, pci_unmap_addr(ce, dma_addr), pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); - memcpy(skb->data, ce->skb->data, len); + skb_copy_from_linear_data(ce->skb, skb->data, len); pci_dma_sync_single_for_device(pdev, pci_unmap_addr(ce, dma_addr), pci_unmap_len(ce, dma_len), diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index c5faf1380e15..166c959c94b9 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -913,7 +913,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, if (skb->len <= WR_LEN - sizeof(*cpl)) { q->sdesc[pidx].skb = NULL; if (!skb->data_len) - memcpy(&d->flit[2], skb->data, skb->len); + skb_copy_from_linear_data(skb, &d->flit[2], + skb->len); else skb_copy_bits(skb, 0, &d->flit[2], skb->len); @@ -1771,7 +1772,7 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, __skb_put(skb, len); pci_dma_sync_single_for_cpu(adap->pdev, mapping, len, PCI_DMA_FROMDEVICE); - memcpy(skb->data, sd->t.skb->data, len); + skb_copy_from_linear_data(sd->t.skb, skb->data, len); pci_dma_sync_single_for_device(adap->pdev, mapping, len, PCI_DMA_FROMDEVICE); } else if (!drop_thres) diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c index d223c38966f4..df62c0232f36 100644 --- a/drivers/net/dgrs.c +++ b/drivers/net/dgrs.c @@ -741,7 +741,7 @@ static int dgrs_start_xmit(struct sk_buff *skb, struct net_device *devN) } amt = min_t(unsigned int, len, rbdp->size - count); - memcpy( (char *) S2H(rbdp->buf) + count, skb->data + i, amt); + skb_copy_from_linear_data_offset(skb, i, S2H(rbdp->buf) + count, amt); i += amt; count += amt; len -= amt; diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index db658bc491a9..6c267c38df97 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c @@ -1804,8 +1804,9 @@ speedo_rx(struct net_device *dev) eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); skb_put(skb, pkt_len); #else - memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data, - pkt_len); + skb_copy_from_linear_data(sp->rx_skbuff[entry], + skb_put(skb, pkt_len), + pkt_len); #endif pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry], sizeof(struct RxFD) + pkt_len, diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 63732d2305bb..8b5392072632 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -1306,7 +1306,7 @@ static void write_swqe2_TSO(struct sk_buff *skb, if (skb_data_size >= headersize) { /* copy immediate data */ - memcpy(imm_data, skb->data, headersize); + skb_copy_from_linear_data(skb, imm_data, headersize); swqe->immediate_data_length = headersize; if (skb_data_size > headersize) { @@ -1337,7 +1337,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, */ if (skb_data_size >= SWQE2_MAX_IMM) { /* copy immediate data */ - memcpy(imm_data, skb->data, SWQE2_MAX_IMM); + skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM); swqe->immediate_data_length = SWQE2_MAX_IMM; 
@@ -1350,7 +1350,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, swqe->descriptors++; } } else { - memcpy(imm_data, skb->data, skb_data_size); + skb_copy_from_linear_data(skb, imm_data, skb_data_size); swqe->immediate_data_length = skb_data_size; } } @@ -1772,10 +1772,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, /* copy (immediate) data */ if (nfrags == 0) { /* data is in a single piece */ - memcpy(imm_data, skb->data, skb->len); + skb_copy_from_linear_data(skb, imm_data, skb->len); } else { /* first copy data from the skb->data buffer ... */ - memcpy(imm_data, skb->data, skb->len - skb->data_len); + skb_copy_from_linear_data(skb, imm_data, + skb->len - skb->data_len); imm_data += skb->len - skb->data_len; /* ... then copy data from the fragments */ diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c index 698dba8f2aa1..e824d5d231af 100644 --- a/drivers/net/fec_8xx/fec_main.c +++ b/drivers/net/fec_8xx/fec_main.c @@ -551,7 +551,9 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) skbn = dev_alloc_skb(pkt_len + 2); if (skbn != NULL) { skb_reserve(skbn, 2); /* align IP header */ - memcpy(skbn->data, skb->data, pkt_len); + skb_copy_from_linear_data(skb + skbn->data, + pkt_len); /* swap */ skbt = skb; skb = skbn; diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 9f6ef315ce51..e2ddd617493a 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -160,7 +160,8 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) skbn = dev_alloc_skb(pkt_len + 2); if (skbn != NULL) { skb_reserve(skbn, 2); /* align IP header */ - memcpy(skbn->data, skb->data, pkt_len); + skb_copy_from_linear_data(skb, + skbn->data, pkt_len); /* swap */ skbt = skb; skb = skbn; @@ -293,7 +294,8 @@ static int fs_enet_rx_non_napi(struct net_device *dev) skbn = dev_alloc_skb(pkt_len + 2); if (skbn != NULL) { skb_reserve(skbn, 2); /* align IP header */ - memcpy(skbn->data, skb->data, pkt_len); + skb_copy_from_linear_data(skb, + skbn->data, pkt_len); /* swap */ skbt = skb; skb = skbn; diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 0fbb414b5a4d..3be8c5047599 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -930,7 +930,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) /* Transfer data to DMA buffer */ i = priv->tx_head; - memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1); + skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1); priv->tx_len[i] = skb->len - 1; /* Clear interrupts while we touch our circular buffers */ diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index f5a17ad9d3d6..b33adc6a340b 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -317,7 +317,9 @@ void hdlcdrv_transmitter(struct net_device *dev, struct hdlcdrv_state *s) dev_kfree_skb_irq(skb); break; } - memcpy(s->hdlctx.buffer, skb->data+1, pkt_len); + skb_copy_from_linear_data_offset(skb, 1, + s->hdlctx.buffer, + pkt_len); dev_kfree_skb_irq(skb); s->hdlctx.bp = s->hdlctx.buffer; append_crc_ccitt(s->hdlctx.buffer, pkt_len); diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index ee3ea4fa729f..ac2d6dd9dbe4 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -638,7 +638,9 @@ static void yam_tx_byte(struct net_device *dev, struct yam_port *yp) dev_kfree_skb_any(skb); break; } - 
memcpy(yp->tx_buf, skb->data + 1, yp->tx_len); + skb_copy_from_linear_data_offset(skb->data, 1, + yp->tx_buf, + yp->tx_len); dev_kfree_skb_any(skb); yp->tx_count = 0; yp->tx_crcl = 0x21; diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index bc62e770a256..f749e07c6425 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -1443,7 +1443,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) if (len <= 104) { /* Short packet, let's copy it directly into the ring. */ - memcpy(desc->data, skb->data, skb->len); + skb_copy_from_linear_data(skb, desc->data, skb->len); if (len < ETH_ZLEN) { /* Very short packet, pad with zeros at the end. */ memset(desc->data + len, 0, ETH_ZLEN - len); diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index 0f10758226fa..fb2248a25516 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -1472,9 +1472,8 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev) self->stats.tx_bytes += skb->len; - memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, - skb->len); - + skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, + skb->len); self->tx_fifo.len++; self->tx_fifo.free++; diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index 27afd0f367d6..cdd1f6c1e741 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c @@ -526,7 +526,7 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) if (aup->speed == 4000000) { /* FIR */ - memcpy((void *)pDB->vaddr, skb->data, skb->len); + skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); ptxd->count_0 = skb->len & 0xff; ptxd->count_1 = (skb->len >> 8) & 0xff; diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index ddfa6c38a16b..9987a0dc1eaf 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -1119,7 +1119,7 @@ dumpbufs(skb->data,skb->len,'>'); else { len = skb->len; - memcpy (self->tx_bufs[self->txs], skb->data, len); + skb_copy_from_linear_data(skb, self->tx_bufs[self->txs], len); } self->ring->tx[self->txs].len = len & 0x0fff; diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 6ef375a095f4..0ac240ca905b 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -441,7 +441,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev) goto drop; } - memcpy(self->tx_buff + self->header_length, skb->data, skb->len); + skb_copy_from_linear_data(skb, self->tx_buff + self->header_length, skb->len); /* Change setting for next frame */ if (self->capability & IUC_STIR421X) { @@ -902,7 +902,7 @@ static void irda_usb_receive(struct urb *urb) if(docopy) { /* Copy packet, so we can recycle the original */ - memcpy(newskb->data, skb->data, urb->actual_length); + skb_copy_from_linear_data(skb, newskb->data, urb->actual_length); /* Deliver this new skb */ dataskb = newskb; /* And hook the old skb to the URB diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 3ff1f4b33c06..4b0037e498f8 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -353,7 +353,7 @@ static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf) buf[0] = len & 0xff; buf[1] = (len >> 8) & 0xff; /* copy the data into the tx buffer. */ - memcpy(buf+2, skb->data, skb->len); + skb_copy_from_linear_data(skb, buf + 2, skb->len); /* put the fcs in the last four bytes in little endian order. 
*/ buf[len - 4] = fcs & 0xff; buf[len - 3] = (fcs >> 8) & 0xff; @@ -377,7 +377,7 @@ static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf) buf[0] = len & 0xff; buf[1] = (len >> 8) & 0xff; /* copy the data */ - memcpy(buf+2, skb->data, skb->len); + skb_copy_from_linear_data(skb, buf + 2, skb->len); /* put the fcs in last two bytes in little endian order. */ buf[len - 2] = fcs & 0xff; buf[len - 1] = (fcs >> 8) & 0xff; diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index 8ce7dad582f4..0ff992714136 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -1466,9 +1466,8 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev) self->stats.tx_bytes += skb->len; - memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, - skb->len); - + skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, + skb->len); self->tx_fifo.len++; self->tx_fifo.free++; diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index f35d7d42624e..b3e1107420af 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -484,7 +484,7 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long mtt = irda_get_mtt(skb); si->dma_tx_buff_len = skb->len; - memcpy(si->dma_tx_buff, skb->data, skb->len); + skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len); if (mtt) while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index e8453868d741..198bf3bfa70f 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -1162,7 +1162,7 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev) self->new_speed = speed; } - memcpy(self->tx_buff.head, skb->data, skb->len); + skb_copy_from_linear_data(skb, self->tx_buff.head, skb->len); self->tx_buff.len = skb->len; self->tx_buff.data = self->tx_buff.head; diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 5ff416314604..45bbd6686151 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -925,8 +925,8 @@ static int via_ircc_hard_xmit_fir(struct sk_buff *skb, self->tx_fifo.tail += skb->len; self->stats.tx_bytes += skb->len; - memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data, - skb->len); + skb_copy_from_linear_data(skb, + self->tx_fifo.queue[self->tx_fifo.free].start, skb->len); self->tx_fifo.len++; self->tx_fifo.free++; //F01 if (self->tx_fifo.len == 1) { diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index 79b407f3a49a..c4be973867a6 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -993,7 +993,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) goto drop; } else - memcpy(rd->buf, skb->data, len); + skb_copy_from_linear_data(skb, rd->buf, len); } rd->skb = skb; /* remember skb for tx-complete stats */ diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index bee445130952..0d4a68618fc1 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -529,7 +529,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev) /* Decide if we should use PIO or DMA transfer */ if (self->io.speed > PIO_MAX_SPEED) { self->tx_buff.data = self->tx_buff.head; - memcpy(self->tx_buff.data, skb->data, skb->len); + skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len); self->tx_buff.len = skb->len; mtt = 
irda_get_mtt(skb); diff --git a/drivers/net/lance.c b/drivers/net/lance.c index 11cbcb946db4..0fe96c85828b 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -988,7 +988,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) if (lance_debug > 5) printk("%s: bouncing a high-memory packet (%#x).\n", dev->name, (u32)isa_virt_to_bus(skb->data)); - memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len); + skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); lp->tx_ring[entry].base = ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; dev_kfree_skb(skb); diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 8c07ffc9c244..27911c07558d 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c @@ -420,8 +420,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) mp->stats.tx_bytes += skb->len; /* We need to copy into our xmit buffer to take care of alignment and caching issues */ - - memcpy((void *) mp->tx_ring, skb->data, skb->len); + skb_copy_from_linear_data(skb, mp->tx_ring, skb->len); /* load the Tx DMA and fire it off */ diff --git a/drivers/net/meth.c b/drivers/net/meth.c index fafe67835238..0343ea12b299 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c @@ -608,7 +608,7 @@ static void meth_tx_short_prepare(struct meth_private *priv, desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); /* maybe I should set whole thing to 0 first... */ - memcpy(desc->data.dt + (120 - len), skb->data, skb->len); + skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len); if (skb->len < len) memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); } @@ -626,8 +626,8 @@ static void meth_tx_1page_prepare(struct meth_private *priv, /* unaligned part */ if (unaligned_len) { - memcpy(desc->data.dt + (120 - unaligned_len), - skb->data, unaligned_len); + skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), + unaligned_len); desc->header.raw |= (128 - unaligned_len) << 16; } @@ -652,8 +652,8 @@ static void meth_tx_2page_prepare(struct meth_private *priv, desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); /* unaligned part */ if (unaligned_len){ - memcpy(desc->data.dt + (120 - unaligned_len), - skb->data, unaligned_len); + skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), + unaligned_len); desc->header.raw |= (128 - unaligned_len) << 16; } diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index e1f16fb05846..13444da93273 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -502,7 +502,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) copy_skb->dev = dev; DRX(("resv_and_put ")); skb_put(copy_skb, len); - memcpy(copy_skb->data, skb->data, len); + skb_copy_from_linear_data(skb, copy_skb->data, len); /* Reuse original ring buffer. 
*/ DRX(("reuse ")); diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index b488e94bc4c0..ab25c225a07e 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -920,8 +920,10 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* copy the next 64 bytes - should be enough except * for pathological case */ - memcpy((void *)hwdesc, (void *)(skb->data) + - first_hdr_len, hdr_len - first_hdr_len); + skb_copy_from_linear_data_offset(skb, first_hdr_len, + hwdesc, + (hdr_len - + first_hdr_len)); producer = get_next_index(producer, max_tx_desc_count); } } diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index 70b6812a8a75..8646698c77d4 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -1182,7 +1182,7 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev) else #endif { - memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); + skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len); len = skb->len; if (len < ETH_ZLEN) { len = ETH_ZLEN; diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index 782201d12c22..3818edf0ac18 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c @@ -1176,8 +1176,9 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev) if( (unsigned long) (skb->data + skb->len) > 0x1000000) { #endif - memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data, - (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len); + skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum], + skb->len > T_BUF_SIZE ? T_BUF_SIZE : + skb->len); if (len > skb->len) memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); dev_kfree_skb (skb); diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index 099972c977ef..df8998b4f37e 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c @@ -1344,7 +1344,7 @@ static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev) tp->tx_info[entry].skb = skb; /* tp->tx_info[entry].mapping = 0; */ - memcpy (tp->tx_buf[entry], skb->data, skb->len); + skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len); /* Note: the chip doesn't have auto-pad! 
*/ NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)), diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index fabbe95c7ef1..808fae1577e0 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -1136,7 +1136,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) ei_block_output(dev, length, skb->data, output_page); else { memset(packet, 0, ETH_ZLEN); - memcpy(packet, skb->data, skb->len); + skb_copy_from_linear_data(skb, packet, skb->len); ei_block_output(dev, length, packet, output_page); } diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index b6f0e9a25e26..5918fab38349 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -594,7 +594,8 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) return NULL; } skb_reserve(npkt,2); - memcpy(skb_put(npkt,skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, + skb_put(npkt, skb->len), skb->len); kfree_skb(skb); skb = npkt; } diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index e94790632d55..e9fb616ff663 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -869,7 +869,8 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) goto abort; skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr)); - memcpy(skb_put(skb2, skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, skb_put(skb2, skb->len), + skb->len); } else { /* Make a clone so as to not disturb the original skb, * give dev_queue_xmit something it can free. diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 40d2639eedcb..7b80fb7a9d9b 100755 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -1927,7 +1927,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, * Copy the ethhdr from first buffer to second. This * is necessary for 3022 IP completions. */ - memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size); + skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, + skb_push(skb2, size), size); } else { u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); if (checksum & diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index 3a4fce384504..25c73d47daad 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c @@ -1451,7 +1451,7 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev) } skb_reserve(new_skb, 8); skb_put(new_skb, len); - memcpy(new_skb->data, skb->data, len); + skb_copy_from_linear_data(skb, new_skb->data, len); dev_kfree_skb(skb); skb = new_skb; } diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index 5a8919132186..d8c9c5d66d4f 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -534,7 +534,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) * entry and the HPC got to the end of the chain before we * added this new entry and restarted it. 
*/ - memcpy((char *)(long)td->buf_vaddr, skb->data, skblen); + skb_copy_from_linear_data(skb, (char *)(long)td->buf_vaddr, skblen); if (len != skblen) memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen); td->tdma.cntinfo = (len & HPCDMA_BCNT) | diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 99b61cfb7ce6..f1a0e6c0fbdd 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -2950,7 +2950,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, pci_dma_sync_single_for_cpu(skge->hw->pdev, pci_unmap_addr(e, mapaddr), len, PCI_DMA_FROMDEVICE); - memcpy(skb->data, e->skb->data, len); + skb_copy_from_linear_data(e->skb, skb->data, len); pci_dma_sync_single_for_device(skge->hw->pdev, pci_unmap_addr(e, mapaddr), len, PCI_DMA_FROMDEVICE); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index fd291fc93169..238c2ca34da6 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1971,7 +1971,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, skb_reserve(skb, 2); pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, length, PCI_DMA_FROMDEVICE); - memcpy(skb->data, re->skb->data, length); + skb_copy_from_linear_data(re->skb, skb->data, length); skb->ip_summed = re->skb->ip_summed; skb->csum = re->skb->csum; pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index 5bcc749bef11..396c3d961f88 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -1026,7 +1026,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) memset((char *)p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); len = ETH_ZLEN; } - memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); + skb_copy_from_linear_data(skb, p->xmit_cbuffs[p->xmit_count], skb->len); #if (NUM_XMIT_BUFFS == 1) # ifdef NO_NOPCOMMANDS diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index 0454827c8c21..327ed7962fbd 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c @@ -629,7 +629,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) head->length = (-len) | 0xf000; head->misc = 0; - memcpy( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); + skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len); if (len != skb->len) memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 4bb89dec5650..9df1038ec6bb 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -848,7 +848,7 @@ static int gem_rx(struct gem *gp, int work_to_do) skb_reserve(copy_skb, 2); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); - memcpy(copy_skb->data, skb->data, len); + skb_copy_from_linear_data(skb, copy_skb->data, len); pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); /* We'll reuse the original ring buffer. */ diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 4b69c1deb9f3..5304d7b94e5e 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -2061,7 +2061,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) skb_reserve(copy_skb, 2); skb_put(copy_skb, len); hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE); - memcpy(copy_skb->data, skb->data, len); + skb_copy_from_linear_data(skb, copy_skb->data, len); hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE); /* Reuse original ring buffer. 
*/ diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 8f53a1ef6083..42722530ab24 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -1143,7 +1143,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) struct lance_init_block *ib = lp->init_block_mem; ib->btx_ring [entry].length = (-len) | 0xf000; ib->btx_ring [entry].misc = 0; - memcpy((char *)&ib->tx_buf [entry][0], skb->data, skblen); + skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); if (len != skblen) memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index fbfb98284fde..fa70e0b78af7 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c @@ -592,7 +592,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Avoid a race... */ qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; - memcpy(txbuf, skb->data, len); + skb_copy_from_linear_data(skb, txbuf, len); qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; qep->qe_block->qe_txd[entry].tx_flags = diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 414365c3198d..38383e4e07a1 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -3350,7 +3350,7 @@ static int tg3_rx(struct tg3 *tp, int budget) skb_reserve(copy_skb, 2); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); - memcpy(copy_skb->data, skb->data, len); + skb_copy_from_linear_data(skb, copy_skb->data, len); pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); /* We'll reuse the original ring buffer. */ diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 2ede3f58cf97..106dc1ef0acb 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c @@ -1112,7 +1112,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) if ( bbuf ) { tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); - memcpy( tail_buffer, skb->data, skb->len ); + skb_copy_from_linear_data(skb, tail_buffer, skb->len); } else { tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); TLan_StoreSKB(tail_list, skb); diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index d293423ee8e3..e22a3f5333ef 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -937,14 +937,17 @@ static void xl_rx(struct net_device *dev) copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ; frame_length -= copy_len ; pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; - memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, copy_len) ; + skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], + skb_put(skb, copy_len), + copy_len); pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; adv_rx_ring(dev) ; } /* Now we have found the last fragment */ pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; - memcpy(skb_put(skb,copy_len), xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]->data, frame_length) ; + skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail], + skb_put(skb,copy_len), frame_length); /* memcpy(skb_put(skb,frame_length), 
bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */ pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; adv_rx_ring(dev) ; diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index a62065808881..09b3cfb8e809 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c @@ -845,7 +845,9 @@ static void olympic_rx(struct net_device *dev) pci_dma_sync_single_for_cpu(olympic_priv->pdev, le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; - memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ; + skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received], + skb_put(skb,length - 4), + length - 4); pci_dma_sync_single_for_device(olympic_priv->pdev, le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; @@ -862,7 +864,9 @@ static void olympic_rx(struct net_device *dev) olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]); cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length)); - memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ; + skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received], + skb_put(skb, cpy_length), + cpy_length); pci_dma_sync_single_for_device(olympic_priv->pdev, le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index de6f72775ecc..e6f0817c3509 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c @@ -644,7 +644,7 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device dmabuf = 0; i = tp->TplFree->TPLIndex; buf = tp->LocalTxBuffers[i]; - memcpy(buf, skb->data, length); + skb_copy_from_linear_data(skb, buf, length); newbuf = ((char *)buf - (char *)tp) + tp->dmabuffer; } else { diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 8a7effa70904..d19f8568440f 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -449,8 +449,8 @@ static void de_rx (struct de_private *de) } else { pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); skb_reserve(copy_skb, RX_OFFSET); - memcpy(skb_put(copy_skb, len), skb->data, len); - + skb_copy_from_linear_data(skb, skb_put(copy_skb, len), + len); pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); /* We'll reuse the original ring buffer. 
*/ diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index a5e0237a6537..b3a64ca98634 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c @@ -682,7 +682,7 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev) /* transmit this packet */ txptr = db->tx_insert_ptr; - memcpy(txptr->tx_buf_ptr, skb->data, skb->len); + skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); /* Point to next transmit free descriptor */ @@ -989,7 +989,9 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) skb = newskb; /* size less than COPY_SIZE, allocate a rxlen SKB */ skb_reserve(skb, 2); /* 16byte align */ - memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen); + skb_copy_from_linear_data(rxptr->rx_skb_ptr, + skb_put(skb, rxlen), + rxlen); dmfe_reuse_skb(db, rxptr->rx_skb_ptr); } else skb_put(skb, rxlen); diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index e46f4cb02c15..ca2548eb7d63 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -583,7 +583,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* transmit this packet */ txptr = db->tx_insert_ptr; - memcpy(txptr->tx_buf_ptr, skb->data, skb->len); + skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); /* Point to next transmit free descriptor */ diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 1fe3734e155b..985a1810ca59 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c @@ -411,9 +411,9 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) sometimes sends more than you ask it to. */ memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); - memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len); - - + skb_copy_from_linear_data(skb, + &(card->tx_buffer[bufferoffsets[desc] / 4]), + skb->len); /* FIXME: The specification tells us that the length we send HAS to be a multiple of 4 bytes. */ diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c index 3f24c82755fc..696b3b8aac8e 100644 --- a/drivers/net/tulip/xircom_tulip_cb.c +++ b/drivers/net/tulip/xircom_tulip_cb.c @@ -915,7 +915,9 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) tp->tx_skbuff[entry] = skb; if (tp->chip_id == X3201_3) { - memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len); + skb_copy_from_linear_data(skb, + tp->tx_aligned_skbuff[entry]->data, + skb->len); tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data); } else tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 288d8559f8c5..4d461595406d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -386,8 +386,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, * - we are multicast promiscous. * - we belong to the multicast group. 
*/ - memcpy(addr, skb->data, - min_t(size_t, sizeof addr, skb->len)); + skb_copy_from_linear_data(skb, addr, min_t(size_t, sizeof addr, + skb->len)); bit_nr = ether_crc(sizeof addr, addr) >> 26; if ((tun->if_flags & IFF_PROMISC) || memcmp(addr, tun->dev_addr, sizeof addr) == 0 || diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 422eaf8ea12d..25b75b615188 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1339,7 +1339,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) skb_reserve(new_skb, 2); - memcpy(new_skb->data, rx_skb[0]->data, pkt_size); + skb_copy_from_linear_data(rx_skb[0], new_skb->data, + pkt_size); *rx_skb = new_skb; ret = 0; } @@ -1927,7 +1928,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) if (pktlen < ETH_ZLEN) { /* Cannot occur until ZC support */ pktlen = ETH_ZLEN; - memcpy(tdinfo->buf, skb->data, skb->len); + skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); tdinfo->skb = skb; tdinfo->skb_dma[0] = tdinfo->buf_dma; @@ -1943,7 +1944,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) int nfrags = skb_shinfo(skb)->nr_frags; tdinfo->skb = skb; if (nfrags > 6) { - memcpy(tdinfo->buf, skb->data, skb->len); + skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); tdinfo->skb_dma[0] = tdinfo->buf_dma; td_ptr->tdesc0.pktsize = td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index a576113abbd9..ae132c1c5459 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1702,7 +1702,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ if(!nsb) { goto give_it_anyways; } - memcpy(skb_put(nsb, len), skb->data, len); + skb_copy_from_linear_data(skb, skb_put(nsb, len), len); nsb->protocol = lmc_proto_type(sc, skb); skb_reset_mac_header(nsb); diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index edbc55528be5..8ba75bb17326 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c @@ -1765,7 +1765,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx) skb->data[7] = ']'; skb->data[8] = ':'; skb->data[9] = ' '; - memcpy(&skb->data[10], skb_main->data, skb_main->len); + skb_copy_from_linear_data(skb_main, &skb->data[10], skb_main->len); netif_rx(skb); } diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 8b4540bfc1b0..9432d2ce7745 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -1782,7 +1782,7 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) */ c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used]; c->tx_dma_used^=1; /* Flip temp buffer */ - memcpy(c->tx_next_ptr, skb->data, skb->len); + skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len); } else c->tx_next_ptr=skb->data; diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 1c17cbe007ba..51a7db53afa5 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -827,14 +827,14 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) if (priv->wep_is_on) frame_ctl |= IEEE80211_FCTL_PROTECTED; if (priv->operating_mode == IW_MODE_ADHOC) { - memcpy(&header.addr1, skb->data, 6); + skb_copy_from_linear_data(skb, &header.addr1, 6); memcpy(&header.addr2, dev->dev_addr, 6); memcpy(&header.addr3, priv->BSSID, 6); } else { frame_ctl |= 
IEEE80211_FCTL_TODS; memcpy(&header.addr1, priv->CurrentBSSID, 6); memcpy(&header.addr2, dev->dev_addr, 6); - memcpy(&header.addr3, skb->data, 6); + skb_copy_from_linear_data(skb, &header.addr3, 6); } if (priv->use_wpa) diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c index 6e0dc76400e5..e3d2e61a31ee 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c @@ -998,7 +998,8 @@ static void dma_tx_fragment(struct bcm43xx_dmaring *ring, assert(0); return; } - memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len), + skb->len); dev_kfree_skb_any(skb); skb = bounce_skb; } diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index 35a3a50724fe..cbedc9ee740a 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -933,12 +933,14 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, if (frag == 0) { /* copy first fragment (including full headers) into * beginning of the fragment cache skb */ - memcpy(skb_put(frag_skb, flen), skb->data, flen); + skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), + flen); } else { /* append frame payload to the end of the fragment * cache skb */ - memcpy(skb_put(frag_skb, flen), skb->data + hdrlen, - flen); + skb_copy_from_linear_data_offset(skb, hdrlen, + skb_put(frag_skb, + flen), flen); } dev_kfree_skb(skb); skb = NULL; @@ -1044,8 +1046,9 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, skb->len >= ETH_HLEN + ETH_ALEN) { /* Non-standard frame: get addr4 from its bogus location after * the payload */ - memcpy(skb->data + ETH_ALEN, - skb->data + skb->len - ETH_ALEN, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN, + skb->data + ETH_ALEN, + ETH_ALEN); skb_trim(skb, skb->len - ETH_ALEN); } diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 159baef18e4a..246fac0e8001 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c @@ -146,7 +146,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA, * Addr4 = SA */ - memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, ETH_ALEN, + &hdr.addr4, ETH_ALEN); hdr_len += ETH_ALEN; } else { /* bogus 4-addr format to workaround Prism2 station @@ -159,7 +160,8 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) /* SA from skb->data + ETH_ALEN will be added after * frame payload; use hdr.addr4 as a temporary buffer */ - memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, ETH_ALEN, + &hdr.addr4, ETH_ALEN); need_tailroom += ETH_ALEN; } @@ -174,24 +176,27 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) else memcpy(&hdr.addr1, local->bssid, ETH_ALEN); memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); - memcpy(&hdr.addr3, skb->data, ETH_ALEN); + skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) { fc |= IEEE80211_FCTL_FROMDS; /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */ - memcpy(&hdr.addr1, skb->data, ETH_ALEN); + skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); 
memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); - memcpy(&hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3, + ETH_ALEN); } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) { fc |= IEEE80211_FCTL_TODS; /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ memcpy(&hdr.addr1, to_assoc_ap ? local->assoc_ap_addr : local->bssid, ETH_ALEN); - memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); - memcpy(&hdr.addr3, skb->data, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, + ETH_ALEN); + skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); } else if (local->iw_mode == IW_MODE_ADHOC) { /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ - memcpy(&hdr.addr1, skb->data, ETH_ALEN); - memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, + ETH_ALEN); memcpy(&hdr.addr3, local->bssid, ETH_ALEN); } diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index 797d950d5d61..4ca8a27b8c55 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -1277,8 +1277,8 @@ static char * ap_auth_make_challenge(struct ap_data *ap) return NULL; } - memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len, - WLAN_AUTH_CHALLENGE_LEN); + skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len, + tmpbuf, WLAN_AUTH_CHALLENGE_LEN); dev_kfree_skb(skb); return tmpbuf; diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 9003ff7d151a..fb01fb95a9f0 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -1838,13 +1838,14 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev) /* skb->data starts with txdesc->frame_control */ hdr_len = 24; - memcpy(&txdesc.frame_control, skb->data, hdr_len); + skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len); fc = le16_to_cpu(txdesc.frame_control); if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA && (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) && skb->len >= 30) { /* Addr4 */ - memcpy(txdesc.addr4, skb->data + hdr_len, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4, + ETH_ALEN); hdr_len += ETH_ALEN; } diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index ad6e4a428355..9137a4dd02eb 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c @@ -2416,8 +2416,9 @@ static void isr_rx(struct ipw2100_priv *priv, int i, #ifdef IPW2100_RX_DEBUG /* Make a copy of the frame so we can dump it to the logs if * ieee80211_rx fails */ - memcpy(packet_data, packet->skb->data, - min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH)); + skb_copy_from_linear_data(packet->skb, packet_data, + min_t(u32, status->frame_size, + IPW_RX_NIC_BUFFER_LENGTH)); #endif if (!ieee80211_rx(priv->ieee, packet->skb, stats)) { diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index b04c56a25cc5..4839a45098cb 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c @@ -10355,7 +10355,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, rt_hdr->it_len = dst->len; - memcpy(skb_put(dst, len), src->data, len); + skb_copy_from_linear_data(src, skb_put(dst, len), len); if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats)) dev_kfree_skb_any(dst); diff 
--git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index 6ebfff034242..7d8bff1dbc4d 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c @@ -162,13 +162,16 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) skb_put(newskb, init_wds ? skb->len + 6 : skb->len); if (init_wds) { - memcpy(newskb->data + 6, skb->data, skb->len); + skb_copy_from_linear_data(skb, + newskb->data + 6, + skb->len); memcpy(newskb->data, wds_mac, 6); #ifdef ISLPCI_ETH_DEBUG printk("islpci_eth_transmit:wds_mac\n"); #endif } else - memcpy(newskb->data, skb->data, skb->len); + skb_copy_from_linear_data(skb, newskb->data, + skb->len); #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n", @@ -394,8 +397,10 @@ islpci_eth_receive(islpci_private *priv) /* Update spy records */ wireless_spy_update(ndev, annex->addr2, &wstats); - memcpy(skb->data + sizeof (struct rfmon_header), - skb->data, 2 * ETH_ALEN); + skb_copy_from_linear_data(skb, + (skb->data + + sizeof(struct rfmon_header)), + 2 * ETH_ALEN); skb_pull(skb, sizeof (struct rfmon_header)); } skb->protocol = eth_type_trans(skb, ndev); diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 9633b0457f8c..3be624295a1f 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -2242,7 +2242,8 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i rx_ptr += copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len); /* Get source address */ #ifdef WIRELESS_SPY - memcpy(linksrcaddr, ((struct mac_header *)skb->data)->addr_2, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, offsetof(struct mac_header, addr_2), + linksrcaddr, ETH_ALEN); #endif /* Now, deal with encapsulation/translation/sniffer */ if (!sniffer) { diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index 2bf77b1ee531..1cf090d60edc 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c @@ -2938,7 +2938,7 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev) * need to pad. 
Jean II */ if (skb->len < ETH_ZLEN) { memset(data, 0, ETH_ZLEN); - memcpy(data, skb->data, skb->len); + skb_copy_from_linear_data(skb, data, skb->len); /* Write packet on the card */ if(wv_packet_write(dev, data, ETH_ZLEN)) return 1; /* We failed */ diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 1fe013a7297a..935b144d9b56 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c @@ -807,10 +807,10 @@ static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) txbuf[4] = 0x00; txbuf[5] = 0x00; - memcpy(txbuf+6, skb->data+12, skb->len-12); + skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12); if (pad) txbuf[skb->len-12+6]=0; - memcpy(txbuf+skb->len-12+6+pad, skb->data, 12); + skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12); *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6); txbuf[txbuflen-1] = 0; diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 54e3f806cd52..b0f813e6f48e 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -472,7 +472,8 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) privptr->stats.rx_dropped++; return; } - memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); + skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), + pskb->len); skb_reset_mac_header(skb); skb->dev = pskb->dev; skb->protocol = pskb->protocol; @@ -716,8 +717,9 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg) *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; i = 0; while ((skb = skb_dequeue(&ch->collect_queue))) { - memcpy(skb_put(ch->trans_skb, skb->len), skb->data, - skb->len); + skb_copy_from_linear_data(skb, skb_put(ch->trans_skb, + skb->len), + skb->len); privptr->stats.tx_packets++; privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; atomic_dec(&skb->users); @@ -2268,8 +2270,9 @@ transmit_skb(struct channel *ch, struct sk_buff *skb) skb_reset_tail_pointer(ch->trans_skb); ch->trans_skb->len = 0; ch->ccw[1].count = skb->len; - memcpy(skb_put(ch->trans_skb, skb->len), skb->data, - skb->len); + skb_copy_from_linear_data(skb, skb_put(ch->trans_skb, + skb->len), + skb->len); atomic_dec(&skb->users); dev_kfree_skb_irq(skb); ccw_idx = 0; diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 1c23e187a3ba..08a994fdd1a4 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1576,7 +1576,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, header->offset = card->tx_buffer->count; header->type = card->lan_type; header->slot = card->portno; - memcpy(header + 1, skb->data, skb->len); + skb_copy_from_linear_data(skb, header + 1, skb->len); spin_unlock(&card->lock); card->stats.tx_bytes += skb->len; card->stats.tx_packets++; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index cd42bd54988c..e10e85e85c84 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -645,7 +645,8 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, privptr->stats.rx_dropped++; return; } - memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); + skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), + pskb->len); skb_reset_mac_header(skb); skb->dev = pskb->dev; skb->protocol = pskb->protocol; @@ -744,7 +745,9 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); - memcpy(skb_put(conn->tx_buff, 
skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, + skb_put(conn->tx_buff, skb->len), + skb->len); txbytes += skb->len; txpackets++; stat_maxcq++; diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 5890bb5ad23e..dd7034fbfff8 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -267,7 +267,8 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, QETH_DBF_TEXT(trace, 5, "eddpcdtc"); if (skb_shinfo(eddp->skb)->nr_frags == 0) { - memcpy(dst, eddp->skb->data + eddp->skb_offset, len); + skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset, + dst, len); *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len, *hcsum); eddp->skb_offset += len; diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index a076f735a7bc..d287c5755229 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -484,7 +484,7 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance, ptr[4] = 0xec; ptr += ATM_CELL_HEADER; - memcpy(ptr, skb->data, data_len); + skb_copy_from_linear_data(skb, ptr, data_len); ptr += data_len; __skb_pull(skb, data_len); diff --git a/drivers/usb/net/catc.c b/drivers/usb/net/catc.c index d82022dd7f2e..ffec2e01b896 100644 --- a/drivers/usb/net/catc.c +++ b/drivers/usb/net/catc.c @@ -418,7 +418,7 @@ static int catc_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; *((u16*)tx_buf) = (catc->is_f5u011) ? cpu_to_be16((u16)skb->len) : cpu_to_le16((u16)skb->len); - memcpy(tx_buf + 2, skb->data, skb->len); + skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); catc->tx_ptr += skb->len + 2; if (!test_and_set_bit(TX_RUNNING, &catc->flags)) diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c index 13f70e09ea40..1ad4ee54b186 100644 --- a/drivers/usb/net/pegasus.c +++ b/drivers/usb/net/pegasus.c @@ -889,7 +889,7 @@ static int pegasus_start_xmit(struct sk_buff *skb, struct net_device *net) netif_stop_queue(net); ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16); - memcpy(pegasus->tx_buff + 2, skb->data, skb->len); + skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len); usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb, usb_sndbulkpipe(pegasus->usb, 2), pegasus->tx_buff, count, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1c19b2d55c2b..08c96bcbc59c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1507,6 +1507,20 @@ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, return buffer; } +static inline void skb_copy_from_linear_data(const struct sk_buff *skb, + void *to, + const unsigned int len) +{ + memcpy(to, skb->data, len); +} + +static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, + const int offset, void *to, + const unsigned int len) +{ + memcpy(to, skb->data + offset, len); +} + extern void skb_init(void); /** diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index e66953ce53e7..92b517af7260 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c @@ -150,7 +150,7 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb) skb_reserve(skbn, frontlen + 2); skb_set_network_header(skbn, skb_network_offset(skb)); - memcpy(skb_put(skbn, len), skb->data, len); + skb_copy_from_linear_data(skb, skb_put(skbn, len), len); p = skb_push(skbn, 2); *p++ = AX25_P_SEGMENT; @@ -164,7 +164,7 @@ void ax25_output(ax25_cb *ax25, int 
paclen, struct sk_buff *skb) skb_reserve(skbn, frontlen + 1); skb_set_network_header(skbn, skb_network_offset(skb)); - memcpy(skb_put(skbn, len), skb->data, len); + skb_copy_from_linear_data(skb, skb_put(skbn, len), len); p = skb_push(skbn, 1); *p = AX25_P_TEXT; } diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 97156c4abc8d..ab2db55982ca 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -382,7 +382,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) break; } - memcpy(__skb_put(nskb, skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); kfree_skb(skb); s->stats.rx_packets++; diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 3933608a9296..66bef1ccee2a 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -124,7 +124,7 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const } if (skb && (skb->len > 0)) - memcpy(skb_put(nskb, skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len); memcpy(skb_put(nskb, count), buf, count); @@ -256,7 +256,7 @@ static void cmtp_process_transmit(struct cmtp_session *session) hdr[2] = size >> 8; } - memcpy(skb_put(nskb, size), skb->data, size); + skb_copy_from_linear_data(skb, skb_put(nskb, size), size); skb_pull(skb, size); if (skb->len > 0) { diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 162eab6a4478..a5867879b615 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -2107,7 +2107,8 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC))) goto drop; - memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); conn->rx_len = len - skb->len; } else { BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); @@ -2128,7 +2129,8 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl goto drop; } - memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); conn->rx_len -= skb->len; if (!conn->rx_len) { diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 8cee7fdc16c3..8b45224699f4 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -129,7 +129,8 @@ static inline void nf_bridge_save_header(struct sk_buff *skb) if (skb->protocol == htons(ETH_P_8021Q)) header_size += VLAN_HLEN; - memcpy(skb->nf_bridge->data, skb->data - header_size, header_size); + skb_copy_from_linear_data_offset(skb, -header_size, + skb->nf_bridge->data, header_size); } /* diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f16c72204cf6..17c6bb5927b6 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -576,7 +576,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) /* Set the tail pointer and length */ skb_put(n, skb_headlen(skb)); /* Copy the bytes */ - memcpy(n->data, skb->data, n->len); + skb_copy_from_linear_data(skb, n->data, n->len); n->csum = skb->csum; n->ip_summed = skb->ip_summed; @@ -1043,7 +1043,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) if ((copy = start - offset) > 0) { if (copy > len) copy = len; - memcpy(to, skb->data + offset, copy); + skb_copy_from_linear_data_offset(skb, offset, to, copy); if ((len -= copy) == 0) return 0; offset += 
copy; @@ -1362,7 +1362,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) BUG_ON(csstart > skb_headlen(skb)); - memcpy(to, skb->data, csstart); + skb_copy_from_linear_data(skb, to, csstart); csum = 0; if (csstart != skb->len) @@ -1536,8 +1536,8 @@ static inline void skb_split_inside_header(struct sk_buff *skb, { int i; - memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len); - + skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), + pos - len); /* And move data appendix as is. */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; @@ -1927,8 +1927,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) skb_set_network_header(nskb, skb->mac_len); nskb->transport_header = (nskb->network_header + skb_network_header_len(skb)); - memcpy(skb_put(nskb, doffset), skb->data, doffset); - + skb_copy_from_linear_data(skb, skb_put(nskb, doffset), + doffset); if (!sg) { nskb->csum = skb_copy_and_csum_bits(skb, offset, skb_put(nskb, len), @@ -1941,7 +1941,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) nskb->ip_summed = CHECKSUM_PARTIAL; nskb->csum = skb->csum; - memcpy(skb_put(nskb, hsize), skb->data + offset, hsize); + skb_copy_from_linear_data_offset(skb, offset, + skb_put(nskb, hsize), hsize); while (pos < offset + len) { BUG_ON(i >= nfrags); diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index d9498a165acf..4074a6e5d0de 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -362,7 +362,8 @@ static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb) u16 dlen = *skb->data; if ((dlen <= 16) && (dlen <= skb->len)) { scp->conndata_in.opt_optl = dn_htons(dlen); - memcpy(scp->conndata_in.opt_data, skb->data + 1, dlen); + skb_copy_from_linear_data_offset(skb, 1, + scp->conndata_in.opt_data, dlen); } } dn_nsp_send_link(sk, DN_NOCHANGE, 0); @@ -406,7 +407,7 @@ static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb) u16 dlen = *skb->data; if ((dlen <= 16) && (dlen <= skb->len)) { scp->discdata_in.opt_optl = dn_htons(dlen); - memcpy(scp->discdata_in.opt_data, skb->data + 1, dlen); + skb_copy_from_linear_data_offset(skb, 1, scp->discdata_in.opt_data, dlen); } } diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index ec6d8851a061..4eb35079e434 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c @@ -152,7 +152,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) return -1; /* Copy the IV into the first 3 bytes of the key */ - memcpy(key, skb->data + hdr_len, 3); + skb_copy_from_linear_data_offset(skb, hdr_len, key, 3); /* Copy rest of the WEP key (the secret part) */ memcpy(key + 3, wep->key, wep->key_len); diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 59a765c49cf9..94e2b8e2ab26 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -606,12 +606,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, if (frag == 0) { /* copy first fragment (including full headers) into * beginning of the fragment cache skb */ - memcpy(skb_put(frag_skb, flen), skb->data, flen); + skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen); } else { /* append frame payload to the end of the fragment * cache skb */ - memcpy(skb_put(frag_skb, flen), skb->data + hdrlen, - flen); + skb_copy_from_linear_data_offset(skb, hdrlen, + skb_put(frag_skb, flen), flen); } dev_kfree_skb_any(skb); skb = 
NULL; diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 62a8a2b76539..a4c3c51140a3 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -309,8 +309,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) } /* Save source and destination addresses */ - memcpy(dest, skb->data, ETH_ALEN); - memcpy(src, skb->data + ETH_ALEN, ETH_ALEN); + skb_copy_from_linear_data(skb, dest, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN); if (host_encrypt || host_build_iv) fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | @@ -363,7 +363,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) snapped = 1; ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)), ether_type); - memcpy(skb_put(skb_new, skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len); res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv); if (res < 0) { IEEE80211_ERROR("msdu encryption failed\n"); @@ -492,7 +492,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) bytes -= SNAP_SIZE + sizeof(u16); } - memcpy(skb_put(skb_frag, bytes), skb->data, bytes); + skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes); /* Advance the SKB... */ skb_pull(skb, bytes); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 875da382d9b9..34606eff8a05 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -596,7 +596,7 @@ slow_path: * Copy the packet header into the new buffer. */ - memcpy(skb_network_header(skb2), skb->data, hlen); + skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen); /* * Copy a block of the IP datagram. diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index be3f082a87ed..4cfdad4e8356 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -746,7 +746,7 @@ slow_path: /* * Copy the packet header into the new buffer. */ - memcpy(skb_network_header(frag), skb->data, hlen); + skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); /* * Build fragment header. diff --git a/net/irda/irttp.c b/net/irda/irttp.c index da3f2bc1b6f6..b55bc8f989df 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -314,8 +314,8 @@ static inline void irttp_fragment_skb(struct tsap_cb *self, skb_reserve(frag, self->max_header_size); /* Copy data from the original skb into this fragment. 
*/ - memcpy(skb_put(frag, self->max_seg_size), skb->data, - self->max_seg_size); + skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size), + self->max_seg_size); /* Insert TTP header, with the more bit set */ frame = skb_push(frag, TTP_HEADER); diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 053fa26ff90a..5dc7448925db 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1160,7 +1160,8 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, if (sax != NULL) { sax->sax25_family = AF_NETROM; - memcpy(sax->sax25_call.ax25_call, skb->data + 7, AX25_ADDR_LEN); + skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, + AX25_ADDR_LEN); } msg->msg_namelen = sizeof(*sax); diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c index 99fdab16ded0..f324d5df4186 100644 --- a/net/netrom/nr_loopback.c +++ b/net/netrom/nr_loopback.c @@ -34,7 +34,7 @@ int nr_loopback_queue(struct sk_buff *skb) struct sk_buff *skbn; if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) { - memcpy(skb_put(skbn, skb->len), skb->data, skb->len); + skb_copy_from_linear_data(skb, skb_put(skbn, skb->len), skb->len); skb_reset_transport_header(skbn); skb_queue_tail(&loopback_queue, skbn); diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c index 0cbfb611465b..7c467c95c7d6 100644 --- a/net/netrom/nr_out.c +++ b/net/netrom/nr_out.c @@ -40,7 +40,7 @@ void nr_output(struct sock *sk, struct sk_buff *skb) if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) { /* Save a copy of the Transport Header */ - memcpy(transport, skb->data, NR_TRANSPORT_LEN); + skb_copy_from_linear_data(skb, transport, NR_TRANSPORT_LEN); skb_pull(skb, NR_TRANSPORT_LEN); frontlen = skb_headroom(skb); @@ -54,7 +54,7 @@ void nr_output(struct sock *sk, struct sk_buff *skb) len = (NR_MAX_PACKET_SIZE > skb->len) ? skb->len : NR_MAX_PACKET_SIZE; /* Copy the user data */ - memcpy(skb_put(skbn, len), skb->data, len); + skb_copy_from_linear_data(skb, skb_put(skbn, len), len); skb_pull(skb, len); /* Duplicate the Transport Header */ diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c index 07b694d18870..04e7d0d2fd8f 100644 --- a/net/netrom/nr_subr.c +++ b/net/netrom/nr_subr.c @@ -226,13 +226,13 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags) dptr = skb_put(skbn, NR_NETWORK_LEN + NR_TRANSPORT_LEN); - memcpy(dptr, skb->data + 7, AX25_ADDR_LEN); + skb_copy_from_linear_data_offset(skb, 7, dptr, AX25_ADDR_LEN); dptr[6] &= ~AX25_CBIT; dptr[6] &= ~AX25_EBIT; dptr[6] |= AX25_SSSID_SPARE; dptr += AX25_ADDR_LEN; - memcpy(dptr, skb->data + 0, AX25_ADDR_LEN); + skb_copy_from_linear_data(skb, dptr, AX25_ADDR_LEN); dptr[6] &= ~AX25_CBIT; dptr[6] |= AX25_EBIT; dptr[6] |= AX25_SSSID_SPARE; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 1511697b22ba..f38c3b3471ee 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1156,7 +1156,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, int lg; /* Save a copy of the Header */ - memcpy(header, skb->data, ROSE_MIN_LEN); + skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN); skb_pull(skb, ROSE_MIN_LEN); frontlen = skb_headroom(skb); @@ -1176,7 +1176,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, lg = (ROSE_PACLEN > skb->len) ? 
skb->len : ROSE_PACLEN; /* Copy the user data */ - memcpy(skb_put(skbn, lg), skb->data, lg); + skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg); skb_pull(skb, lg); /* Duplicate the Header */ diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index adcda8ebee9c..0d6002fc77b2 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -951,7 +951,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, * Incoming Call User Data. */ if (skb->len >= 0) { - memcpy(makex25->calluserdata.cuddata, skb->data, skb->len); + skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len); makex25->calluserdata.cudlength = skb->len; } diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index b2bbe552a89d..ba13248aa1c3 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -112,8 +112,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp * Copy any Call User Data. */ if (skb->len >= 0) { - memcpy(x25->calluserdata.cuddata, skb->data, - skb->len); + skb_copy_from_linear_data(skb, + x25->calluserdata.cuddata, + skb->len); x25->calluserdata.cudlength = skb->len; } if (!sock_flag(sk, SOCK_DEAD)) diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c index 6f5737853912..bb45e21ffce9 100644 --- a/net/x25/x25_out.c +++ b/net/x25/x25_out.c @@ -61,7 +61,7 @@ int x25_output(struct sock *sk, struct sk_buff *skb) if (skb->len - header_len > max_len) { /* Save a copy of the Header */ - memcpy(header, skb->data, header_len); + skb_copy_from_linear_data(skb, header, header_len); skb_pull(skb, header_len); frontlen = skb_headroom(skb); @@ -84,7 +84,7 @@ int x25_output(struct sock *sk, struct sk_buff *skb) len = max_len > skb->len ? skb->len : max_len; /* Copy the user data */ - memcpy(skb_put(skbn, len), skb->data, len); + skb_copy_from_linear_data(skb, skb_put(skbn, len), len); skb_pull(skb, len); /* Duplicate the Header */ -- cgit v1.2.3-59-g8ed1b From c45d286e72dd72c0229dc9e2849743ba427fee84 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 28 Mar 2007 14:29:08 -0700 Subject: [NET]: Inline net_device_stats Network drivers which keep stats allocate their own stats structure then write a get_stats() function to return them. It would be nice if this were done by default. 1) Add a new "stats" field to "struct net_device". 2) Add a new feature field to say "this driver uses the internal one" 3) Have a default "get_stats" which returns NULL if that feature not set. 4) Change callers to check result of get_stats call for NULL, not if ->get_stats is set. This should not break backwards compatibility with older drivers, yet allow modern drivers to shed some boilerplate code. Lightly tested: works for a modified lguest network driver. Signed-off-by: Rusty Russell Signed-off-by: David S. 
Miller --- arch/s390/appldata/appldata_net_sum.c | 4 ++-- drivers/net/bonding/bond_main.c | 5 ++--- drivers/parisc/led.c | 4 ++-- include/linux/netdevice.h | 2 ++ net/core/dev.c | 13 ++++++++++--- 5 files changed, 18 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index f64b8c867ae2..516b3ac9a9b5 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c @@ -108,10 +108,10 @@ static void appldata_get_net_sum_data(void *data) collisions = 0; read_lock(&dev_base_lock); for (dev = dev_base; dev != NULL; dev = dev->next) { - if (dev->get_stats == NULL) { + stats = dev->get_stats(dev); + if (stats == NULL) { continue; } - stats = dev->get_stats(dev); rx_packets += stats->rx_packets; tx_packets += stats->tx_packets; rx_bytes += stats->rx_bytes; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 76d3504505bd..cea3783c92c5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3640,9 +3640,8 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev) read_lock_bh(&bond->lock); bond_for_each_slave(bond, slave, i) { - if (slave->dev->get_stats) { - sstats = slave->dev->get_stats(slave->dev); - + sstats = slave->dev->get_stats(slave->dev); + if (sstats) { stats->rx_packets += sstats->rx_packets; stats->rx_bytes += sstats->rx_bytes; stats->rx_errors += sstats->rx_errors; diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index d190c05d87ed..453e6829756c 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -372,9 +372,9 @@ static __inline__ int led_get_net_activity(void) continue; if (LOOPBACK(in_dev->ifa_list->ifa_local)) continue; - if (!dev->get_stats) - continue; stats = dev->get_stats(dev); + if (!stats) + continue; rx_total += stats->rx_packets; tx_total += stats->tx_packets; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1a528548cd1d..71fc8ff4888b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -323,6 +323,7 @@ struct net_device #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ #define NETIF_F_GSO 2048 /* Enable software GSO. */ #define NETIF_F_LLTX 4096 /* LockLess TX */ +#define NETIF_F_INTERNAL_STATS 8192 /* Use stats structure in net_device */ /* Segmentation offload features */ #define NETIF_F_GSO_SHIFT 16 @@ -347,6 +348,7 @@ struct net_device struct net_device_stats* (*get_stats)(struct net_device *dev); + struct net_device_stats stats; /* List of functions to handle Wireless Extensions (instead of ioctl). * See for details. Jean II */ diff --git a/net/core/dev.c b/net/core/dev.c index 86dc9f693f66..fec8cf27f75d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -817,7 +817,6 @@ static int default_rebuild_header(struct sk_buff *skb) return 1; } - /** * dev_open - prepare an interface for use. 
* @dev: device to open @@ -2096,9 +2095,9 @@ void dev_seq_stop(struct seq_file *seq, void *v) static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { - if (dev->get_stats) { - struct net_device_stats *stats = dev->get_stats(dev); + struct net_device_stats *stats = dev->get_stats(dev); + if (stats) { seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", dev->name, stats->rx_bytes, stats->rx_packets, @@ -3282,6 +3281,13 @@ out: mutex_unlock(&net_todo_run_mutex); } +static struct net_device_stats *maybe_internal_stats(struct net_device *dev) +{ + if (dev->features & NETIF_F_INTERNAL_STATS) + return &dev->stats; + return NULL; +} + /** * alloc_netdev - allocate network device * @sizeof_priv: size of private data to allocate space for @@ -3317,6 +3323,7 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name, if (sizeof_priv) dev->priv = netdev_priv(dev); + dev->get_stats = maybe_internal_stats; setup(dev); strcpy(dev->name, name); return dev; -- cgit v1.2.3-59-g8ed1b From 27d7ff46a3498d3debc6ba68fb8014c702b81170 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 31 Mar 2007 11:55:19 -0300 Subject: [SK_BUFF]: Introduce skb_copy_to_linear_data{_offset} To clearly state the intent of copying to linear sk_buffs, _offset being a overly long variant but interesting for the sake of saving some bytes. Signed-off-by: Arnaldo Carvalho de Melo --- arch/ia64/hp/sim/simeth.c | 2 +- arch/ia64/sn/kernel/xpnet.c | 2 +- drivers/isdn/hysdn/hycapi.c | 3 +- drivers/net/8139too.c | 6 ++-- drivers/net/appletalk/ltpc.c | 2 +- drivers/net/atari_bionet.c | 3 +- drivers/net/atari_pamsnet.c | 3 +- drivers/net/chelsio/sge.c | 24 ++++++++++------ drivers/net/cxgb3/sge.c | 6 ++-- drivers/net/defxx.c | 4 ++- drivers/net/e100.c | 2 +- drivers/net/e1000/e1000_main.c | 9 ++++-- drivers/net/ehea/ehea_main.c | 4 +-- drivers/net/irda/ali-ircc.c | 2 +- drivers/net/irda/au1k_ir.c | 2 +- drivers/net/irda/donauboe.c | 4 +-- drivers/net/irda/mcs7780.c | 4 +-- drivers/net/irda/nsc-ircc.c | 8 ++++-- drivers/net/irda/pxaficp_ir.c | 2 +- drivers/net/irda/stir4200.c | 2 +- drivers/net/irda/via-ircc.c | 6 ++-- drivers/net/irda/w83977af_ir.c | 8 ++++-- drivers/net/ixgb/ixgb_main.c | 9 ++++-- drivers/net/loopback.c | 3 +- drivers/net/macb.c | 7 +++-- drivers/net/myri10ge/myri10ge.c | 2 +- drivers/net/sk98lin/skge.c | 2 +- drivers/net/skfp/skfddi.c | 2 +- drivers/net/sun3lance.c | 2 +- drivers/net/tokenring/smctr.c | 4 +-- drivers/net/tokenring/tms380tr.c | 3 +- drivers/net/wan/dscc4.c | 3 +- drivers/net/wan/pc300_drv.c | 2 +- drivers/net/wan/pc300_tty.c | 4 +-- drivers/net/wan/z85230.c | 2 +- drivers/net/wireless/prism54/islpci_eth.c | 4 +-- drivers/s390/net/qeth_main.c | 9 +++--- drivers/usb/atm/usbatm.c | 4 ++- drivers/usb/net/asix.c | 2 +- include/linux/skbuff.h | 15 ++++++++++ net/atm/br2684.c | 2 +- net/atm/lec.c | 8 +++--- net/atm/mpc.c | 11 +++++--- net/bridge/br_netfilter.c | 3 +- net/core/netpoll.c | 2 +- net/core/skbuff.c | 2 +- net/ieee80211/ieee80211_rx.c | 5 ++-- net/ipv4/ipcomp.c | 2 +- net/ipv4/ipmr.c | 2 +- net/ipv4/ipvs/ip_vs_app.c | 2 +- net/ipv4/netfilter/ip_queue.c | 2 +- net/ipv6/ipcomp6.c | 2 +- net/ipv6/netfilter/ip6_queue.c | 2 +- net/irda/irttp.c | 2 +- net/irda/wrapper.c | 3 +- net/netfilter/nfnetlink_queue.c | 2 +- net/netrom/nr_out.c | 4 +-- net/rose/af_rose.c | 2 +- net/tipc/link.c | 46 ++++++++++++++++--------------- net/tipc/msg.h | 7 +++-- net/tipc/port.c | 8 +++--- net/wanrouter/wanmain.c | 4 +-- 
net/x25/x25_out.c | 2 +- 63 files changed, 185 insertions(+), 127 deletions(-) (limited to 'include') diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index edef008c2b42..f26077a773d5 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c @@ -473,7 +473,7 @@ simeth_rx(struct net_device *dev) * XXX Fix me * Should really do a csum+copy here */ - memcpy(skb->data, frame, len); + skb_copy_to_linear_data(skb, frame, len); #endif skb->protocol = eth_type_trans(skb, dev); diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c index 9fc02654f0f5..5419acb89a8c 100644 --- a/arch/ia64/sn/kernel/xpnet.c +++ b/arch/ia64/sn/kernel/xpnet.c @@ -233,7 +233,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) "%lu)\n", skb->data, &msg->data, (size_t) msg->embedded_bytes); - memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes); + skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes); } else { dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c index 4433ce0fca55..f85450146bdc 100644 --- a/drivers/isdn/hysdn/hycapi.c +++ b/drivers/isdn/hysdn/hycapi.c @@ -399,7 +399,8 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) if (_len > 22) { _len2 = _len - 22; skb_copy_from_linear_data(skb, msghead, 22); - memcpy(skb->data + _len2, msghead, 22); + skb_copy_to_linear_data_offset(skb, _len2, + msghead, 22); skb_pull(skb, _len2); CAPIMSG_SETLEN(skb->data, 22); retval = capilib_data_b3_req(&cinfo->ncci_head, diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 2101334a8ac2..a844b1fe2dc4 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -1904,10 +1904,10 @@ static __inline__ void wrap_copy(struct sk_buff *skb, const unsigned char *ring, u32 left = RX_BUF_LEN - offset; if (size > left) { - memcpy(skb->data, ring + offset, left); - memcpy(skb->data+left, ring, size - left); + skb_copy_to_linear_data(skb, ring + offset, left); + skb_copy_to_linear_data_offset(skb, left, ring, size - left); } else - memcpy(skb->data, ring + offset, size); + skb_copy_to_linear_data(skb, ring + offset, size); } #endif diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 43c17c85c97b..6a6cbd331a16 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -774,7 +774,7 @@ static int sendup_buffer (struct net_device *dev) skb_pull(skb,3); /* copy ddp(s,e)hdr + contents */ - memcpy(skb->data,(void*)ltdmabuf,len); + skb_copy_to_linear_data(skb, ltdmabuf, len); skb_reset_transport_header(skb); diff --git a/drivers/net/atari_bionet.c b/drivers/net/atari_bionet.c index 13dbed368d6a..3d87bd2b4194 100644 --- a/drivers/net/atari_bionet.c +++ b/drivers/net/atari_bionet.c @@ -550,7 +550,8 @@ bionet_poll_rx(struct net_device *dev) { /* 'skb->data' points to the start of sk_buff data area. */ - memcpy(skb->data, nic_packet->buffer, pkt_len); + skb_copy_to_linear_data(skb, nic_packet->buffer, + pkt_len); skb->protocol = eth_type_trans( skb, dev ); netif_rx(skb); dev->last_rx = jiffies; diff --git a/drivers/net/atari_pamsnet.c b/drivers/net/atari_pamsnet.c index 745101d7451b..54714409a09b 100644 --- a/drivers/net/atari_pamsnet.c +++ b/drivers/net/atari_pamsnet.c @@ -793,7 +793,8 @@ pamsnet_poll_rx(struct net_device *dev) { /* 'skb->data' points to the start of sk_buff data area. 
*/ - memcpy(skb->data, nic_packet->buffer, pkt_len); + skb_copy_to_linear_data(skb, nic_packet->buffer, + pkt_len); netif_rx(skb); dev->last_rx = jiffies; lp->stats.rx_packets++; diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 1be1bbd16164..e4f874a70fe5 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -2095,10 +2095,14 @@ static void espibug_workaround_t204(unsigned long data) 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 }; - memcpy(skb->data + sizeof(struct cpl_tx_pkt), - ch_mac_addr, ETH_ALEN); - memcpy(skb->data + skb->len - 10, - ch_mac_addr, ETH_ALEN); + skb_copy_to_linear_data_offset(skb, + sizeof(struct cpl_tx_pkt), + ch_mac_addr, + ETH_ALEN); + skb_copy_to_linear_data_offset(skb, + skb->len - 10, + ch_mac_addr, + ETH_ALEN); skb->cb[0] = 0xff; } @@ -2125,10 +2129,14 @@ static void espibug_workaround(unsigned long data) if (!skb->cb[0]) { u8 ch_mac_addr[ETH_ALEN] = {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; - memcpy(skb->data + sizeof(struct cpl_tx_pkt), - ch_mac_addr, ETH_ALEN); - memcpy(skb->data + skb->len - 10, ch_mac_addr, - ETH_ALEN); + skb_copy_to_linear_data_offset(skb, + sizeof(struct cpl_tx_pkt), + ch_mac_addr, + ETH_ALEN); + skb_copy_to_linear_data_offset(skb, + skb->len - 10, + ch_mac_addr, + ETH_ALEN); skb->cb[0] = 0xff; } diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 166c959c94b9..3666586a4831 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -661,7 +661,7 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) if (skb) { __skb_put(skb, IMMED_PKT_SIZE); - memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE); + skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); } return skb; } @@ -1722,11 +1722,11 @@ static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p, { skb->len = len; if (len <= SKB_DATA_SIZE) { - memcpy(skb->data, p->va, len); + skb_copy_to_linear_data(skb, p->va, len); skb->tail += len; put_page(p->frag.page); } else { - memcpy(skb->data, p->va, SKB_DATA_SIZE); + skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE); skb_shinfo(skb)->frags[0].page = p->frag.page; skb_shinfo(skb)->frags[0].page_offset = p->frag.page_offset + SKB_DATA_SIZE; diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index 8d29fae1c71c..571d82f8008c 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c @@ -3091,7 +3091,9 @@ static void dfx_rcv_queue_process( { /* Receive buffer allocated, pass receive packet up */ - memcpy(skb->data, p_buff + RCV_BUFF_K_PADDING, pkt_len+3); + skb_copy_to_linear_data(skb, + p_buff + RCV_BUFF_K_PADDING, + pkt_len + 3); } skb_reserve(skb,3); /* adjust data field so that it points to FC byte */ diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 0cefef5e3f06..4d0e0aea72bf 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1769,7 +1769,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) /* Align, init, and map the RFD. 
*/ skb_reserve(rx->skb, NET_IP_ALIGN); - memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd)); + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index e7c93f44f810..610216ec4918 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -4224,9 +4224,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, netdev_alloc_skb(netdev, length + NET_IP_ALIGN); if (new_skb) { skb_reserve(new_skb, NET_IP_ALIGN); - memcpy(new_skb->data - NET_IP_ALIGN, - skb->data - NET_IP_ALIGN, - length + NET_IP_ALIGN); + skb_copy_to_linear_data_offset(new_skb, + -NET_IP_ALIGN, + (skb->data - + NET_IP_ALIGN), + (length + + NET_IP_ALIGN)); /* save the skb in buffer_info as good */ buffer_info->skb = skb; skb = new_skb; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 8b5392072632..58364a0ff378 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -391,8 +391,8 @@ static int ehea_poll(struct net_device *dev, int *budget) if (!skb) break; } - memcpy(skb->data, ((char*)cqe) + 64, - cqe->num_bytes_transfered - 4); + skb_copy_to_linear_data(skb, ((char*)cqe) + 64, + cqe->num_bytes_transfered - 4); ehea_fill_skb(dev, skb, cqe); } else if (rq == 2) { /* RQ2 */ skb = get_skb_by_index(skb_arr_rq2, diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index fb2248a25516..f9c889c0dd07 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -1923,7 +1923,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self) /* Copy frame without CRC, CRC is removed by hardware*/ skb_put(skb, len); - memcpy(skb->data, self->rx_buff.data, len); + skb_copy_to_linear_data(skb, self->rx_buff.data, len); /* Move to next frame */ self->rx_buff.data += len; diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index cdd1f6c1e741..4dbdfaaf37bf 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c @@ -604,7 +604,7 @@ static int au1k_irda_rx(struct net_device *dev) skb_put(skb, count); else skb_put(skb, count-2); - memcpy(skb->data, (void *)pDB->vaddr, count-2); + skb_copy_to_linear_data(skb, pDB->vaddr, count - 2); skb->dev = dev; skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 9987a0dc1eaf..3ca47bf6dfec 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -1282,8 +1282,8 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<'); skb_reserve (skb, 1); skb_put (skb, len); - memcpy (skb->data, self->rx_bufs[self->rxs], len); - + skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs], + len); self->stats.rx_packets++; skb->dev = self->netdev; skb_reset_mac_header(skb); diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 4b0037e498f8..54d1d543c92c 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -426,7 +426,7 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len) } skb_reserve(skb, 1); - memcpy(skb->data, buf, new_len); + skb_copy_to_linear_data(skb, buf, new_len); skb_put(skb, new_len); skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); @@ -479,7 +479,7 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) } skb_reserve(skb, 1); - memcpy(skb->data, buf, new_len); + skb_copy_to_linear_data(skb, buf, 
new_len); skb_put(skb, new_len); skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index 0ff992714136..d96c89751a71 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -1868,10 +1868,14 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) /* Copy frame without CRC */ if (self->io.speed < 4000000) { skb_put(skb, len-2); - memcpy(skb->data, self->rx_buff.data, len-2); + skb_copy_to_linear_data(skb, + self->rx_buff.data, + len - 2); } else { skb_put(skb, len-4); - memcpy(skb->data, self->rx_buff.data, len-4); + skb_copy_to_linear_data(skb, + self->rx_buff.data, + len - 4); } /* Move to next frame */ diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index b3e1107420af..fb196fd91855 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -386,7 +386,7 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in /* Align IP header to 20 bytes */ skb_reserve(skb, 1); - memcpy(skb->data, si->dma_rx_buff, len); + skb_copy_to_linear_data(skb, si->dma_rx_buff, len); skb_put(skb, len); /* Feed it to IrLAP */ diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index aec86a214340..755aa444a4dd 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -348,7 +348,7 @@ static void fir_eof(struct stir_cb *stir) } skb_reserve(nskb, 1); skb = nskb; - memcpy(nskb->data, rx_buff->data, len); + skb_copy_to_linear_data(nskb, rx_buff->data, len); } else { nskb = dev_alloc_skb(rx_buff->truesize); if (unlikely(!nskb)) { diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 45bbd6686151..ff5358574d0a 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -1189,7 +1189,7 @@ F01_E */ skb_reserve(skb, 1); skb_put(skb, len - 4); - memcpy(skb->data, self->rx_buff.data, len - 4); + skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__, len - 4, self->rx_buff.data); @@ -1234,7 +1234,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase) } skb_reserve(skb, 1); skb_put(skb, len - 4 + 1); - memcpy(skb->data, self->rx_buff.data, len - 4 + 1); + skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1); st_fifo->tail++; st_fifo->len++; if (st_fifo->tail > MAX_RX_WINDOW) @@ -1303,7 +1303,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase) } skb_reserve(skb, 1); skb_put(skb, len - 4); - memcpy(skb->data, self->rx_buff.data, len - 4); + skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__, len - 4, st_fifo->head); diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 0d4a68618fc1..5182e800cc18 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -908,10 +908,14 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self) /* Copy frame without CRC */ if (self->io.speed < 4000000) { skb_put(skb, len-2); - memcpy(skb->data, self->rx_buff.data, len-2); + skb_copy_to_linear_data(skb, + self->rx_buff.data, + len - 2); } else { skb_put(skb, len-4); - memcpy(skb->data, self->rx_buff.data, len-4); + skb_copy_to_linear_data(skb, + self->rx_buff.data, + len - 4); } /* Move to next frame */ diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index e729ced52dc3..dfde80e54aef 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ 
b/drivers/net/ixgb/ixgb_main.c @@ -2017,9 +2017,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter) netdev_alloc_skb(netdev, length + NET_IP_ALIGN); if (new_skb) { skb_reserve(new_skb, NET_IP_ALIGN); - memcpy(new_skb->data - NET_IP_ALIGN, - skb->data - NET_IP_ALIGN, - length + NET_IP_ALIGN); + skb_copy_to_linear_data_offset(new_skb, + -NET_IP_ALIGN, + (skb->data - + NET_IP_ALIGN), + (length + + NET_IP_ALIGN)); /* save the skb in buffer_info as good */ buffer_info->skb = skb; skb = new_skb; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 20b5cb101368..6df673a058ce 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -94,7 +94,8 @@ static void emulate_large_send_offload(struct sk_buff *skb) skb_set_mac_header(nskb, -ETH_HLEN); skb_reset_network_header(nskb); iph = ip_hdr(nskb); - memcpy(nskb->data, skb_network_header(skb), doffset); + skb_copy_to_linear_data(nskb, skb_network_header(skb), + doffset); if (skb_copy_bits(skb, doffset + offset, nskb->data + doffset, diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 9e233f8216a7..0e04f7ac3f2e 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -367,9 +367,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, BUG_ON(frag != last_frag); frag_len = len - offset; } - memcpy(skb->data + offset, - bp->rx_buffers + (RX_BUFFER_SIZE * frag), - frag_len); + skb_copy_to_linear_data_offset(skb, offset, + (bp->rx_buffers + + (RX_BUFFER_SIZE * frag)), + frag_len); offset += RX_BUFFER_SIZE; bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); wmb(); diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e4b69a0485ba..16e3c4315e82 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -879,7 +879,7 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va, * skb_pull() (for ether_pad and eth_type_trans()) requires * the beginning of the packet in skb_headlen(), move it * manually */ - memcpy(skb->data, va, hlen); + skb_copy_to_linear_data(skb, va, hlen); skb_shinfo(skb)->frags[0].page_offset += hlen; skb_shinfo(skb)->frags[0].size -= hlen; skb->data_len -= hlen; diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index b987a5c3f42a..e0a93005e6dc 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c @@ -2127,7 +2127,7 @@ rx_start: (dma_addr_t) PhysAddr, FrameLength, PCI_DMA_FROMDEVICE); - memcpy(pNewMsg->data, pMsg, FrameLength); + skb_copy_to_linear_data(pNewMsg, pMsg, FrameLength); pci_dma_sync_single_for_device(pAC->PciDev, (dma_addr_t) PhysAddr, diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 064e7c21c01d..a7ef6c8b7721 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c @@ -1937,7 +1937,7 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc, } skb_reserve(skb, 3); skb_put(skb, len); - memcpy(skb->data, look_ahead, len); + skb_copy_to_linear_data(skb, look_ahead, len); // deliver frame to system skb->protocol = fddi_type_trans(skb, smc->os.dev); diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index 327ed7962fbd..791e081fdc15 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c @@ -853,7 +853,7 @@ static int lance_rx( struct net_device *dev ) skb_reserve( skb, 2 ); /* 16 byte align */ skb_put( skb, pkt_len ); /* Make room */ -// memcpy( skb->data, PKTBUF_ADDR(head), pkt_len ); +// skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len); eth_copy_and_sum(skb, PKTBUF_ADDR(head), pkt_len, 0); diff --git 
a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index b0296d80e46c..9bbea5c8acf4 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -3889,7 +3889,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, /* Slide data into a sleek skb. */ skb_put(skb, skb->len); - memcpy(skb->data, rmf, skb->len); + skb_copy_to_linear_data(skb, rmf, skb->len); /* Update Counters */ tp->MacStat.rx_packets++; @@ -4475,7 +4475,7 @@ static int smctr_rx_frame(struct net_device *dev) if (skb) { skb_put(skb, rx_size); - memcpy(skb->data, pbuff, rx_size); + skb_copy_to_linear_data(skb, pbuff, rx_size); /* Update Counters */ tp->MacStat.rx_packets++; diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index e6f0817c3509..12bd294045a7 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c @@ -2178,7 +2178,8 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) || rpl->SkbStat == SKB_DMA_DIRECT)) { if(rpl->SkbStat == SKB_DATA_COPY) - memcpy(skb->data, ReceiveDataPtr, Length); + skb_copy_to_linear_data(skb, ReceiveDataPtr, + Length); /* Deliver frame to system */ rpl->Skb = NULL; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 25021a7992a9..dca024471455 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -1904,7 +1904,8 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) struct TxFD *tx_fd = dpriv->tx_fd + last; skb->len = DUMMY_SKB_SIZE; - memcpy(skb->data, version, strlen(version)%DUMMY_SKB_SIZE); + skb_copy_to_linear_data(skb, version, + strlen(version) % DUMMY_SKB_SIZE); tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data, DUMMY_SKB_SIZE, PCI_DMA_TODEVICE); diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 8ba75bb17326..999bf71937ca 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c @@ -1759,7 +1759,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx) skb->pkt_type = PACKET_HOST; skb->len = 10 + skb_main->len; - memcpy(skb->data, dev->name, 5); + skb_copy_to_linear_data(skb, dev->name, 5); skb->data[5] = '['; skb->data[6] = rx_tx; skb->data[7] = ']'; diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index de02a07259cf..07dbdfbfc15d 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c @@ -1007,13 +1007,13 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx) skb->pkt_type = PACKET_HOST; skb->len = 10 + len; - memcpy(skb->data,dev->dev->name,5); + skb_copy_to_linear_data(skb, dev->dev->name, 5); skb->data[5] = '['; skb->data[6] = rxtx; skb->data[7] = ']'; skb->data[8] = ':'; skb->data[9] = ' '; - memcpy(&skb->data[10], buf, len); + skb_copy_to_linear_data_offset(skb, 10, buf, len); netif_rx(skb); } diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 9432d2ce7745..98ef400908b8 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -1656,7 +1656,7 @@ static void z8530_rx_done(struct z8530_channel *c) else { skb_put(skb, ct); - memcpy(skb->data, rxb, ct); + skb_copy_to_linear_data(skb, rxb, ct); c->stats.rx_packets++; c->stats.rx_bytes+=ct; } diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index 7d8bff1dbc4d..dd070cccf324 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c @@ -136,7 +136,7 @@ 
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) printk("islpci_eth_transmit:wds_mac\n"); #endif memmove(skb->data + 6, src, skb->len); - memcpy(skb->data, wds_mac, 6); + skb_copy_to_linear_data(skb, wds_mac, 6); } else { memmove(skb->data, src, skb->len); } @@ -165,7 +165,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) skb_copy_from_linear_data(skb, newskb->data + 6, skb->len); - memcpy(newskb->data, wds_mac, 6); + skb_copy_to_linear_data(newskb, wds_mac, 6); #ifdef ISLPCI_ETH_DEBUG printk("islpci_eth_transmit:wds_mac\n"); #endif diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index df7f279ec408..ad7792dc1a04 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -2501,7 +2501,8 @@ qeth_process_inbound_buffer(struct qeth_card *card, vlan_tag = qeth_rebuild_skb(card, skb, hdr); else { /*in case of OSN*/ skb_push(skb, sizeof(struct qeth_hdr)); - memcpy(skb->data, hdr, sizeof(struct qeth_hdr)); + skb_copy_to_linear_data(skb, hdr, + sizeof(struct qeth_hdr)); } /* is device UP ? */ if (!(card->dev->flags & IFF_UP)){ @@ -3870,9 +3871,9 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) * memcpys instead of one memmove to save cycles. */ skb_push(skb, VLAN_HLEN); - memcpy(skb->data, skb->data + 4, 4); - memcpy(skb->data + 4, skb->data + 8, 4); - memcpy(skb->data + 8, skb->data + 12, 4); + skb_copy_to_linear_data(skb, skb->data + 4, 4); + skb_copy_to_linear_data_offset(skb, 4, skb->data + 8, 4); + skb_copy_to_linear_data_offset(skb, 8, skb->data + 12, 4); tag = (u16 *)(skb->data + 12); /* * first two bytes = ETH_P_8021Q (0x8100) diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index d287c5755229..d3e2c5f90a26 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -396,7 +396,9 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char goto out; /* atm_charge increments rx_drop */ } - memcpy(skb->data, skb_tail_pointer(sarb) - pdu_length, length); + skb_copy_to_linear_data(skb, + skb_tail_pointer(sarb) - pdu_length, + length); __skb_put(skb, length); vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u", diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c index f56e2dab3712..d5ef97bc4d01 100644 --- a/drivers/usb/net/asix.c +++ b/drivers/usb/net/asix.c @@ -352,7 +352,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, skb_push(skb, 4); packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4); cpu_to_le32s(&packet_len); - memcpy(skb->data, &packet_len, sizeof(packet_len)); + skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); if ((skb->len % 512) == 0) { cpu_to_le32s(&padbytes); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 08c96bcbc59c..92969f662ee4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1521,6 +1521,21 @@ static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, memcpy(to, skb->data + offset, len); } +static inline void skb_copy_to_linear_data(struct sk_buff *skb, + const void *from, + const unsigned int len) +{ + memcpy(skb->data, from, len); +} + +static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, + const int offset, + const void *from, + const unsigned int len) +{ + memcpy(skb->data + offset, from, len); +} + extern void skb_init(void); /** diff --git a/net/atm/br2684.c b/net/atm/br2684.c index a1686dfcbb9a..0e9f00c5c899 100644 --- a/net/atm/br2684.c +++ 
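For illustration: the two helpers added to include/linux/skbuff.h above, skb_copy_to_linear_data() and skb_copy_to_linear_data_offset(), are thin wrappers around memcpy() into the skb's linear area, and the rest of this patch is a mechanical conversion of open-coded memcpy(skb->data + offset, ...) calls to them. A rough sketch of the conversion pattern follows; none of the names below come from the files touched here, and the skb is assumed to already have enough linear space:

        #include <linux/skbuff.h>

        /* Hypothetical receive path copying a DMA'd frame into a fresh skb. */
        static void example_rx_copy(struct sk_buff *skb, const unsigned char *rxbuf,
                                    unsigned int hdr_len, unsigned int len)
        {
                /* was: memcpy(skb->data, rxbuf, hdr_len); */
                skb_copy_to_linear_data(skb, rxbuf, hdr_len);

                /* was: memcpy(skb->data + hdr_len, rxbuf + hdr_len, len - hdr_len); */
                skb_copy_to_linear_data_offset(skb, hdr_len,
                                               rxbuf + hdr_len, len - hdr_len);
        }

Funnelling every write to the linear area through these accessors means a later change to how skb->data is addressed only has to touch one place.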
b/net/atm/br2684.c @@ -173,7 +173,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev, } skb_push(skb, minheadroom); if (brvcc->encaps == e_llc) - memcpy(skb->data, llc_oui_pid_pad, 10); + skb_copy_to_linear_data(skb, llc_oui_pid_pad, 10); else memset(skb->data, 0, 2); #endif /* FASTER_VERSION */ diff --git a/net/atm/lec.c b/net/atm/lec.c index 4b3e72f31b3b..6d63afa5764d 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -576,8 +576,8 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) break; } skb2->len = sizeof(struct atmlec_msg); - memcpy(skb2->data, mesg, - sizeof(struct atmlec_msg)); + skb_copy_to_linear_data(skb2, mesg, + sizeof(*mesg)); atm_force_charge(priv->lecd, skb2->truesize); sk = sk_atm(priv->lecd); skb_queue_tail(&sk->sk_receive_queue, skb2); @@ -1337,7 +1337,7 @@ static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force, if (skb == NULL) return -1; skb->len = *sizeoftlvs; - memcpy(skb->data, *tlvs, *sizeoftlvs); + skb_copy_to_linear_data(skb, *tlvs, *sizeoftlvs); retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb); } return retval; @@ -1371,7 +1371,7 @@ static int lane2_associate_req(struct net_device *dev, u8 *lan_dst, if (skb == NULL) return 0; skb->len = sizeoftlvs; - memcpy(skb->data, tlvs, sizeoftlvs); + skb_copy_to_linear_data(skb, tlvs, sizeoftlvs); retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb); if (retval != 0) printk("lec.c: lane2_associate_req() failed\n"); diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 4d2592c14090..813e08d6dc7c 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -504,11 +504,13 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc) tagged_llc_snap_hdr.tag = entry->ctrl_info.tag; skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ skb_push(skb, sizeof(tagged_llc_snap_hdr)); /* add LLC/SNAP header */ - memcpy(skb->data, &tagged_llc_snap_hdr, sizeof(tagged_llc_snap_hdr)); + skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr, + sizeof(tagged_llc_snap_hdr)); } else { skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ skb_push(skb, sizeof(struct llc_snap_hdr)); /* add LLC/SNAP header + tag */ - memcpy(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)); + skb_copy_to_linear_data(skb, &llc_snap_mpoa_data, + sizeof(struct llc_snap_hdr)); } atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc); @@ -711,7 +713,8 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) return; } skb_push(new_skb, eg->ctrl_info.DH_length); /* add MAC header */ - memcpy(new_skb->data, eg->ctrl_info.DLL_header, eg->ctrl_info.DH_length); + skb_copy_to_linear_data(new_skb, eg->ctrl_info.DLL_header, + eg->ctrl_info.DH_length); new_skb->protocol = eth_type_trans(new_skb, dev); skb_reset_network_header(new_skb); @@ -936,7 +939,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc) if (skb == NULL) return -ENOMEM; skb_put(skb, sizeof(struct k_message)); - memcpy(skb->data, mesg, sizeof(struct k_message)); + skb_copy_to_linear_data(skb, mesg, sizeof(*mesg)); atm_force_charge(mpc->mpoad_vcc, skb->truesize); sk = sk_atm(mpc->mpoad_vcc); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 8b45224699f4..fd70d041e51f 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -149,7 +149,8 @@ int nf_bridge_copy_header(struct sk_buff *skb) if (err) return err; - memcpy(skb->data - header_size, skb->nf_bridge->data, header_size); + skb_copy_to_linear_data_offset(skb, -header_size, + skb->nf_bridge->data, 
header_size); if (skb->protocol == htons(ETH_P_8021Q)) __skb_push(skb, VLAN_HLEN); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 57a82445c465..1fb30c3528bc 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -293,7 +293,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) if (!skb) return; - memcpy(skb->data, msg, len); + skb_copy_to_linear_data(skb, msg, len); skb->len += len; skb_push(skb, sizeof(*udph)); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 17c6bb5927b6..331d3efa82fa 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1129,7 +1129,7 @@ int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len) if ((copy = start - offset) > 0) { if (copy > len) copy = len; - memcpy(skb->data + offset, from, copy); + skb_copy_to_linear_data_offset(skb, offset, from, copy); if ((len -= copy) == 0) return 0; offset += copy; diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 94e2b8e2ab26..6ae036b1920f 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -759,8 +759,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) { /* Non-standard frame: get addr4 from its bogus location after * the payload */ - memcpy(skb->data + ETH_ALEN, - skb->data + skb->len - ETH_ALEN, ETH_ALEN); + skb_copy_to_linear_data_offset(skb, ETH_ALEN, + skb->data + skb->len - ETH_ALEN, + ETH_ALEN); skb_trim(skb, skb->len - ETH_ALEN); } #endif diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index ba348b1e5f84..ab86137c71d2 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -66,7 +66,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) skb->truesize += dlen - plen; __skb_put(skb, dlen - plen); - memcpy(skb->data, scratch, dlen); + skb_copy_to_linear_data(skb, scratch, dlen); out: put_cpu(); return err; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 48027df5a90b..0ebae413ae87 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -584,7 +584,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) skb->network_header = skb->tail; skb_put(skb, ihl); - memcpy(skb->data,pkt->data,ihl); + skb_copy_to_linear_data(skb, pkt->data, ihl); ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ msg = (struct igmpmsg *)skb_network_header(skb); msg->im_vif = vifi; diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index c8a822c0aa75..15ad5dd2d984 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c @@ -602,7 +602,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, skb_put(skb, diff); memmove(skb->data + o_offset + n_len, skb->data + o_offset + o_len, o_left); - memcpy(skb->data + o_offset, n_buf, n_len); + skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len); } /* must update the iph total length here */ diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index cd8fec05f9bc..0d72693869e6 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -368,7 +368,7 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) } if (!skb_make_writable(&e->skb, v->data_len)) return -ENOMEM; - memcpy(e->skb->data, v->payload, v->data_len); + skb_copy_to_linear_data(e->skb, v->payload, v->data_len); e->skb->ip_summed = CHECKSUM_NONE; return 0; diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 7691a1b5caac..1ee50b5782e1 100644 --- a/net/ipv6/ipcomp6.c +++ 
b/net/ipv6/ipcomp6.c @@ -111,7 +111,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) skb->truesize += dlen - plen; __skb_put(skb, dlen - plen); - memcpy(skb->data, scratch, dlen); + skb_copy_to_linear_data(skb, scratch, dlen); err = ipch->nexthdr; out_put_cpu: diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 2f1ae422d87f..bfae9fdc4668 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -366,7 +366,7 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) } if (!skb_make_writable(&e->skb, v->data_len)) return -ENOMEM; - memcpy(e->skb->data, v->payload, v->data_len); + skb_copy_to_linear_data(e->skb, v->payload, v->data_len); e->skb->ip_summed = CHECKSUM_NONE; return 0; diff --git a/net/irda/irttp.c b/net/irda/irttp.c index b55bc8f989df..3279897a01b0 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -256,7 +256,7 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self) * Copy all fragments to a new buffer */ while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) { - memcpy(skb->data+n, frag->data, frag->len); + skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len); n += frag->len; dev_kfree_skb(frag); diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c index 2acc66dfb558..a7a7f191f1a8 100644 --- a/net/irda/wrapper.c +++ b/net/irda/wrapper.c @@ -239,7 +239,8 @@ async_bump(struct net_device *dev, if(docopy) { /* Copy data without CRC (lenght already checked) */ - memcpy(newskb->data, rx_buff->data, rx_buff->len - 2); + skb_copy_to_linear_data(newskb, rx_buff->data, + rx_buff->len - 2); /* Deliver this skb */ dataskb = newskb; } else { diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 9aefb1c9bfa3..7a97bec67729 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -648,7 +648,7 @@ nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e) } if (!skb_make_writable(&e->skb, data_len)) return -ENOMEM; - memcpy(e->skb->data, data, data_len); + skb_copy_to_linear_data(e->skb, data, data_len); e->skb->ip_summed = CHECKSUM_NONE; return 0; } diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c index 7c467c95c7d6..e3e6c44e1890 100644 --- a/net/netrom/nr_out.c +++ b/net/netrom/nr_out.c @@ -59,8 +59,8 @@ void nr_output(struct sock *sk, struct sk_buff *skb) /* Duplicate the Transport Header */ skb_push(skbn, NR_TRANSPORT_LEN); - memcpy(skbn->data, transport, NR_TRANSPORT_LEN); - + skb_copy_to_linear_data(skbn, transport, + NR_TRANSPORT_LEN); if (skb->len > 0) skbn->data[4] |= NR_MORE_FLAG; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index f38c3b3471ee..806bf6f5dc6d 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1181,7 +1181,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, /* Duplicate the Header */ skb_push(skbn, ROSE_MIN_LEN); - memcpy(skbn->data, header, ROSE_MIN_LEN); + skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN); if (skb->len > 0) skbn->data[2] |= M_BIT; diff --git a/net/tipc/link.c b/net/tipc/link.c index 71c2f2fd405c..f3f99c8ea08a 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1001,7 +1001,7 @@ static int link_bundle_buf(struct link *l_ptr, return 0; skb_put(bundler, pad + size); - memcpy(bundler->data + to_pos, buf->data, size); + skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size); msg_set_size(bundler_msg, to_pos + size); msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1); dbg("Packed msg # %u(%u octets) into pos 
%u in buf(#%u)\n", @@ -1109,8 +1109,8 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf) if (bundler) { msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); - memcpy(bundler->data, (unchar *)&bundler_hdr, - INT_H_SIZE); + skb_copy_to_linear_data(bundler, &bundler_hdr, + INT_H_SIZE); skb_trim(bundler, INT_H_SIZE); link_bundle_buf(l_ptr, bundler, buf); buf = bundler; @@ -1383,9 +1383,9 @@ again: if (!buf) return -ENOMEM; buf->next = NULL; - memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE); + skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE); hsz = msg_hdr_sz(hdr); - memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz); + skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz); msg_dbg(buf_msg(buf), ">BUILD>"); /* Chop up message: */ @@ -1416,8 +1416,8 @@ error: return -EFAULT; } } else - memcpy(buf->data + fragm_crs, sect_crs, sz); - + skb_copy_to_linear_data_offset(buf, fragm_crs, + sect_crs, sz); sect_crs += sz; sect_rest -= sz; fragm_crs += sz; @@ -1442,7 +1442,7 @@ error: buf->next = NULL; prev->next = buf; - memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE); + skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE); fragm_crs = INT_H_SIZE; fragm_rest = fragm_sz; msg_dbg(buf_msg(buf)," >BUILD>"); @@ -2130,7 +2130,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg, buf = l_ptr->proto_msg_queue; if (!buf) return; - memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg)); + skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); return; } msg_set_timestamp(msg, jiffies_to_msecs(jiffies)); @@ -2143,7 +2143,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg, if (!buf) return; - memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg)); + skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); msg_set_size(buf_msg(buf), msg_size); if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { @@ -2319,8 +2319,8 @@ void tipc_link_tunnel(struct link *l_ptr, "unable to send tunnel msg\n"); return; } - memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE); - memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length); + skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE); + skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length); dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane); msg_dbg(buf_msg(buf), ">SEND>"); tipc_link_send_buf(tunnel, buf); @@ -2361,7 +2361,7 @@ void tipc_link_changeover(struct link *l_ptr) buf = buf_acquire(INT_H_SIZE); if (buf) { - memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE); + skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE); msg_set_size(&tunnel_hdr, INT_H_SIZE); dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane); @@ -2426,8 +2426,9 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel) "unable to send duplicate msg\n"); return; } - memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE); - memcpy(outbuf->data + INT_H_SIZE, iter->data, length); + skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE); + skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data, + length); dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane); msg_dbg(buf_msg(outbuf), ">SEND>"); @@ -2457,7 +2458,7 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) eb = buf_acquire(size); if (eb) - memcpy(eb->data, (unchar *)msg, size); + skb_copy_to_linear_data(eb, msg, size); return eb; } @@ -2631,9 +2632,9 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) goto 
exit; } msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); - memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE); - memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz); - + skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE); + skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs, + fragm_sz); /* Send queued messages first, if any: */ l_ptr->stats.sent_fragments++; @@ -2733,8 +2734,8 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, if (pbuf != NULL) { pbuf->next = *pending; *pending = pbuf; - memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm)); - + skb_copy_to_linear_data(pbuf, imsg, + msg_data_sz(fragm)); /* Prepare buffer for subsequent fragments. */ set_long_msg_seqno(pbuf, long_msg_seq_no); @@ -2750,7 +2751,8 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, u32 fsz = get_fragm_size(pbuf); u32 crs = ((msg_fragm_no(fragm) - 1) * fsz); u32 exp_frags = get_expected_frags(pbuf) - 1; - memcpy(pbuf->data + crs, msg_data(fragm), dsz); + skb_copy_to_linear_data_offset(pbuf, crs, + msg_data(fragm), dsz); buf_discard(fbuf); /* Is message complete? */ diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 62d549063604..aec7ce7af875 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -786,15 +786,16 @@ static inline int msg_build(struct tipc_msg *hdr, *buf = buf_acquire(sz); if (!(*buf)) return -ENOMEM; - memcpy((*buf)->data, (unchar *)hdr, hsz); + skb_copy_to_linear_data(*buf, hdr, hsz); for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) { if (likely(usrmem)) res = !copy_from_user((*buf)->data + pos, msg_sect[cnt].iov_base, msg_sect[cnt].iov_len); else - memcpy((*buf)->data + pos, msg_sect[cnt].iov_base, - msg_sect[cnt].iov_len); + skb_copy_to_linear_data_offset(*buf, pos, + msg_sect[cnt].iov_base, + msg_sect[cnt].iov_len); pos += msg_sect[cnt].iov_len; } if (likely(res)) diff --git a/net/tipc/port.c b/net/tipc/port.c index 5f8217d4b452..bcd5da00737b 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c @@ -464,7 +464,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err) msg_set_size(rmsg, data_sz + hdr_sz); msg_set_nametype(rmsg, msg_nametype(msg)); msg_set_nameinst(rmsg, msg_nameinst(msg)); - memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz); + skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz); /* send self-abort message when rejecting on a connected port */ if (msg_connected(msg)) { @@ -1419,7 +1419,7 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz) return -ENOMEM; skb_push(buf, hsz); - memcpy(buf->data, (unchar *)msg, hsz); + skb_copy_to_linear_data(buf, msg, hsz); destnode = msg_destnode(msg); p_ptr->publ.congested = 1; if (!tipc_port_congested(p_ptr)) { @@ -1555,7 +1555,7 @@ int tipc_forward_buf2name(u32 ref, if (skb_cow(buf, LONG_H_SIZE)) return -ENOMEM; skb_push(buf, LONG_H_SIZE); - memcpy(buf->data, (unchar *)msg, LONG_H_SIZE); + skb_copy_to_linear_data(buf, msg, LONG_H_SIZE); msg_dbg(buf_msg(buf),"PREP:"); if (likely(destport || destnode)) { p_ptr->sent++; @@ -1679,7 +1679,7 @@ int tipc_forward_buf2port(u32 ref, return -ENOMEM; skb_push(buf, DIR_MSG_H_SIZE); - memcpy(buf->data, (unchar *)msg, DIR_MSG_H_SIZE); + skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE); msg_dbg(msg, "buf2port: "); p_ptr->sent++; if (dest->node == tipc_own_addr) diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index c49e223084f1..7a19e0ede289 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c @@ -277,8 +277,8 @@ int wanrouter_encapsulate(struct sk_buff *skb, struct net_device *dev, 
skb_push(skb, 7); skb->data[0] = 0; skb->data[1] = NLPID_SNAP; - memcpy(&skb->data[2], wanrouter_oui_ether, - sizeof(wanrouter_oui_ether)); + skb_copy_to_linear_data_offset(skb, 2, wanrouter_oui_ether, + sizeof(wanrouter_oui_ether)); *((unsigned short*)&skb->data[5]) = htons(type); break; diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c index bb45e21ffce9..2b96b52114d6 100644 --- a/net/x25/x25_out.c +++ b/net/x25/x25_out.c @@ -89,7 +89,7 @@ int x25_output(struct sock *sk, struct sk_buff *skb) /* Duplicate the Header */ skb_push(skbn, header_len); - memcpy(skbn->data, header, header_len); + skb_copy_to_linear_data(skbn, header, header_len); if (skb->len > 0) { if (x25->neighbour->extended) -- cgit v1.2.3-59-g8ed1b From 716ea3a7aae3a2bfc44cb97b5419c1c9868c7bc9 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 2 Apr 2007 20:19:53 -0700 Subject: [NET]: Move generic skbuff stuff from XFRM code to generic code Move generic skbuff stuff from XFRM code to generic code so that AF_RXRPC can use it too. The kdoc comments I've attached to the functions needs to be checked by whoever wrote them as I had to make some guesses about the workings of these functions. Signed-off-By: David Howells Signed-off-by: David S. Miller --- include/linux/skbuff.h | 6 ++ include/net/esp.h | 2 - net/core/skbuff.c | 188 +++++++++++++++++++++++++++++++++++++++++++++++++ net/xfrm/xfrm_algo.c | 169 -------------------------------------------- 4 files changed, 194 insertions(+), 171 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 92969f662ee4..9b2957d203c9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -86,6 +86,7 @@ */ struct net_device; +struct scatterlist; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -347,6 +348,11 @@ extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); +extern int skb_to_sgvec(struct sk_buff *skb, + struct scatterlist *sg, int offset, + int len); +extern int skb_cow_data(struct sk_buff *skb, int tailbits, + struct sk_buff **trailer); extern int skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) kfree_skb(a) extern void skb_over_panic(struct sk_buff *skb, int len, diff --git a/include/net/esp.h b/include/net/esp.h index 713d039f4af7..d05d8d2c78f4 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -40,8 +40,6 @@ struct esp_data } auth; }; -extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len); -extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 331d3efa82fa..f927b6e8027e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #include @@ -2002,6 +2003,190 @@ void __init skb_init(void) NULL, NULL); } +/** + * skb_to_sgvec - Fill a scatter-gather list from a socket buffer + * @skb: Socket buffer containing the buffers to be mapped + * @sg: The scatter-gather list to map into + * @offset: The offset into the buffer's contents to start mapping + * @len: Length of buffer space to be mapped + * + * Fill the specified scatter-gather list with mappings/pointers into a + * region of the buffer space 
attached to a socket buffer. + */ +int +skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) +{ + int start = skb_headlen(skb); + int i, copy = start - offset; + int elt = 0; + + if (copy > 0) { + if (copy > len) + copy = len; + sg[elt].page = virt_to_page(skb->data + offset); + sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; + sg[elt].length = copy; + elt++; + if ((len -= copy) == 0) + return elt; + offset += copy; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + int end; + + BUG_TRAP(start <= offset + len); + + end = start + skb_shinfo(skb)->frags[i].size; + if ((copy = end - offset) > 0) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + if (copy > len) + copy = len; + sg[elt].page = frag->page; + sg[elt].offset = frag->page_offset+offset-start; + sg[elt].length = copy; + elt++; + if (!(len -= copy)) + return elt; + offset += copy; + } + start = end; + } + + if (skb_shinfo(skb)->frag_list) { + struct sk_buff *list = skb_shinfo(skb)->frag_list; + + for (; list; list = list->next) { + int end; + + BUG_TRAP(start <= offset + len); + + end = start + list->len; + if ((copy = end - offset) > 0) { + if (copy > len) + copy = len; + elt += skb_to_sgvec(list, sg+elt, offset - start, copy); + if ((len -= copy) == 0) + return elt; + offset += copy; + } + start = end; + } + } + BUG_ON(len); + return elt; +} + +/** + * skb_cow_data - Check that a socket buffer's data buffers are writable + * @skb: The socket buffer to check. + * @tailbits: Amount of trailing space to be added + * @trailer: Returned pointer to the skb where the @tailbits space begins + * + * Make sure that the data buffers attached to a socket buffer are + * writable. If they are not, private copies are made of the data buffers + * and the socket buffer is set to use these instead. + * + * If @tailbits is given, make sure that there is space to write @tailbits + * bytes of data beyond current end of socket buffer. @trailer will be + * set to point to the skb in which this space begins. + * + * The number of scatterlist elements required to completely map the + * COW'd and extended socket buffer will be returned. + */ +int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) +{ + int copyflag; + int elt; + struct sk_buff *skb1, **skb_p; + + /* If skb is cloned or its head is paged, reallocate + * head pulling out all the pages (pages are considered not writable + * at the moment even if they are anonymous). + */ + if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && + __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) + return -ENOMEM; + + /* Easy case. Most of packets will go this way. */ + if (!skb_shinfo(skb)->frag_list) { + /* A little of trouble, not enough of space for trailer. + * This should not happen, when stack is tuned to generate + * good frames. OK, on miss we reallocate and reserve even more + * space, 128 bytes is fair. */ + + if (skb_tailroom(skb) < tailbits && + pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) + return -ENOMEM; + + /* Voila! */ + *trailer = skb; + return 1; + } + + /* Misery. We are in troubles, going to mincer fragments... */ + + elt = 1; + skb_p = &skb_shinfo(skb)->frag_list; + copyflag = 0; + + while ((skb1 = *skb_p) != NULL) { + int ntail = 0; + + /* The fragment is partially pulled by someone, + * this can happen on input. Copy it and everything + * after it. */ + + if (skb_shared(skb1)) + copyflag = 1; + + /* If the skb is the last, worry about trailer. 
*/ + + if (skb1->next == NULL && tailbits) { + if (skb_shinfo(skb1)->nr_frags || + skb_shinfo(skb1)->frag_list || + skb_tailroom(skb1) < tailbits) + ntail = tailbits + 128; + } + + if (copyflag || + skb_cloned(skb1) || + ntail || + skb_shinfo(skb1)->nr_frags || + skb_shinfo(skb1)->frag_list) { + struct sk_buff *skb2; + + /* Fuck, we are miserable poor guys... */ + if (ntail == 0) + skb2 = skb_copy(skb1, GFP_ATOMIC); + else + skb2 = skb_copy_expand(skb1, + skb_headroom(skb1), + ntail, + GFP_ATOMIC); + if (unlikely(skb2 == NULL)) + return -ENOMEM; + + if (skb1->sk) + skb_set_owner_w(skb2, skb1->sk); + + /* Looking around. Are we still alive? + * OK, link new skb, drop old one */ + + skb2->next = skb1->next; + *skb_p = skb2; + kfree_skb(skb1); + skb1 = skb2; + } + elt++; + *trailer = skb1; + skb_p = &skb1->next; + } + + return elt; +} + EXPORT_SYMBOL(___pskb_trim); EXPORT_SYMBOL(__kfree_skb); EXPORT_SYMBOL(kfree_skb); @@ -2036,3 +2221,6 @@ EXPORT_SYMBOL(skb_seq_read); EXPORT_SYMBOL(skb_abort_seq_read); EXPORT_SYMBOL(skb_find_text); EXPORT_SYMBOL(skb_append_datato_frags); + +EXPORT_SYMBOL_GPL(skb_to_sgvec); +EXPORT_SYMBOL_GPL(skb_cow_data); diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index f373a8a7d9c8..6249a9405bb8 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c @@ -612,175 +612,6 @@ EXPORT_SYMBOL_GPL(skb_icv_walk); #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) -/* Looking generic it is not used in another places. */ - -int -skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) -{ - int start = skb_headlen(skb); - int i, copy = start - offset; - int elt = 0; - - if (copy > 0) { - if (copy > len) - copy = len; - sg[elt].page = virt_to_page(skb->data + offset); - sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; - sg[elt].length = copy; - elt++; - if ((len -= copy) == 0) - return elt; - offset += copy; - } - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - int end; - - BUG_TRAP(start <= offset + len); - - end = start + skb_shinfo(skb)->frags[i].size; - if ((copy = end - offset) > 0) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - if (copy > len) - copy = len; - sg[elt].page = frag->page; - sg[elt].offset = frag->page_offset+offset-start; - sg[elt].length = copy; - elt++; - if (!(len -= copy)) - return elt; - offset += copy; - } - start = end; - } - - if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list = skb_shinfo(skb)->frag_list; - - for (; list; list = list->next) { - int end; - - BUG_TRAP(start <= offset + len); - - end = start + list->len; - if ((copy = end - offset) > 0) { - if (copy > len) - copy = len; - elt += skb_to_sgvec(list, sg+elt, offset - start, copy); - if ((len -= copy) == 0) - return elt; - offset += copy; - } - start = end; - } - } - BUG_ON(len); - return elt; -} -EXPORT_SYMBOL_GPL(skb_to_sgvec); - -/* Check that skb data bits are writable. If they are not, copy data - * to newly created private area. If "tailbits" is given, make sure that - * tailbits bytes beyond current end of skb are writable. - * - * Returns amount of elements of scatterlist to load for subsequent - * transformations and pointer to writable trailer skb. 
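For illustration, a minimal caller combining the two newly generic helpers might look like the sketch below; the function name and the GFP_ATOMIC choice are assumptions, not taken from the patch, and in-tree users (ESP today, AF_RXRPC soon) size and manage the scatterlist slightly differently:

        #include <linux/skbuff.h>
        #include <linux/scatterlist.h>
        #include <linux/slab.h>

        /* Make the skb's buffers writable (with 'tailbits' spare bytes after the
         * payload), then describe the whole payload as a scatterlist, e.g. for a
         * crypto transform or DMA engine. */
        static int example_map_payload(struct sk_buff *skb, int tailbits)
        {
                struct sk_buff *trailer;
                struct scatterlist *sg;
                int nfrags, elt;

                nfrags = skb_cow_data(skb, tailbits, &trailer);
                if (nfrags < 0)
                        return nfrags;

                sg = kmalloc(nfrags * sizeof(*sg), GFP_ATOMIC);
                if (!sg)
                        return -ENOMEM;

                elt = skb_to_sgvec(skb, sg, 0, skb->len);
                /* ... hand sg[0..elt-1] to the transform, then write any
                 * trailer bytes into 'trailer' ... */

                kfree(sg);
                return elt;
        }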
- */ - -int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) -{ - int copyflag; - int elt; - struct sk_buff *skb1, **skb_p; - - /* If skb is cloned or its head is paged, reallocate - * head pulling out all the pages (pages are considered not writable - * at the moment even if they are anonymous). - */ - if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && - __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) - return -ENOMEM; - - /* Easy case. Most of packets will go this way. */ - if (!skb_shinfo(skb)->frag_list) { - /* A little of trouble, not enough of space for trailer. - * This should not happen, when stack is tuned to generate - * good frames. OK, on miss we reallocate and reserve even more - * space, 128 bytes is fair. */ - - if (skb_tailroom(skb) < tailbits && - pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) - return -ENOMEM; - - /* Voila! */ - *trailer = skb; - return 1; - } - - /* Misery. We are in troubles, going to mincer fragments... */ - - elt = 1; - skb_p = &skb_shinfo(skb)->frag_list; - copyflag = 0; - - while ((skb1 = *skb_p) != NULL) { - int ntail = 0; - - /* The fragment is partially pulled by someone, - * this can happen on input. Copy it and everything - * after it. */ - - if (skb_shared(skb1)) - copyflag = 1; - - /* If the skb is the last, worry about trailer. */ - - if (skb1->next == NULL && tailbits) { - if (skb_shinfo(skb1)->nr_frags || - skb_shinfo(skb1)->frag_list || - skb_tailroom(skb1) < tailbits) - ntail = tailbits + 128; - } - - if (copyflag || - skb_cloned(skb1) || - ntail || - skb_shinfo(skb1)->nr_frags || - skb_shinfo(skb1)->frag_list) { - struct sk_buff *skb2; - - /* Fuck, we are miserable poor guys... */ - if (ntail == 0) - skb2 = skb_copy(skb1, GFP_ATOMIC); - else - skb2 = skb_copy_expand(skb1, - skb_headroom(skb1), - ntail, - GFP_ATOMIC); - if (unlikely(skb2 == NULL)) - return -ENOMEM; - - if (skb1->sk) - skb_set_owner_w(skb2, skb1->sk); - - /* Looking around. Are we still alive? - * OK, link new skb, drop old one */ - - skb2->next = skb1->next; - *skb_p = skb2; - kfree_skb(skb1); - skb1 = skb2; - } - elt++; - *trailer = skb1; - skb_p = &skb1->next; - } - - return elt; -} -EXPORT_SYMBOL_GPL(skb_cow_data); - void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) { if (tail != skb) { -- cgit v1.2.3-59-g8ed1b From 29f6af7712c40045e7886d0fa356d97a6f9aba49 Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki Date: Fri, 6 Apr 2007 11:45:39 -0700 Subject: [IPV6] FIB6RULE: Find source address during looking up route. When looking up route for destination with rules with source address restrictions, we may need to find a source address for the traffic if not given. Based on patch from Noriaki TAKAMIYA . Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. 
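Pulling the new behaviour together in one place, as it might read inside net/ipv6/fib6_rules.c (the helper name and its factoring are illustrative; the individual calls are exactly those used by the diff below), the post-lookup check amounts to:

        /* After a successful table lookup, for a rule that restricts the source
         * ('from' prefix) while the flow carried no source address yet. */
        static int fib6_result_suits_rule(struct fib_rule *rule, struct rt6_info *rt,
                                          struct flowi *flp, int flags)
        {
                struct fib6_rule *r = (struct fib6_rule *)rule;
                struct in6_addr saddr;

                if (!(rule->flags & FIB_RULE_FIND_SADDR) || !r->src.plen ||
                    (flags & RT6_LOOKUP_F_HAS_SADDR))
                        return 1;       /* nothing was deferred */

                if (ipv6_get_saddr(&rt->u.dst, &flp->fl6_dst, &saddr))
                        return 0;       /* no source available via this route */
                if (!ipv6_prefix_equal(&saddr, &r->src.addr, r->src.plen))
                        return 0;       /* chosen source would not match the rule */

                ipv6_addr_copy(&flp->fl6_src, &saddr); /* remember it for the caller */
                return 1;
        }

A result that fails this check is released (the 'goto again' path in the hunk below) and is not used for the lookup.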
Miller --- include/linux/fib_rules.h | 11 +++++++---- net/ipv6/fib6_rules.c | 34 +++++++++++++++++++++++++++++++--- 2 files changed, 38 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h index f278ba781d09..87b606b63f1e 100644 --- a/include/linux/fib_rules.h +++ b/include/linux/fib_rules.h @@ -5,10 +5,13 @@ #include /* rule is permanent, and cannot be deleted */ -#define FIB_RULE_PERMANENT 1 -#define FIB_RULE_INVERT 2 -#define FIB_RULE_UNRESOLVED 4 -#define FIB_RULE_DEV_DETACHED 8 +#define FIB_RULE_PERMANENT 0x00000001 +#define FIB_RULE_INVERT 0x00000002 +#define FIB_RULE_UNRESOLVED 0x00000004 +#define FIB_RULE_DEV_DETACHED 0x00000008 + +/* try to find source address in routing lookups */ +#define FIB_RULE_FIND_SADDR 0x00010000 struct fib_rule_hdr { diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index dd9720e700ef..fc3882c90604 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -95,8 +96,27 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, if (table) rt = lookup(table, flp, flags); - if (rt != &ip6_null_entry) + if (rt != &ip6_null_entry) { + struct fib6_rule *r = (struct fib6_rule *)rule; + + /* + * If we need to find a source address for this traffic, + * we check the result if it meets requirement of the rule. + */ + if ((rule->flags & FIB_RULE_FIND_SADDR) && + r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) { + struct in6_addr saddr; + if (ipv6_get_saddr(&rt->u.dst, &flp->fl6_dst, + &saddr)) + goto again; + if (!ipv6_prefix_equal(&saddr, &r->src.addr, + r->src.plen)) + goto again; + ipv6_addr_copy(&flp->fl6_src, &saddr); + } goto out; + } +again: dst_release(&rt->u.dst); rt = NULL; goto out; @@ -117,9 +137,17 @@ static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) !ipv6_prefix_equal(&fl->fl6_dst, &r->dst.addr, r->dst.plen)) return 0; + /* + * If FIB_RULE_FIND_SADDR is set and we do not have a + * source address for the traffic, we defer check for + * source address. + */ if (r->src.plen) { - if (!(flags & RT6_LOOKUP_F_HAS_SADDR) || - !ipv6_prefix_equal(&fl->fl6_src, &r->src.addr, r->src.plen)) + if (flags & RT6_LOOKUP_F_HAS_SADDR) { + if (!ipv6_prefix_equal(&fl->fl6_src, &r->src.addr, + r->src.plen)) + return 0; + } else if (!(r->common.flags & FIB_RULE_FIND_SADDR)) return 0; } -- cgit v1.2.3-59-g8ed1b From c5c2523893747f88a83376abad310c8ad13f7197 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Mon, 9 Apr 2007 11:47:18 -0700 Subject: [XFRM]: Optimize MTU calculation Replace the probing based MTU estimation, which usually takes 2-3 iterations to find a fitting value and may underestimate the MTU, by an exact calculation. Also fix underestimation of the XFRM trailer_len, which causes unnecessary reallocations. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
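To make the exact calculation concrete, here is the arithmetic the new esp4_get_mtu() below performs for one illustrative parameter set (roughly AES-CBC with HMAC-SHA1-96 over IPv4: 16-byte cipher block, no extra padding, 44 bytes of outer IP + ESP header + IV, 12-byte truncated ICV; these numbers are examples, not values taken from the patch):

        align = max(blksize, padlen)   = max(16, 0) = 16
        1500 - 44 - 12                 = 1444        (strip header and ICV)
        rem  = 1444 & (16 - 1)         = 4
        1444 & ~(16 - 1)               = 1440        (round down to a block boundary)
        tunnel mode: 1440 - 2          = 1438        (reserve pad-length + next-header)

A 1438-byte inner packet plus the 2 trailer bytes fills the 1440-byte block region exactly, and 1440 + 44 + 12 = 1496 <= 1500, while a 1439-byte packet would pad up to 1456 and overflow the link MTU. The old loop converged on a value like this only after several get_max_size() probes and could land below it. The reworked trailer_len likewise reserves the true worst case of align + 1 + ICV bytes (16 + 1 + 12 = 29 with these numbers).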
Miller --- include/net/xfrm.h | 2 +- net/ipv4/esp4.c | 30 ++++++++++++++++++------------ net/ipv6/esp6.c | 22 ++++++++++++---------- net/xfrm/xfrm_state.c | 36 ++++++++---------------------------- 4 files changed, 39 insertions(+), 51 deletions(-) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 5a00aa85b756..e144a25814bd 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -279,7 +279,7 @@ struct xfrm_type xfrm_address_t *(*local_addr)(struct xfrm_state *, xfrm_address_t *); xfrm_address_t *(*remote_addr)(struct xfrm_state *, xfrm_address_t *); /* Estimate maximal size of result of transformation of a dgram */ - u32 (*get_max_size)(struct xfrm_state *, int size); + u32 (*get_mtu)(struct xfrm_state *, int size); }; extern int xfrm_register_type(struct xfrm_type *type, unsigned short family); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index bdc65d8af181..a315d5d22764 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -272,32 +272,34 @@ out: return -EINVAL; } -static u32 esp4_get_max_size(struct xfrm_state *x, int mtu) +static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); - int enclen = 0; + u32 align = max_t(u32, blksize, esp->conf.padlen); + u32 rem; + + mtu -= x->props.header_len + esp->auth.icv_trunc_len; + rem = mtu & (align - 1); + mtu &= ~(align - 1); switch (x->props.mode) { case XFRM_MODE_TUNNEL: - mtu = ALIGN(mtu +2, blksize); break; default: case XFRM_MODE_TRANSPORT: /* The worst case */ - mtu = ALIGN(mtu + 2, 4) + blksize - 4; + mtu -= blksize - 4; + mtu += min_t(u32, blksize - 4, rem); break; case XFRM_MODE_BEET: /* The worst case. */ - enclen = IPV4_BEET_PHMAXLEN; - mtu = ALIGN(mtu + enclen + 2, blksize); + mtu -= IPV4_BEET_PHMAXLEN; + mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem); break; } - if (esp->conf.padlen) - mtu = ALIGN(mtu, esp->conf.padlen); - - return mtu + x->props.header_len + esp->auth.icv_trunc_len - enclen; + return mtu - 2; } static void esp4_err(struct sk_buff *skb, u32 info) @@ -340,6 +342,7 @@ static int esp_init_state(struct xfrm_state *x) { struct esp_data *esp = NULL; struct crypto_blkcipher *tfm; + u32 align; /* null auth and encryption can have zero length keys */ if (x->aalg) { @@ -421,7 +424,10 @@ static int esp_init_state(struct xfrm_state *x) } } x->data = esp; - x->props.trailer_len = esp4_get_max_size(x, 0) - x->props.header_len; + align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); + if (esp->conf.padlen) + align = max_t(u32, align, esp->conf.padlen); + x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len; return 0; error: @@ -438,7 +444,7 @@ static struct xfrm_type esp_type = .proto = IPPROTO_ESP, .init_state = esp_init_state, .destructor = esp_destroy, - .get_max_size = esp4_get_max_size, + .get_mtu = esp4_get_mtu, .input = esp_input, .output = esp_output }; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 6b76c4c31137..7107bb7e2e62 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -235,22 +235,24 @@ out: return ret; } -static u32 esp6_get_max_size(struct xfrm_state *x, int mtu) +static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); + u32 align = max_t(u32, blksize, esp->conf.padlen); + u32 rem; - if (x->props.mode == XFRM_MODE_TUNNEL) { - mtu = ALIGN(mtu + 2, blksize); - } else { - /* The worst case. 
*/ + mtu -= x->props.header_len + esp->auth.icv_trunc_len; + rem = mtu & (align - 1); + mtu &= ~(align - 1); + + if (x->props.mode != XFRM_MODE_TUNNEL) { u32 padsize = ((blksize - 1) & 7) + 1; - mtu = ALIGN(mtu + 2, padsize) + blksize - padsize; + mtu -= blksize - padsize; + mtu += min_t(u32, blksize - padsize, rem); } - if (esp->conf.padlen) - mtu = ALIGN(mtu, esp->conf.padlen); - return mtu + x->props.header_len + esp->auth.icv_trunc_len; + return mtu - 2; } static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, @@ -380,7 +382,7 @@ static struct xfrm_type esp6_type = .proto = IPPROTO_ESP, .init_state = esp6_init_state, .destructor = esp6_destroy, - .get_max_size = esp6_get_max_size, + .get_mtu = esp6_get_mtu, .input = esp6_input, .output = esp6_output, .hdr_offset = xfrm6_find_1stfragopt, diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 63a20e818164..69a3600afd9d 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1667,37 +1667,17 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_delete_tunnel); -/* - * This function is NOT optimal. For example, with ESP it will give an - * MTU that's usually two bytes short of being optimal. However, it will - * usually give an answer that's a multiple of 4 provided the input is - * also a multiple of 4. - */ int xfrm_state_mtu(struct xfrm_state *x, int mtu) { - int res = mtu; - - res -= x->props.header_len; - - for (;;) { - int m = res; - - if (m < 68) - return 68; - - spin_lock_bh(&x->lock); - if (x->km.state == XFRM_STATE_VALID && - x->type && x->type->get_max_size) - m = x->type->get_max_size(x, m); - else - m += x->props.header_len; - spin_unlock_bh(&x->lock); - - if (m <= mtu) - break; - res -= (m - mtu); - } + int res; + spin_lock_bh(&x->lock); + if (x->km.state == XFRM_STATE_VALID && + x->type && x->type->get_mtu) + res = x->type->get_mtu(x, mtu); + else + res = mtu; + spin_unlock_bh(&x->lock); return res; } -- cgit v1.2.3-59-g8ed1b From 663ead3bb8d5b561e70fc3bb3861c9220b5a77eb Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 9 Apr 2007 11:59:07 -0700 Subject: [NET]: Use csum_start offset instead of skb_transport_header The skb transport pointer is currently used to specify the start of the checksum region for transmit checksum offload. Unfortunately, the same pointer is also used during receive side processing. This creates a problem when we want to retransmit a received packet with partial checksums since the skb transport pointer would be overwritten. This patch solves this problem by creating a new 16-bit csum_start offset value to replace the skb transport header for the purpose of checksums. This offset is calculated from skb->head so that it does not have to change when skb->data changes. No extra space is required since csum_offset itself fits within a 16-bit word so we can use the other 16 bits for csum_start. For backwards compatibility, just before we push a packet with partial checksums off into the device driver, we set the skb transport header to what it would have been under the old scheme. Signed-off-by: Herbert Xu Signed-off-by: David S. 
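For illustration, this is all a transmit path now has to do to request checksum offload; the function name below is invented, but the three assignments mirror the tcp/udp hunks that follow:

        #include <linux/skbuff.h>
        #include <linux/tcp.h>
        #include <linux/stddef.h>

        /* Checksumming starts at the transport header and the result is stored
         * csum_offset bytes past that point.  csum_start is kept relative to
         * skb->head, so later skb_push()/skb_pull() calls do not invalidate it. */
        static void example_request_tx_csum(struct sk_buff *skb)
        {
                skb->ip_summed   = CHECKSUM_PARTIAL;
                skb->csum_start  = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        }

When such a packet reaches a device that cannot offload the checksum, dev_queue_xmit() first points the transport header back at csum_start for old drivers, and skb_checksum_help() folds the result into skb->head + csum_start + csum_offset, as the dev.c hunk below shows.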
Miller --- include/linux/skbuff.h | 9 +++++++-- net/core/dev.c | 25 +++++++++++++++---------- net/core/skbuff.c | 2 +- net/ipv4/tcp_ipv4.c | 2 ++ net/ipv4/udp.c | 1 + net/ipv6/tcp_ipv6.c | 2 ++ 6 files changed, 28 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9b2957d203c9..910560e85561 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -205,7 +205,9 @@ typedef unsigned char *sk_buff_data_t; * @len: Length of actual data * @data_len: Data length * @mac_len: Length of link layer header - * @csum: Checksum + * @csum: Checksum (must include start/offset pair) + * @csum_start: Offset from skb->head where checksumming should start + * @csum_offset: Offset from csum_start where checksum should be stored * @local_df: allow local fragmentation * @cloned: Head may be cloned (check refcnt to be sure) * @nohdr: Payload reference only, must not modify header @@ -261,7 +263,10 @@ struct sk_buff { mac_len; union { __wsum csum; - __u32 csum_offset; + struct { + __u16 csum_start; + __u16 csum_offset; + }; }; __u32 priority; __u8 local_df:1, diff --git a/net/core/dev.c b/net/core/dev.c index fec8cf27f75d..d23972f56fc7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1155,7 +1155,7 @@ EXPORT_SYMBOL(netif_device_attach); int skb_checksum_help(struct sk_buff *skb) { __wsum csum; - int ret = 0, offset = skb_transport_offset(skb); + int ret = 0, offset; if (skb->ip_summed == CHECKSUM_COMPLETE) goto out_set_summed; @@ -1171,15 +1171,16 @@ int skb_checksum_help(struct sk_buff *skb) goto out; } + offset = skb->csum_start - skb_headroom(skb); BUG_ON(offset > (int)skb->len); csum = skb_checksum(skb, offset, skb->len-offset, 0); - offset = skb->tail - skb->transport_header; + offset = skb_headlen(skb) - offset; BUG_ON(offset <= 0); BUG_ON(skb->csum_offset + 2 > offset); - *(__sum16 *)(skb_transport_header(skb) + - skb->csum_offset) = csum_fold(csum); + *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = + csum_fold(csum); out_set_summed: skb->ip_summed = CHECKSUM_NONE; out: @@ -1431,12 +1432,16 @@ int dev_queue_xmit(struct sk_buff *skb) /* If packet is not checksummed and device does not support * checksumming for this protocol, complete checksumming here. 
*/ - if (skb->ip_summed == CHECKSUM_PARTIAL && - (!(dev->features & NETIF_F_GEN_CSUM) && - (!(dev->features & NETIF_F_IP_CSUM) || - skb->protocol != htons(ETH_P_IP)))) - if (skb_checksum_help(skb)) - goto out_kfree_skb; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_set_transport_header(skb, skb->csum_start - + skb_headroom(skb)); + + if (!(dev->features & NETIF_F_GEN_CSUM) && + (!(dev->features & NETIF_F_IP_CSUM) || + skb->protocol != htons(ETH_P_IP))) + if (skb_checksum_help(skb)) + goto out_kfree_skb; + } gso: spin_lock_prefetch(&dev->queue_lock); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4965df29768b..52a4fdd4f31c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1358,7 +1358,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) long csstart; if (skb->ip_summed == CHECKSUM_PARTIAL) - csstart = skb_transport_offset(skb); + csstart = skb->csum_start - skb_headroom(skb); else csstart = skb_headlen(skb); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e11eaf4cc269..a091a99ad263 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -504,6 +504,7 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~tcp_v4_check(len, inet->saddr, inet->daddr, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); } else { th->check = tcp_v4_check(len, inet->saddr, inet->daddr, @@ -526,6 +527,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) th->check = 0; th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); skb->ip_summed = CHECKSUM_PARTIAL; return 0; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 71b0b60ba538..5ad7a26e3091 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -427,6 +427,7 @@ static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, /* * Only one fragment on the socket. */ + skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); } else { diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4a55da079f5f..7e824b97126d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -950,6 +950,7 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); } else { th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, @@ -972,6 +973,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) th->check = 0; th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, IPPROTO_TCP, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); skb->ip_summed = CHECKSUM_PARTIAL; return 0; -- cgit v1.2.3-59-g8ed1b From 604763722c655c7e3f31ecf6f7b4dafcd26a7a15 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 9 Apr 2007 11:59:39 -0700 Subject: [NET]: Treat CHECKSUM_PARTIAL as CHECKSUM_UNNECESSARY When a transmitted packet is looped back directly, CHECKSUM_PARTIAL maps to the semantics of CHECKSUM_UNNECESSARY. Therefore we should treat it as such in the stack. Signed-off-by: Herbert Xu Signed-off-by: David S. 
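The constants are renumbered in the skbuff.h hunk below precisely so that one bit test covers both cases in which no software verification is needed; spelled out:

        ip_summed value              value & CHECKSUM_UNNECESSARY
        CHECKSUM_NONE        = 0     0   (verify in software)
        CHECKSUM_UNNECESSARY = 1     1   (hardware already verified it)
        CHECKSUM_COMPLETE    = 2     0   (full checksum available, still to be checked)
        CHECKSUM_PARTIAL     = 3     1   (locally generated, trusted as correct)

The new skb_csum_unnecessary() helper is just that AND, so every receive-side site that used to test ip_summed != CHECKSUM_UNNECESSARY now also skips verification for looped-back CHECKSUM_PARTIAL packets, which is the point of this patch.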
Miller --- include/linux/skbuff.h | 16 +++++++++++----- include/net/tcp.h | 2 +- include/net/udp.h | 2 +- net/core/netpoll.c | 2 +- net/ipv4/ipvs/ip_vs_core.c | 6 ++---- net/ipv4/tcp_input.c | 6 +++--- net/ipv4/tcp_ipv4.c | 3 +-- net/ipv4/udp.c | 4 ++-- net/ipv6/raw.c | 4 ++-- net/ipv6/tcp_ipv6.c | 3 +-- net/ipv6/udp.c | 4 ++-- net/sctp/input.c | 3 +-- net/sunrpc/socklib.c | 2 +- 13 files changed, 29 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 910560e85561..c413afbe0b9c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -32,10 +32,11 @@ #define HAVE_ALLOC_SKB /* For the drivers to know */ #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ +/* Don't change this without changing skb_csum_unnecessary! */ #define CHECKSUM_NONE 0 -#define CHECKSUM_PARTIAL 1 -#define CHECKSUM_UNNECESSARY 2 -#define CHECKSUM_COMPLETE 3 +#define CHECKSUM_UNNECESSARY 1 +#define CHECKSUM_COMPLETE 2 +#define CHECKSUM_PARTIAL 3 #define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \ ~(SMP_CACHE_BYTES - 1)) @@ -1572,6 +1573,11 @@ static inline void __net_timestamp(struct sk_buff *skb) extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); +static inline int skb_csum_unnecessary(const struct sk_buff *skb) +{ + return skb->ip_summed & CHECKSUM_UNNECESSARY; +} + /** * skb_checksum_complete - Calculate checksum of an entire packet * @skb: packet to process @@ -1590,8 +1596,8 @@ extern __sum16 __skb_checksum_complete(struct sk_buff *skb); */ static inline unsigned int skb_checksum_complete(struct sk_buff *skb) { - return skb->ip_summed != CHECKSUM_UNNECESSARY && - __skb_checksum_complete(skb); + return skb_csum_unnecessary(skb) ? 
+ 0 : __skb_checksum_complete(skb); } #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) diff --git a/include/net/tcp.h b/include/net/tcp.h index af9273204cfd..07f724e02f84 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -818,7 +818,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb) static inline int tcp_checksum_complete(struct sk_buff *skb) { - return skb->ip_summed != CHECKSUM_UNNECESSARY && + return !skb_csum_unnecessary(skb) && __tcp_checksum_complete(skb); } diff --git a/include/net/udp.h b/include/net/udp.h index 4906ed7113e7..98755ebaf163 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -77,7 +77,7 @@ static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb) static inline int udp_lib_checksum_complete(struct sk_buff *skb) { - return skb->ip_summed != CHECKSUM_UNNECESSARY && + return !skb_csum_unnecessary(skb) && __udp_lib_checksum_complete(skb); } diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 1fb30c3528bc..b316435b0e2a 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -86,7 +86,7 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, { __wsum psum; - if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY) + if (uh->check == 0 || skb_csum_unnecessary(skb)) return 0; psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index 62cfbed317bf..f005a2f929f4 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c @@ -681,8 +681,7 @@ static int ip_vs_out_icmp(struct sk_buff **pskb, int *related) } /* Ensure the checksum is correct */ - if (skb->ip_summed != CHECKSUM_UNNECESSARY && - ip_vs_checksum_complete(skb, ihl)) { + if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { /* Failed checksum! */ IP_VS_DBG(1, "Forward ICMP: failed checksum from %d.%d.%d.%d!\n", NIPQUAD(iph->saddr)); @@ -921,8 +920,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) verdict = NF_DROP; /* Ensure the checksum is correct */ - if (skb->ip_summed != CHECKSUM_UNNECESSARY && - ip_vs_checksum_complete(skb, ihl)) { + if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { /* Failed checksum! 
*/ IP_VS_DBG(1, "Incoming ICMP: failed checksum from %d.%d.%d.%d!\n", NIPQUAD(iph->saddr)); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9c3b4c7a50ad..d1604f59d77e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4009,7 +4009,7 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) int err; local_bh_enable(); - if (skb->ip_summed==CHECKSUM_UNNECESSARY) + if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); else err = skb_copy_and_csum_datagram_iovec(skb, hlen, @@ -4041,7 +4041,7 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { - return skb->ip_summed != CHECKSUM_UNNECESSARY && + return !skb_csum_unnecessary(skb) && __tcp_checksum_complete_user(sk, skb); } @@ -4059,7 +4059,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) tp->ucopy.dma_chan = get_softnet_dma(); - if (tp->ucopy.dma_chan && skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a091a99ad263..5a3e7f839fc5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1638,8 +1638,7 @@ int tcp_v4_rcv(struct sk_buff *skb) * Packet length and doff are validated by header prediction, * provided case of th->doff==0 is eliminated. * So, we defer the checks. */ - if ((skb->ip_summed != CHECKSUM_UNNECESSARY && - tcp_v4_checksum_init(skb))) + if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb)) goto bad_packet; th = tcp_hdr(skb); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5ad7a26e3091..cec0f2cc49b7 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -848,7 +848,7 @@ try_again: goto csum_copy_err; } - if (skb->ip_summed == CHECKSUM_UNNECESSARY) + if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied ); else { @@ -1190,7 +1190,7 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } - if (skb->ip_summed != CHECKSUM_UNNECESSARY) + if (!skb_csum_unnecessary(skb)) skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, proto, 0); /* Probably, we should checksum udp header (it should be in cache diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 2b3be68b70a7..f65fcd7704ca 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -368,7 +368,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) skb->len, inet->num, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } - if (skb->ip_summed != CHECKSUM_UNNECESSARY) + if (!skb_csum_unnecessary(skb)) skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len, @@ -421,7 +421,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, msg->msg_flags |= MSG_TRUNC; } - if (skb->ip_summed==CHECKSUM_UNNECESSARY) { + if (skb_csum_unnecessary(skb)) { err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else if (msg->msg_flags&MSG_TRUNC) { if (__skb_checksum_complete(skb)) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7e824b97126d..2b668a6ae698 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1707,8 +1707,7 @@ static int tcp_v6_rcv(struct sk_buff 
**pskb) if (!pskb_may_pull(skb, th->doff*4)) goto discard_it; - if ((skb->ip_summed != CHECKSUM_UNNECESSARY && - tcp_v6_checksum_init(skb))) + if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb)) goto bad_packet; th = tcp_hdr(skb); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 1e3dfb20b1cf..b083c09e3d2d 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -153,7 +153,7 @@ try_again: goto csum_copy_err; } - if (skb->ip_summed == CHECKSUM_UNNECESSARY) + if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied ); else { @@ -397,7 +397,7 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, skb->len, proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; - if (skb->ip_summed != CHECKSUM_UNNECESSARY) + if (!skb_csum_unnecessary(skb)) skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len, proto, 0)); diff --git a/net/sctp/input.c b/net/sctp/input.c index 18b97eedc1fa..885109fb3dda 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -140,8 +140,7 @@ int sctp_rcv(struct sk_buff *skb) __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; - if ((skb->ip_summed != CHECKSUM_UNNECESSARY) && - (sctp_rcv_checksum(skb) < 0)) + if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0) goto discard_it; skb_pull(skb, sizeof(struct sctphdr)); diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index 634885b0c04d..1d377d1ab7f4 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c @@ -154,7 +154,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) desc.offset = sizeof(struct udphdr); desc.count = skb->len - desc.offset; - if (skb->ip_summed == CHECKSUM_UNNECESSARY) + if (skb_csum_unnecessary(skb)) goto no_checksum; desc.csum = csum_partial(skb->data, desc.offset, skb->csum); -- cgit v1.2.3-59-g8ed1b From 6229e362dd49b9e8387126bd4483ab0574d23e9c Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 21 Mar 2007 13:38:47 -0700 Subject: bridge: eliminate call by reference Change the bridging hook to be simple function with return value rather than modifying the skb argument. This could generate better code and is cleaner. Signed-off-by: Stephen Hemminger --- include/linux/if_bridge.h | 3 ++- net/bridge/br_input.c | 20 +++++++++----------- net/bridge/br_private.h | 3 ++- net/core/dev.c | 31 +++++++++++++++++++------------ 4 files changed, 32 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index fd1b6eb94a5f..4ff211d98769 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -105,7 +105,8 @@ struct __fdb_entry #include extern void brioctl_set(int (*ioctl_hook)(unsigned int, void __user *)); -extern int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb); +extern struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, + struct sk_buff *skb); extern int (*br_should_route_hook)(struct sk_buff **pskb); #endif diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index a260679afad8..2f5c379d9ffa 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -121,13 +121,11 @@ static inline int is_link_local(const unsigned char *dest) /* * Called via br_handle_frame_hook. 
- * Return 0 if *pskb should be processed furthur - * 1 if *pskb is handled + * Return NULL if skb is handled * note: already called with rcu_read_lock (preempt_disabled) */ -int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb) +struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) { - struct sk_buff *skb = *pskb; const unsigned char *dest = eth_hdr(skb)->h_dest; if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) @@ -135,15 +133,15 @@ int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb) if (unlikely(is_link_local(dest))) { skb->pkt_type = PACKET_HOST; - return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, - NULL, br_handle_local_finish) != 0; + + return (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, + NULL, br_handle_local_finish) == 0) ? skb : NULL; } if (p->state == BR_STATE_FORWARDING || p->state == BR_STATE_LEARNING) { if (br_should_route_hook) { - if (br_should_route_hook(pskb)) - return 0; - skb = *pskb; + if (br_should_route_hook(&skb)) + return skb; dest = eth_hdr(skb)->h_dest; } @@ -152,10 +150,10 @@ int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb) NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish); - return 1; + return NULL; } err: kfree_skb(skb); - return 1; + return NULL; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 2b73de6c0b47..fab8ce0ce88d 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -182,7 +182,8 @@ extern void br_features_recompute(struct net_bridge *br); /* br_input.c */ extern int br_handle_frame_finish(struct sk_buff *skb); -extern int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb); +extern struct sk_buff *br_handle_frame(struct net_bridge_port *p, + struct sk_buff *skb); /* br_ioctl.c */ extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); diff --git a/net/core/dev.c b/net/core/dev.c index d23972f56fc7..7f31d0f88424 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1687,31 +1687,37 @@ static inline int deliver_skb(struct sk_buff *skb, } #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) -int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb); +/* These hooks defined here for ATM */ struct net_bridge; struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br, unsigned char *addr); -void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent); +void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly; -static __inline__ int handle_bridge(struct sk_buff **pskb, - struct packet_type **pt_prev, int *ret, - struct net_device *orig_dev) +/* + * If bridge module is loaded call bridging hook. + * returns NULL if packet was consumed. 
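As an aside on the calling convention this patch introduces: a hook now signals "frame consumed" by returning NULL and "keep processing" by returning an skb (possibly a replacement). The minimal sketch below is hypothetical and is not part of the patch; the real in-tree implementation is the br_handle_frame() shown earlier in this diff.

#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>

/* Hypothetical hook, for illustration only: under the new contract the
 * caller keeps processing whatever skb is returned, and stops (and must
 * not touch the skb again) when NULL comes back.  The old convention
 * expressed the same thing through a struct sk_buff ** argument plus an
 * int return value.
 */
static struct sk_buff *example_handle_frame(struct net_bridge_port *p,
					    struct sk_buff *skb)
{
	if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) {
		kfree_skb(skb);		/* consumed: tell the caller to stop */
		return NULL;
	}
	return skb;			/* not handled: caller continues with skb */
}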
+ */ +struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, + struct sk_buff *skb) __read_mostly; +static inline struct sk_buff *handle_bridge(struct sk_buff *skb, + struct packet_type **pt_prev, int *ret, + struct net_device *orig_dev) { struct net_bridge_port *port; - if ((*pskb)->pkt_type == PACKET_LOOPBACK || - (port = rcu_dereference((*pskb)->dev->br_port)) == NULL) - return 0; + if (skb->pkt_type == PACKET_LOOPBACK || + (port = rcu_dereference(skb->dev->br_port)) == NULL) + return skb; if (*pt_prev) { - *ret = deliver_skb(*pskb, *pt_prev, orig_dev); + *ret = deliver_skb(skb, *pt_prev, orig_dev); *pt_prev = NULL; } - return br_handle_frame_hook(port, pskb); + return br_handle_frame_hook(port, skb); } #else -#define handle_bridge(skb, pt_prev, ret, orig_dev) (0) +#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb) #endif #ifdef CONFIG_NET_CLS_ACT @@ -1818,7 +1824,8 @@ int netif_receive_skb(struct sk_buff *skb) ncls: #endif - if (handle_bridge(&skb, &pt_prev, &ret, orig_dev)) + skb = handle_bridge(skb, &pt_prev, &ret, orig_dev); + if (!skb) goto out; type = skb->protocol; -- cgit v1.2.3-59-g8ed1b From 91d73c15cb165195bc8c3d6a35e30df454b1485b Mon Sep 17 00:00:00 2001 From: Gerrit Renker Date: Fri, 20 Apr 2007 13:57:21 -0700 Subject: [DCCP]: Complete documentation of dccp_sock This fills in missing documentation for dccp_sock fields. Signed-off-by: Gerrit Renker Signed-off-by: Ian McDonald Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- include/linux/dccp.h | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/dccp.h b/include/linux/dccp.h index e668cf531bab..fda2148d8c85 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -465,21 +465,20 @@ struct dccp_ackvec; * @dccps_service_list - second .. 
last service code on passive socket * @dccps_timestamp_time - time of latest TIMESTAMP option * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option - * @dccps_l_ack_ratio - - * @dccps_r_ack_ratio - + * @dccps_l_ack_ratio - feature-local Ack Ratio + * @dccps_r_ack_ratio - feature-remote Ack Ratio * @dccps_pcslen - sender partial checksum coverage (via sockopt) * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) * @dccps_ndp_count - number of Non Data Packets since last data packet - * @dccps_mss_cache - - * @dccps_minisock - + * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) + * @dccps_minisock - associated minisock (accessed via dccp_msk) * @dccps_hc_rx_ackvec - rx half connection ack vector - * @dccps_hc_rx_ccid - - * @dccps_hc_tx_ccid - - * @dccps_options_received - - * @dccps_epoch - - * @dccps_role - Role of this sock, one of %dccp_role - * @dccps_hc_rx_insert_options - - * @dccps_hc_tx_insert_options - + * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) + * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection) + * @dccps_options_received - parsed set of retrieved options + * @dccps_role - role of this sock, one of %dccp_role + * @dccps_hc_rx_insert_options - receiver wants to add options when acking + * @dccps_hc_tx_insert_options - sender wants to add options when sending * @dccps_xmit_timer - timer for when CCID is not ready to send * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs) */ -- cgit v1.2.3-59-g8ed1b From 516299d2f5b6f9703b9b388faf91898dc636a678 Mon Sep 17 00:00:00 2001 From: Michael Milner Date: Thu, 12 Apr 2007 22:14:23 -0700 Subject: [NETFILTER]: bridge-nf: filter bridged IPv4/IPv6 encapsulated in pppoe traffic The attached patch by Michael Milner adds support for using iptables and ip6tables on bridged traffic encapsulated in ppoe frames, similar to what's already supported for vlan. Signed-off-by: Michael Milner Signed-off-by: Bart De Schuymer Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 7 +++- include/linux/if_pppox.h | 3 ++ include/linux/netfilter_bridge.h | 11 ++++- include/linux/sysctl.h | 1 + net/bridge/br_netfilter.c | 77 ++++++++++++++++++++++++++++++++-- 5 files changed, 92 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 054c515bd726..af6a63ab9026 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1015,7 +1015,12 @@ bridge-nf-call-ip6tables - BOOLEAN Default: 1 bridge-nf-filter-vlan-tagged - BOOLEAN - 1 : pass bridged vlan-tagged ARP/IP traffic to arptables/iptables. + 1 : pass bridged vlan-tagged ARP/IP/IPv6 traffic to {arp,ip,ip6}tables. + 0 : disable this. + Default: 1 + +bridge-nf-filter-pppoe-tagged - BOOLEAN + 1 : pass bridged pppoe-tagged IP/IPv6 traffic to {ip,ip6}tables. 0 : disable this. 
Default: 1 diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 29d6579ff1a0..6f987be60fe2 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -111,6 +111,9 @@ struct pppoe_hdr { struct pppoe_tag tag[0]; } __attribute__ ((packed)); +/* Length of entire PPPoE + PPP header */ +#define PPPOE_SES_HLEN 8 + #ifdef __KERNEL__ #include diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 55689f39f77a..19060030bac9 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -7,6 +7,7 @@ #include #include #include +#include /* Bridge Hooks */ /* After promisc drops, checksum checks. */ @@ -58,8 +59,14 @@ static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) * enough room for the encapsulating header (if there is one). */ static inline int nf_bridge_pad(const struct sk_buff *skb) { - return (skb->nf_bridge && skb->protocol == htons(ETH_P_8021Q)) - ? VLAN_HLEN : 0; + int padding = 0; + + if (skb->nf_bridge && skb->protocol == htons(ETH_P_8021Q)) + padding = VLAN_HLEN; + else if (skb->nf_bridge && skb->protocol == htons(ETH_P_PPP_SES)) + padding = PPPOE_SES_HLEN; + + return padding; } struct bridge_skb_cb { diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index df2d9ed20a4e..47f1c53332ce 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -792,6 +792,7 @@ enum { NET_BRIDGE_NF_CALL_IPTABLES = 2, NET_BRIDGE_NF_CALL_IP6TABLES = 3, NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4, + NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5, }; /* CTL_FS names: */ diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index fd70d041e51f..9b2986b182ba 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include #include @@ -57,8 +59,10 @@ static int brnf_call_iptables __read_mostly = 1; static int brnf_call_ip6tables __read_mostly = 1; static int brnf_call_arptables __read_mostly = 1; static int brnf_filter_vlan_tagged __read_mostly = 1; +static int brnf_filter_pppoe_tagged __read_mostly = 1; #else #define brnf_filter_vlan_tagged 1 +#define brnf_filter_pppoe_tagged 1 #endif static inline __be16 vlan_proto(const struct sk_buff *skb) @@ -81,6 +85,22 @@ static inline __be16 vlan_proto(const struct sk_buff *skb) vlan_proto(skb) == htons(ETH_P_ARP) && \ brnf_filter_vlan_tagged) +static inline __be16 pppoe_proto(const struct sk_buff *skb) +{ + return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN + + sizeof(struct pppoe_hdr))); +} + +#define IS_PPPOE_IP(skb) \ + (skb->protocol == htons(ETH_P_PPP_SES) && \ + pppoe_proto(skb) == htons(PPP_IP) && \ + brnf_filter_pppoe_tagged) + +#define IS_PPPOE_IPV6(skb) \ + (skb->protocol == htons(ETH_P_PPP_SES) && \ + pppoe_proto(skb) == htons(PPP_IPV6) && \ + brnf_filter_pppoe_tagged) + /* We need these fake structures to make netfilter happy -- * lots of places assume that skb->dst != NULL, which isn't * all that unreasonable. 
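A quick aside on the PPPOE_SES_HLEN constant used throughout this patch: assuming the standard RFC 2516 session encapsulation that struct pppoe_hdr in <linux/if_pppox.h> mirrors, the 8 bytes are the 6-byte PPPoE header plus the 2-byte PPP protocol field, which is also why pppoe_proto() above reads the protocol at skb_mac_header(skb) + ETH_HLEN + sizeof(struct pppoe_hdr). A sketch of the arithmetic (the EXAMPLE_ macro name is illustrative, not from the patch):

/* Layout assumed here (RFC 2516, session stage):
 *   ver/type    1 byte
 *   code        1 byte
 *   session id  2 bytes
 *   length      2 bytes   -> sizeof(struct pppoe_hdr) == 6
 *   PPP proto   2 bytes   -> PPP_IP or PPP_IPV6 (0x0021 / 0x0057 on the wire)
 * so 8 bytes in total are skipped (or restored) when moving between the
 * Ethernet payload and the encapsulated IP/IPv6 header.
 */
#define EXAMPLE_PPPOE_SES_HLEN	(sizeof(struct pppoe_hdr) + sizeof(__be16))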
@@ -128,6 +148,8 @@ static inline void nf_bridge_save_header(struct sk_buff *skb) if (skb->protocol == htons(ETH_P_8021Q)) header_size += VLAN_HLEN; + else if (skb->protocol == htons(ETH_P_PPP_SES)) + header_size += PPPOE_SES_HLEN; skb_copy_from_linear_data_offset(skb, -header_size, skb->nf_bridge->data, header_size); @@ -144,6 +166,8 @@ int nf_bridge_copy_header(struct sk_buff *skb) if (skb->protocol == htons(ETH_P_8021Q)) header_size += VLAN_HLEN; + else if (skb->protocol == htons(ETH_P_PPP_SES)) + header_size += PPPOE_SES_HLEN; err = skb_cow(skb, header_size); if (err) @@ -154,6 +178,8 @@ int nf_bridge_copy_header(struct sk_buff *skb) if (skb->protocol == htons(ETH_P_8021Q)) __skb_push(skb, VLAN_HLEN); + else if (skb->protocol == htons(ETH_P_PPP_SES)) + __skb_push(skb, PPPOE_SES_HLEN); return 0; } @@ -177,6 +203,9 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) if (skb->protocol == htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); skb->network_header -= VLAN_HLEN; + } else if (skb->protocol == htons(ETH_P_PPP_SES)) { + skb_push(skb, PPPOE_SES_HLEN); + skb->network_header -= PPPOE_SES_HLEN; } NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); @@ -258,6 +287,9 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull(skb, VLAN_HLEN); skb->network_header += VLAN_HLEN; + } else if (skb->protocol == htons(ETH_P_PPP_SES)) { + skb_pull(skb, PPPOE_SES_HLEN); + skb->network_header += PPPOE_SES_HLEN; } skb->dst->output(skb); } @@ -328,6 +360,10 @@ bridged_dnat: htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); skb->network_header -= VLAN_HLEN; + } else if(skb->protocol == + htons(ETH_P_PPP_SES)) { + skb_push(skb, PPPOE_SES_HLEN); + skb->network_header -= PPPOE_SES_HLEN; } NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, @@ -347,6 +383,9 @@ bridged_dnat: if (skb->protocol == htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); skb->network_header -= VLAN_HLEN; + } else if (skb->protocol == htons(ETH_P_PPP_SES)) { + skb_push(skb, PPPOE_SES_HLEN); + skb->network_header -= PPPOE_SES_HLEN; } NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, br_handle_frame_finish, 1); @@ -489,7 +528,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, __u32 len; struct sk_buff *skb = *pskb; - if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb)) { + if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || + IS_PPPOE_IPV6(skb)) { #ifdef CONFIG_SYSCTL if (!brnf_call_ip6tables) return NF_ACCEPT; @@ -500,6 +540,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull_rcsum(skb, VLAN_HLEN); skb->network_header += VLAN_HLEN; + } else if (skb->protocol == htons(ETH_P_PPP_SES)) { + skb_pull_rcsum(skb, PPPOE_SES_HLEN); + skb->network_header += PPPOE_SES_HLEN; } return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn); } @@ -508,7 +551,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, return NF_ACCEPT; #endif - if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb)) + if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) && + !IS_PPPOE_IP(skb)) return NF_ACCEPT; if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL) @@ -517,6 +561,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull_rcsum(skb, VLAN_HLEN); skb->network_header += VLAN_HLEN; + 
} else if (skb->protocol == htons(ETH_P_PPP_SES)) { + skb_pull_rcsum(skb, PPPOE_SES_HLEN); + skb->network_header += PPPOE_SES_HLEN; } if (!pskb_may_pull(skb, sizeof(struct iphdr))) @@ -598,6 +645,9 @@ static int br_nf_forward_finish(struct sk_buff *skb) if (skb->protocol == htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); skb->network_header -= VLAN_HLEN; + } else if (skb->protocol == htons(ETH_P_PPP_SES)) { + skb_push(skb, PPPOE_SES_HLEN); + skb->network_header -= PPPOE_SES_HLEN; } NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in, skb->dev, br_forward_finish, 1); @@ -626,7 +676,8 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb, if (!parent) return NF_DROP; - if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) + if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) || + IS_PPPOE_IP(skb)) pf = PF_INET; else pf = PF_INET6; @@ -634,6 +685,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb, if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull(*pskb, VLAN_HLEN); (*pskb)->network_header += VLAN_HLEN; + } else if (skb->protocol == htons(ETH_P_PPP_SES)) { + skb_pull(*pskb, PPPOE_SES_HLEN); + (*pskb)->network_header += PPPOE_SES_HLEN; } nf_bridge = skb->nf_bridge; @@ -726,6 +780,9 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, if (skb->protocol == htons(ETH_P_8021Q)) { skb_push(skb, VLAN_HLEN); skb->network_header -= VLAN_HLEN; + } else if (skb->protocol == htons(ETH_P_PPP_SES)) { + skb_push(skb, PPPOE_SES_HLEN); + skb->network_header -= PPPOE_SES_HLEN; } NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev, @@ -771,7 +828,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, if (!realoutdev) return NF_DROP; - if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) + if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) || + IS_PPPOE_IP(skb)) pf = PF_INET; else pf = PF_INET6; @@ -793,6 +851,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, if (skb->protocol == htons(ETH_P_8021Q)) { skb_pull(skb, VLAN_HLEN); skb->network_header += VLAN_HLEN; + } else if (skb->protocol == htons(ETH_P_PPP_SES)) { + skb_pull(skb, PPPOE_SES_HLEN); + skb->network_header += PPPOE_SES_HLEN; } nf_bridge_save_header(skb); @@ -930,6 +991,14 @@ static ctl_table brnf_table[] = { .mode = 0644, .proc_handler = &brnf_sysctl_call_tables, }, + { + .ctl_name = NET_BRIDGE_NF_FILTER_PPPOE_TAGGED, + .procname = "bridge-nf-filter-pppoe-tagged", + .data = &brnf_filter_pppoe_tagged, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &brnf_sysctl_call_tables, + }, { .ctl_name = 0 } }; -- cgit v1.2.3-59-g8ed1b From c15bf6e699f4c366f2d1e19ac5d7add21c6b5a19 Mon Sep 17 00:00:00 2001 From: Bart De Schuymer Date: Thu, 12 Apr 2007 22:15:06 -0700 Subject: [NETFILTER]: ebt_arp: add gratuitous arp filtering The attached patch adds gratuitous arp filtering, more precisely: it allows checking that the IPv4 source address matches the IPv4 destination address inside the ARP header. It also adds a check for the hardware address type when matching MAC addresses (nothing critical, just for better consistency). Signed-off-by: Bart De Schuymer Acked-by: Carl-Daniel Hailfinger Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/linux/netfilter_bridge/ebt_arp.h | 4 ++- net/bridge/netfilter/ebt_arp.c | 48 +++++++++++++++----------------- 2 files changed, 25 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter_bridge/ebt_arp.h b/include/linux/netfilter_bridge/ebt_arp.h index 97e4dbde1f89..cbf4843b6b0f 100644 --- a/include/linux/netfilter_bridge/ebt_arp.h +++ b/include/linux/netfilter_bridge/ebt_arp.h @@ -8,8 +8,10 @@ #define EBT_ARP_DST_IP 0x10 #define EBT_ARP_SRC_MAC 0x20 #define EBT_ARP_DST_MAC 0x40 +#define EBT_ARP_GRAT 0x80 #define EBT_ARP_MASK (EBT_ARP_OPCODE | EBT_ARP_HTYPE | EBT_ARP_PTYPE | \ - EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC) + EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC | \ + EBT_ARP_GRAT) #define EBT_ARP_MATCH "arp" struct ebt_arp_info diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c index 9c599800a900..1a46952a56d9 100644 --- a/net/bridge/netfilter/ebt_arp.c +++ b/net/bridge/netfilter/ebt_arp.c @@ -35,40 +35,36 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in return EBT_NOMATCH; if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP)) { - __be32 _addr, *ap; + __be32 saddr, daddr, *sap, *dap; - /* IPv4 addresses are always 4 bytes */ - if (ah->ar_pln != sizeof(__be32)) + if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP)) + return EBT_NOMATCH; + sap = skb_header_pointer(skb, sizeof(struct arphdr) + + ah->ar_hln, sizeof(saddr), + &saddr); + if (sap == NULL) + return EBT_NOMATCH; + dap = skb_header_pointer(skb, sizeof(struct arphdr) + + 2*ah->ar_hln+sizeof(saddr), + sizeof(daddr), &daddr); + if (dap == NULL) + return EBT_NOMATCH; + if (info->bitmask & EBT_ARP_SRC_IP && + FWINV(info->saddr != (*sap & info->smsk), EBT_ARP_SRC_IP)) + return EBT_NOMATCH; + if (info->bitmask & EBT_ARP_DST_IP && + FWINV(info->daddr != (*dap & info->dmsk), EBT_ARP_DST_IP)) + return EBT_NOMATCH; + if (info->bitmask & EBT_ARP_GRAT && + FWINV(*dap != *sap, EBT_ARP_GRAT)) return EBT_NOMATCH; - if (info->bitmask & EBT_ARP_SRC_IP) { - ap = skb_header_pointer(skb, sizeof(struct arphdr) + - ah->ar_hln, sizeof(_addr), - &_addr); - if (ap == NULL) - return EBT_NOMATCH; - if (FWINV(info->saddr != (*ap & info->smsk), - EBT_ARP_SRC_IP)) - return EBT_NOMATCH; - } - - if (info->bitmask & EBT_ARP_DST_IP) { - ap = skb_header_pointer(skb, sizeof(struct arphdr) + - 2*ah->ar_hln+sizeof(__be32), - sizeof(_addr), &_addr); - if (ap == NULL) - return EBT_NOMATCH; - if (FWINV(info->daddr != (*ap & info->dmsk), - EBT_ARP_DST_IP)) - return EBT_NOMATCH; - } } if (info->bitmask & (EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)) { unsigned char _mac[ETH_ALEN], *mp; uint8_t verdict, i; - /* MAC addresses are 6 bytes */ - if (ah->ar_hln != ETH_ALEN) + if (ah->ar_hln != ETH_ALEN || ah->ar_hrd != htons(ARPHRD_ETHER)) return EBT_NOMATCH; if (info->bitmask & EBT_ARP_SRC_MAC) { mp = skb_header_pointer(skb, sizeof(struct arphdr), -- cgit v1.2.3-59-g8ed1b From af65bdfce98d7965fbe93a48b8128444a2eea024 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 20 Apr 2007 14:14:21 -0700 Subject: [NETLINK]: Switch cb_lock spinlock to mutex and allow to override it Switch cb_lock to mutex and allow netlink kernel users to override it with a subsystem specific mutex for consistent locking in dump callbacks. All netlink_dump_start users have been audited not to rely on any side-effects of the previously used spinlock. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- drivers/connector/connector.c | 2 +- drivers/scsi/scsi_netlink.c | 3 ++- drivers/scsi/scsi_transport_iscsi.c | 2 +- fs/ecryptfs/netlink.c | 2 +- include/linux/netlink.h | 5 ++++- kernel/audit.c | 2 +- lib/kobject_uevent.c | 2 +- net/bridge/netfilter/ebt_ulog.c | 2 +- net/core/rtnetlink.c | 2 +- net/decnet/netfilter/dn_rtmsg.c | 2 +- net/ipv4/fib_frontend.c | 3 ++- net/ipv4/inet_diag.c | 2 +- net/ipv4/netfilter/ip_queue.c | 2 +- net/ipv4/netfilter/ipt_ULOG.c | 2 +- net/ipv6/netfilter/ip6_queue.c | 2 +- net/netfilter/nfnetlink.c | 2 +- net/netlink/af_netlink.c | 38 ++++++++++++++++++++++--------------- net/netlink/genetlink.c | 2 +- net/xfrm/xfrm_user.c | 2 +- security/selinux/netlink.c | 2 +- 20 files changed, 47 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index 7f9c4fb7e5b0..a7b9e9bb3e8d 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -448,7 +448,7 @@ static int __devinit cn_init(void) dev->nls = netlink_kernel_create(NETLINK_CONNECTOR, CN_NETLINK_USERS + 0xf, - dev->input, THIS_MODULE); + dev->input, NULL, THIS_MODULE); if (!dev->nls) return -EIO; diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 45646a285244..4bf9aa547c78 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -168,7 +168,8 @@ scsi_netlink_init(void) } scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, - SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE); + SCSI_NL_GRP_CNT, scsi_nl_rcv, NULL, + THIS_MODULE); if (!scsi_nl_sock) { printk(KERN_ERR "%s: register of recieve handler failed\n", __FUNCTION__); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 10590cd7e9ed..aabaa0576ab4 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1435,7 +1435,7 @@ static __init int iscsi_transport_init(void) if (err) goto unregister_conn_class; - nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, + nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL, THIS_MODULE); if (!nls) { err = -ENOBUFS; diff --git a/fs/ecryptfs/netlink.c b/fs/ecryptfs/netlink.c index 8405d216a5fc..fe9186312d7c 100644 --- a/fs/ecryptfs/netlink.c +++ b/fs/ecryptfs/netlink.c @@ -229,7 +229,7 @@ int ecryptfs_init_netlink(void) ecryptfs_nl_sock = netlink_kernel_create(NETLINK_ECRYPTFS, 0, ecryptfs_receive_nl_message, - THIS_MODULE); + NULL, THIS_MODULE); if (!ecryptfs_nl_sock) { rc = -EIO; ecryptfs_printk(KERN_ERR, "Failed to create netlink socket\n"); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 0d11f6a7389c..f41688f56632 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -157,7 +157,10 @@ struct netlink_skb_parms #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) -extern struct sock *netlink_kernel_create(int unit, unsigned int groups, void (*input)(struct sock *sk, int len), struct module *module); +extern struct sock *netlink_kernel_create(int unit, unsigned int groups, + void (*input)(struct sock *sk, int len), + struct mutex *cb_mutex, + struct module *module); extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); extern int netlink_has_listeners(struct sock *sk, unsigned int group); extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); diff --git a/kernel/audit.c b/kernel/audit.c index 80a7457dadbf..4e9d20829681 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -795,7 +795,7 
@@ static int __init audit_init(void) printk(KERN_INFO "audit: initializing netlink socket (%s)\n", audit_default ? "enabled" : "disabled"); audit_sock = netlink_kernel_create(NETLINK_AUDIT, 0, audit_receive, - THIS_MODULE); + NULL, THIS_MODULE); if (!audit_sock) audit_panic("cannot initialize netlink socket"); else diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 84272ed77f03..82fc1794b691 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(add_uevent_var); static int __init kobject_uevent_init(void) { uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL, - THIS_MODULE); + NULL, THIS_MODULE); if (!uevent_sock) { printk(KERN_ERR diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index 8b84cd40279e..9411db625917 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -302,7 +302,7 @@ static int __init ebt_ulog_init(void) } ebtulognl = netlink_kernel_create(NETLINK_NFLOG, EBT_ULOG_MAXNLGROUPS, - NULL, THIS_MODULE); + NULL, NULL, THIS_MODULE); if (!ebtulognl) ret = -ENOMEM; else if ((ret = ebt_register_watcher(&ulog))) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5266df337051..648a7b6d15df 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -972,7 +972,7 @@ void __init rtnetlink_init(void) panic("rtnetlink_init: cannot allocate rta_buf\n"); rtnl = netlink_kernel_create(NETLINK_ROUTE, RTNLGRP_MAX, rtnetlink_rcv, - THIS_MODULE); + NULL, THIS_MODULE); if (rtnl == NULL) panic("rtnetlink_init: cannot initialize rtnetlink\n"); netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV); diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 2ee47bab6938..696234688cf6 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -138,7 +138,7 @@ static int __init dn_rtmsg_init(void) int rv = 0; dnrmg = netlink_kernel_create(NETLINK_DNRTMSG, DNRNG_NLGRP_MAX, - dnrmg_receive_user_sk, THIS_MODULE); + dnrmg_receive_user_sk, NULL, THIS_MODULE); if (dnrmg == NULL) { printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket"); return -ENOMEM; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 5bf718a3e49b..953dd458c239 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -827,7 +827,8 @@ static void nl_fib_input(struct sock *sk, int len) static void nl_fib_lookup_init(void) { - netlink_kernel_create(NETLINK_FIB_LOOKUP, 0, nl_fib_input, THIS_MODULE); + netlink_kernel_create(NETLINK_FIB_LOOKUP, 0, nl_fib_input, NULL, + THIS_MODULE); } static void fib_disable_ip(struct net_device *dev, int force) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 0148f0e34ceb..dbeacd8b0f90 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -893,7 +893,7 @@ static int __init inet_diag_init(void) goto out; idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv, - THIS_MODULE); + NULL, THIS_MODULE); if (idiagnl == NULL) goto out_free_table; err = 0; diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 0d72693869e6..702d94db19b9 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -668,7 +668,7 @@ static int __init ip_queue_init(void) netlink_register_notifier(&ipq_nl_notifier); ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk, - THIS_MODULE); + NULL, THIS_MODULE); if (ipqnl == NULL) { printk(KERN_ERR "ip_queue: failed to create netlink socket\n"); goto cleanup_netlink_notifier; diff 
--git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index a2bcba70af50..23b607b33b32 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -420,7 +420,7 @@ static int __init ipt_ulog_init(void) setup_timer(&ulog_buffers[i].timer, ulog_timer, i); nflognl = netlink_kernel_create(NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL, - THIS_MODULE); + NULL, THIS_MODULE); if (!nflognl) return -ENOMEM; diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index bfae9fdc4668..0004db38af6d 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -657,7 +657,7 @@ static int __init ip6_queue_init(void) struct proc_dir_entry *proc; netlink_register_notifier(&ipq_nl_notifier); - ipqnl = netlink_kernel_create(NETLINK_IP6_FW, 0, ipq_rcv_sk, + ipqnl = netlink_kernel_create(NETLINK_IP6_FW, 0, ipq_rcv_sk, NULL, THIS_MODULE); if (ipqnl == NULL) { printk(KERN_ERR "ip6_queue: failed to create netlink socket\n"); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index b0da853eabe0..8797e6953ef2 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -265,7 +265,7 @@ static int __init nfnetlink_init(void) printk("Netfilter messages via NETLINK v%s.\n", nfversion); nfnl = netlink_kernel_create(NETLINK_NETFILTER, NFNLGRP_MAX, - nfnetlink_rcv, THIS_MODULE); + nfnetlink_rcv, NULL, THIS_MODULE); if (!nfnl) { printk(KERN_ERR "cannot initialize nfnetlink!\n"); return -1; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2cbf1682f63d..ec16c9b7b3bd 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include @@ -76,7 +77,8 @@ struct netlink_sock { unsigned long state; wait_queue_head_t wait; struct netlink_callback *cb; - spinlock_t cb_lock; + struct mutex *cb_mutex; + struct mutex cb_def_mutex; void (*data_ready)(struct sock *sk, int bytes); struct module *module; }; @@ -108,6 +110,7 @@ struct netlink_table { unsigned long *listeners; unsigned int nl_nonroot; unsigned int groups; + struct mutex *cb_mutex; struct module *module; int registered; }; @@ -370,7 +373,8 @@ static struct proto netlink_proto = { .obj_size = sizeof(struct netlink_sock), }; -static int __netlink_create(struct socket *sock, int protocol) +static int __netlink_create(struct socket *sock, struct mutex *cb_mutex, + int protocol) { struct sock *sk; struct netlink_sock *nlk; @@ -384,7 +388,8 @@ static int __netlink_create(struct socket *sock, int protocol) sock_init_data(sock, sk); nlk = nlk_sk(sk); - spin_lock_init(&nlk->cb_lock); + nlk->cb_mutex = cb_mutex ? 
: &nlk->cb_def_mutex; + mutex_init(nlk->cb_mutex); init_waitqueue_head(&nlk->wait); sk->sk_destruct = netlink_sock_destruct; @@ -395,6 +400,7 @@ static int __netlink_create(struct socket *sock, int protocol) static int netlink_create(struct socket *sock, int protocol) { struct module *module = NULL; + struct mutex *cb_mutex; struct netlink_sock *nlk; int err = 0; @@ -417,9 +423,10 @@ static int netlink_create(struct socket *sock, int protocol) if (nl_table[protocol].registered && try_module_get(nl_table[protocol].module)) module = nl_table[protocol].module; + cb_mutex = nl_table[protocol].cb_mutex; netlink_unlock_table(); - if ((err = __netlink_create(sock, protocol)) < 0) + if ((err = __netlink_create(sock, cb_mutex, protocol)) < 0) goto out_module; nlk = nlk_sk(sock->sk); @@ -444,14 +451,14 @@ static int netlink_release(struct socket *sock) sock_orphan(sk); nlk = nlk_sk(sk); - spin_lock(&nlk->cb_lock); + mutex_lock(nlk->cb_mutex); if (nlk->cb) { if (nlk->cb->done) nlk->cb->done(nlk->cb); netlink_destroy_callback(nlk->cb); nlk->cb = NULL; } - spin_unlock(&nlk->cb_lock); + mutex_unlock(nlk->cb_mutex); /* OK. Socket is unlinked, and, therefore, no new packets will arrive */ @@ -1266,7 +1273,7 @@ static void netlink_data_ready(struct sock *sk, int len) struct sock * netlink_kernel_create(int unit, unsigned int groups, void (*input)(struct sock *sk, int len), - struct module *module) + struct mutex *cb_mutex, struct module *module) { struct socket *sock; struct sock *sk; @@ -1281,7 +1288,7 @@ netlink_kernel_create(int unit, unsigned int groups, if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) return NULL; - if (__netlink_create(sock, unit) < 0) + if (__netlink_create(sock, cb_mutex, unit) < 0) goto out_sock_release; if (groups < 32) @@ -1305,6 +1312,7 @@ netlink_kernel_create(int unit, unsigned int groups, netlink_table_grab(); nl_table[unit].groups = groups; nl_table[unit].listeners = listeners; + nl_table[unit].cb_mutex = cb_mutex; nl_table[unit].module = module; nl_table[unit].registered = 1; netlink_table_ungrab(); @@ -1347,7 +1355,7 @@ static int netlink_dump(struct sock *sk) if (!skb) goto errout; - spin_lock(&nlk->cb_lock); + mutex_lock(nlk->cb_mutex); cb = nlk->cb; if (cb == NULL) { @@ -1358,7 +1366,7 @@ static int netlink_dump(struct sock *sk) len = cb->dump(skb, cb); if (len > 0) { - spin_unlock(&nlk->cb_lock); + mutex_unlock(nlk->cb_mutex); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, len); return 0; @@ -1376,13 +1384,13 @@ static int netlink_dump(struct sock *sk) if (cb->done) cb->done(cb); nlk->cb = NULL; - spin_unlock(&nlk->cb_lock); + mutex_unlock(nlk->cb_mutex); netlink_destroy_callback(cb); return 0; errout_skb: - spin_unlock(&nlk->cb_lock); + mutex_unlock(nlk->cb_mutex); kfree_skb(skb); errout: return err; @@ -1414,15 +1422,15 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, } nlk = nlk_sk(sk); /* A dump or destruction is in progress... 
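Every caller converted by this patch passes NULL for the new argument and keeps the per-socket default mutex, so here is a hedged sketch of the other case the commit message mentions: a subsystem overriding the dump-callback lock with its own mutex. The "foo" names are hypothetical and NETLINK_GENERIC is only a placeholder unit; a real user would pass its own protocol number.

#include <linux/mutex.h>
#include <linux/netlink.h>
#include <net/sock.h>

static DEFINE_MUTEX(foo_dump_mutex);	/* serializes foo's dump callbacks */

static void foo_netlink_rcv(struct sock *sk, int len)
{
	/* parse requests here, possibly calling netlink_dump_start() */
}

static int __init foo_netlink_init(void)
{
	struct sock *sk;

	/* Passing &foo_dump_mutex instead of NULL makes netlink_dump() and
	 * netlink_dump_start() take the subsystem's own lock around the
	 * dump callback, per the new netlink_kernel_create() signature.
	 */
	sk = netlink_kernel_create(NETLINK_GENERIC /* placeholder unit */, 0,
				   foo_netlink_rcv, &foo_dump_mutex,
				   THIS_MODULE);
	return sk ? 0 : -ENOMEM;
}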
*/ - spin_lock(&nlk->cb_lock); + mutex_lock(nlk->cb_mutex); if (nlk->cb || sock_flag(sk, SOCK_DEAD)) { - spin_unlock(&nlk->cb_lock); + mutex_unlock(nlk->cb_mutex); netlink_destroy_callback(cb); sock_put(sk); return -EBUSY; } nlk->cb = cb; - spin_unlock(&nlk->cb_lock); + mutex_unlock(nlk->cb_mutex); netlink_dump(sk); sock_put(sk); diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index fac2e7a6dbe4..6e31234a4196 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -558,7 +558,7 @@ static int __init genl_init(void) netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV); genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID, - genl_rcv, THIS_MODULE); + genl_rcv, NULL, THIS_MODULE); if (genl_sock == NULL) panic("GENL: Cannot initialize generic netlink\n"); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 2ff968373f1c..88659edc9b1a 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2444,7 +2444,7 @@ static int __init xfrm_user_init(void) printk(KERN_INFO "Initializing XFRM netlink socket\n"); nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX, - xfrm_netlink_rcv, THIS_MODULE); + xfrm_netlink_rcv, NULL, THIS_MODULE); if (nlsk == NULL) return -ENOMEM; rcu_assign_pointer(xfrm_nl, nlsk); diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c index 33f2e064a682..f49046de63a2 100644 --- a/security/selinux/netlink.c +++ b/security/selinux/netlink.c @@ -104,7 +104,7 @@ void selnl_notify_policyload(u32 seqno) static int __init selnl_init(void) { - selnl = netlink_kernel_create(NETLINK_SELINUX, SELNLGRP_MAX, NULL, + selnl = netlink_kernel_create(NETLINK_SELINUX, SELNLGRP_MAX, NULL, NULL, THIS_MODULE); if (selnl == NULL) panic("SELinux: Cannot create netlink socket."); -- cgit v1.2.3-59-g8ed1b From 0463d4ae25771aaf3379bb6b2392f6edf23c2828 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Mon, 16 Apr 2007 17:02:10 -0700 Subject: [NET_SCHED]: Eliminate qdisc_tree_lock Since we're now holding the rtnl during the entire dump operation, we can remove qdisc_tree_lock, whose only purpose is to protect dump callbacks from concurrent changes to the qdisc tree. Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller --- include/net/pkt_sched.h | 2 -- net/sched/cls_api.c | 2 -- net/sched/sch_api.c | 22 +++------------------- net/sched/sch_generic.c | 29 +++++++---------------------- 4 files changed, 10 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index b2cc9a8ed4e7..5754d53d9efc 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -13,8 +13,6 @@ struct qdisc_walker int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *); }; -extern rwlock_t qdisc_tree_lock; - #define QDISC_ALIGNTO 32 #define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1)) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index ca3da5013b7a..ebf94edf0478 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -400,7 +400,6 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) return skb->len; - read_lock(&qdisc_tree_lock); if (!tcm->tcm_parent) q = dev->qdisc_sleeping; else @@ -457,7 +456,6 @@ errout: if (cl) cops->put(q, cl); out: - read_unlock(&qdisc_tree_lock); dev_put(dev); return skb->len; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 2e863bdaa9a1..0ce6914f5981 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -191,7 +191,7 @@ int unregister_qdisc(struct Qdisc_ops *qops) (root qdisc, all its children, children of children etc.) */ -static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle) +struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) { struct Qdisc *q; @@ -202,16 +202,6 @@ static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle) return NULL; } -struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) -{ - struct Qdisc *q; - - read_lock(&qdisc_tree_lock); - q = __qdisc_lookup(dev, handle); - read_unlock(&qdisc_tree_lock); - return q; -} - static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) { unsigned long cl; @@ -405,7 +395,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) if (n == 0) return; while ((parentid = sch->parent)) { - sch = __qdisc_lookup(sch->dev, TC_H_MAJ(parentid)); + sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid)); cops = sch->ops->cl_ops; if (cops->qlen_notify) { cl = cops->get(sch, parentid); @@ -905,7 +895,6 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) continue; if (idx > s_idx) s_q_idx = 0; - read_lock(&qdisc_tree_lock); q_idx = 0; list_for_each_entry(q, &dev->qdisc_list, list) { if (q_idx < s_q_idx) { @@ -913,13 +902,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) continue; } if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) { - read_unlock(&qdisc_tree_lock); + cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) goto done; - } q_idx++; } - read_unlock(&qdisc_tree_lock); } done: @@ -1142,7 +1128,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; t = 0; - read_lock(&qdisc_tree_lock); list_for_each_entry(q, &dev->qdisc_list, list) { if (t < s_t || !q->ops->cl_ops || (tcm->tcm_parent && @@ -1164,7 +1149,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) break; t++; } - read_unlock(&qdisc_tree_lock); cb->args[0] = t; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 52eb3439d7c6..1894eb72f6cf 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ 
-36,34 +36,23 @@ /* Main transmission queue. */ -/* Main qdisc structure lock. - - However, modifications - to data, participating in scheduling must be additionally - protected with dev->queue_lock spinlock. - - The idea is the following: - - enqueue, dequeue are serialized via top level device - spinlock dev->queue_lock. - - tree walking is protected by read_lock(qdisc_tree_lock) - and this lock is used only in process context. - - updates to tree are made only under rtnl semaphore, - hence this lock may be made without local bh disabling. - - qdisc_tree_lock must be grabbed BEFORE dev->queue_lock! +/* Modifications to data participating in scheduling must be protected with + * dev->queue_lock spinlock. + * + * The idea is the following: + * - enqueue, dequeue are serialized via top level device + * spinlock dev->queue_lock. + * - updates to tree and tree walking are only done under the rtnl mutex. */ -DEFINE_RWLOCK(qdisc_tree_lock); void qdisc_lock_tree(struct net_device *dev) { - write_lock(&qdisc_tree_lock); spin_lock_bh(&dev->queue_lock); } void qdisc_unlock_tree(struct net_device *dev) { spin_unlock_bh(&dev->queue_lock); - write_unlock(&qdisc_tree_lock); } /* @@ -528,15 +517,11 @@ void dev_activate(struct net_device *dev) printk(KERN_INFO "%s: activation failed\n", dev->name); return; } - write_lock(&qdisc_tree_lock); list_add_tail(&qdisc->list, &dev->qdisc_list); - write_unlock(&qdisc_tree_lock); } else { qdisc = &noqueue_qdisc; } - write_lock(&qdisc_tree_lock); dev->qdisc_sleeping = qdisc; - write_unlock(&qdisc_tree_lock); } if (!netif_carrier_ok(dev)) -- cgit v1.2.3-59-g8ed1b From 628a5c561890a9a9a74dea017873530584aab06e Mon Sep 17 00:00:00 2001 From: John Heffner Date: Fri, 20 Apr 2007 15:53:27 -0700 Subject: [INET]: Add IP(V6)_PMTUDISC_RPOBE Add IP(V6)_PMTUDISC_PROBE value for IP(V6)_MTU_DISCOVER. This option forces us not to fragment, but does not make use of the kernel path MTU discovery. That is, it allows for user-mode MTU probing (or, packetization-layer path MTU discovery). This is particularly useful for diagnostic utilities, like traceroute/tracepath. Signed-off-by: John Heffner Signed-off-by: David S. 
Miller --- include/linux/in.h | 1 + include/linux/in6.h | 1 + net/ipv4/ip_output.c | 20 +++++++++++++++----- net/ipv4/ip_sockglue.c | 2 +- net/ipv6/ip6_output.c | 15 ++++++++++++--- net/ipv6/ipv6_sockglue.c | 2 +- 6 files changed, 31 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/in.h b/include/linux/in.h index 1912e7c0bc26..3975cbf52f20 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -83,6 +83,7 @@ struct in_addr { #define IP_PMTUDISC_DONT 0 /* Never send DF frames */ #define IP_PMTUDISC_WANT 1 /* Use per route hints */ #define IP_PMTUDISC_DO 2 /* Always DF */ +#define IP_PMTUDISC_PROBE 3 /* Ignore dst pmtu */ #define IP_MULTICAST_IF 32 #define IP_MULTICAST_TTL 33 diff --git a/include/linux/in6.h b/include/linux/in6.h index 4e8350ae8869..d559fac4a26d 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h @@ -179,6 +179,7 @@ struct in6_flowlabel_req #define IPV6_PMTUDISC_DONT 0 #define IPV6_PMTUDISC_WANT 1 #define IPV6_PMTUDISC_DO 2 +#define IPV6_PMTUDISC_PROBE 3 /* Flowlabel */ #define IPV6_FLOWLABEL_MGR 32 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 34606eff8a05..534650cad3a8 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -189,6 +189,14 @@ static inline int ip_finish_output2(struct sk_buff *skb) return -EINVAL; } +static inline int ip_skb_dst_mtu(struct sk_buff *skb) +{ + struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; + + return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? + skb->dst->dev->mtu : dst_mtu(skb->dst); +} + static inline int ip_finish_output(struct sk_buff *skb) { #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) @@ -198,7 +206,7 @@ static inline int ip_finish_output(struct sk_buff *skb) return dst_output(skb); } #endif - if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) + if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb)) return ip_fragment(skb, ip_finish_output2); else return ip_finish_output2(skb); @@ -422,7 +430,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) { IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, - htonl(dst_mtu(&rt->u.dst))); + htonl(ip_skb_dst_mtu(skb))); kfree_skb(skb); return -EMSGSIZE; } @@ -787,7 +795,9 @@ int ip_append_data(struct sock *sk, inet->cork.addr = ipc->addr; } dst_hold(&rt->u.dst); - inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path); + inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ? + rt->u.dst.dev->mtu : + dst_mtu(rt->u.dst.path); inet->cork.rt = rt; inet->cork.length = 0; sk->sk_sndmsg_page = NULL; @@ -1203,13 +1213,13 @@ int ip_push_pending_frames(struct sock *sk) * to fragment the frame generated here. No matter, what transforms * how transforms change size of the packet, it will come out. */ - if (inet->pmtudisc != IP_PMTUDISC_DO) + if (inet->pmtudisc < IP_PMTUDISC_DO) skb->local_df = 1; /* DF bit is set when we want to see DF on outgoing frames. * If local_df is set too, we still allow to fragment this frame * locally. */ - if (inet->pmtudisc == IP_PMTUDISC_DO || + if (inet->pmtudisc >= IP_PMTUDISC_DO || (skb->len <= dst_mtu(&rt->u.dst) && ip_dont_fragment(sk, &rt->u.dst))) df = htons(IP_DF); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index c199d2311731..4d544573f48a 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -542,7 +542,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, inet->hdrincl = val ? 
1 : 0; break; case IP_MTU_DISCOVER: - if (val<0 || val>2) + if (val<0 || val>3) goto e_inval; inet->pmtudisc = val; break; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 5a5b7d4ad31c..f508171bab73 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -137,9 +137,17 @@ static int ip6_output2(struct sk_buff *skb) return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish); } +static inline int ip6_skb_dst_mtu(struct sk_buff *skb) +{ + struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; + + return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ? + skb->dst->dev->mtu : dst_mtu(skb->dst); +} + int ip6_output(struct sk_buff *skb) { - if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) || + if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb->dst)) return ip6_fragment(skb, ip6_output2); else @@ -566,7 +574,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; - mtu = dst_mtu(&rt->u.dst); + mtu = ip6_skb_dst_mtu(skb); /* We must not fragment if the socket is set to force MTU discovery * or if the skb it not generated by a local socket. (This last @@ -1063,7 +1071,8 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, inet->cork.fl = *fl; np->cork.hop_limit = hlimit; np->cork.tclass = tclass; - mtu = dst_mtu(rt->u.dst.path); + mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? + rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path); if (np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index da930fa089c9..aa3d07c52a8f 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -694,7 +694,7 @@ done: retv = ip6_ra_control(sk, val, NULL); break; case IPV6_MTU_DISCOVER: - if (val<0 || val>2) + if (val<0 || val>3) goto e_inval; np->pmtudisc = val; retv = 0; -- cgit v1.2.3-59-g8ed1b From bf99f1bde3b3009af74874f3465f6861431fbb66 Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki Date: Fri, 20 Apr 2007 15:56:20 -0700 Subject: [IPV6] SNMP: Netlink interface. Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- include/linux/if_link.h | 1 + include/net/ipv6.h | 1 + net/ipv6/addrconf.c | 22 +++++++++++++++++----- net/ipv6/proc.c | 32 ++++++++++++++++++++++++++++++++ 4 files changed, 51 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 35ed3b5467f3..604c2434f71c 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -126,6 +126,7 @@ enum IFLA_INET6_STATS, /* statistics */ IFLA_INET6_MCAST, /* MC things. What of them? 
*/ IFLA_INET6_CACHEINFO, /* time values and max reasm size */ + IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */ __IFLA_INET6_MAX }; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 00328b71a08c..4408def379bf 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -172,6 +172,7 @@ int snmp6_alloc_dev(struct inet6_dev *idev); int snmp6_free_dev(struct inet6_dev *idev); int snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign); void snmp6_mib_free(void *ptr[2]); +void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes); struct ip6_ra_chain { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 1486f76f7878..9ba9e92d1934 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3433,6 +3433,8 @@ static inline size_t inet6_if_nlmsg_size(void) nla_total_size(4) /* IFLA_INET6_FLAGS */ + nla_total_size(sizeof(struct ifla_cacheinfo)) + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */ + + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */ + + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */ ); } @@ -3440,7 +3442,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, u32 pid, u32 seq, int event, unsigned int flags) { struct net_device *dev = idev->dev; - struct nlattr *conf; + struct nlattr *nla; struct ifinfomsg *hdr; struct nlmsghdr *nlh; void *protoinfo; @@ -3480,12 +3482,22 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, ci.retrans_time = idev->nd_parms->retrans_time; NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); - conf = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); - if (conf == NULL) + nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); + if (nla == NULL) goto nla_put_failure; - ipv6_store_devconf(&idev->cnf, nla_data(conf), nla_len(conf)); + ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla)); - /* XXX - Statistics/MC not implemented */ + /* XXX - MC not implemented */ + + nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); + if (nla == NULL) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); + + nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); + if (nla == NULL) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); nla_nest_end(skb, protoinfo); return nlmsg_end(skb, nlh); diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index fa3fb509f187..0dc551501519 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -207,6 +207,31 @@ static const struct file_operations snmp6_seq_fops = { .release = single_release, }; +static inline void +__snmp6_fill_stats(u64 *stats, void **mib, int items, int bytes) +{ + int i; + int pad = bytes - sizeof(u64) * items; + BUG_ON(pad < 0); + stats[0] = items; + for (i = 1; i < items; i++) + stats[i] = (u64)fold_field(mib, i); + memset(&stats[items], 0, pad); +} + +void +snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes) +{ + switch(attrtype) { + case IFLA_INET6_STATS: + __snmp6_fill_stats(stats, (void **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); + break; + case IFLA_INET6_ICMP6STATS: + __snmp6_fill_stats(stats, (void **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); + break; + } +} + int snmp6_register_dev(struct inet6_dev *idev) { struct proc_dir_entry *p; @@ -283,6 +308,13 @@ int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } + +void +snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes) 
+{ + memset(stats, 0, sizeof(bytes)); +} + #endif /* CONFIG_PROC_FS */ int snmp6_alloc_dev(struct inet6_dev *idev) -- cgit v1.2.3-59-g8ed1b From 334901700f9f58993ebd7f6136d3f9062460d34d Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki Date: Fri, 20 Apr 2007 15:57:15 -0700 Subject: [IPV4] SNMP: Move some statistic bits to net/ipv4/proc.c. This also fixes memory leak in error path. Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- include/net/ip.h | 3 +++ net/ipv4/af_inet.c | 59 ++++++++++++++++++++++++++++++++++++------------------ net/ipv4/proc.c | 25 +++++++++++++++++++++++ 3 files changed, 67 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/net/ip.h b/include/net/ip.h index 75f226d26e0d..f41ce07f6700 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -166,6 +166,9 @@ DECLARE_SNMP_STAT(struct linux_mib, net_statistics); #define NET_ADD_STATS_BH(field, adnd) SNMP_ADD_STATS_BH(net_statistics, field, adnd) #define NET_ADD_STATS_USER(field, adnd) SNMP_ADD_STATS_USER(net_statistics, field, adnd) +extern int snmp_mib_init(void *ptr[2], size_t mibsize, size_t mibalign); +extern void snmp_mib_free(void *ptr[2]); + extern int sysctl_local_port_range[2]; extern int sysctl_ip_default_ttl; extern int sysctl_ip_nonlocal_bind; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 45ced52c03d4..a33ca7e7e8f8 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1245,28 +1245,47 @@ static struct net_protocol icmp_protocol = { static int __init init_ipv4_mibs(void) { - net_statistics[0] = alloc_percpu(struct linux_mib); - net_statistics[1] = alloc_percpu(struct linux_mib); - ip_statistics[0] = alloc_percpu(struct ipstats_mib); - ip_statistics[1] = alloc_percpu(struct ipstats_mib); - icmp_statistics[0] = alloc_percpu(struct icmp_mib); - icmp_statistics[1] = alloc_percpu(struct icmp_mib); - tcp_statistics[0] = alloc_percpu(struct tcp_mib); - tcp_statistics[1] = alloc_percpu(struct tcp_mib); - udp_statistics[0] = alloc_percpu(struct udp_mib); - udp_statistics[1] = alloc_percpu(struct udp_mib); - udplite_statistics[0] = alloc_percpu(struct udp_mib); - udplite_statistics[1] = alloc_percpu(struct udp_mib); - if (! 
- (net_statistics[0] && net_statistics[1] && ip_statistics[0] - && ip_statistics[1] && tcp_statistics[0] && tcp_statistics[1] - && udp_statistics[0] && udp_statistics[1] - && udplite_statistics[0] && udplite_statistics[1] ) ) - return -ENOMEM; - - (void) tcp_mib_init(); + if (snmp_mib_init((void **)net_statistics, + sizeof(struct linux_mib), + __alignof__(struct linux_mib)) < 0) + goto err_net_mib; + if (snmp_mib_init((void **)ip_statistics, + sizeof(struct ipstats_mib), + __alignof__(struct ipstats_mib)) < 0) + goto err_ip_mib; + if (snmp_mib_init((void **)icmp_statistics, + sizeof(struct icmp_mib), + __alignof__(struct icmp_mib)) < 0) + goto err_icmp_mib; + if (snmp_mib_init((void **)tcp_statistics, + sizeof(struct tcp_mib), + __alignof__(struct tcp_mib)) < 0) + goto err_tcp_mib; + if (snmp_mib_init((void **)udp_statistics, + sizeof(struct udp_mib), + __alignof__(struct udp_mib)) < 0) + goto err_udp_mib; + if (snmp_mib_init((void **)udplite_statistics, + sizeof(struct udp_mib), + __alignof__(struct udp_mib)) < 0) + goto err_udplite_mib; + + tcp_mib_init(); return 0; + +err_udplite_mib: + snmp_mib_free((void **)udp_statistics); +err_udp_mib: + snmp_mib_free((void **)tcp_statistics); +err_tcp_mib: + snmp_mib_free((void **)icmp_statistics); +err_icmp_mib: + snmp_mib_free((void **)ip_statistics); +err_ip_mib: + snmp_mib_free((void **)net_statistics); +err_net_mib: + return -ENOMEM; } static int ipv4_proc_init(void); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index ae68a691e8cd..97952d54ae84 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -391,3 +391,28 @@ out_netstat: goto out; } +int snmp_mib_init(void *ptr[2], size_t mibsize, size_t mibalign) +{ + BUG_ON(ptr == NULL); + ptr[0] = __alloc_percpu(mibsize); + if (!ptr[0]) + goto err0; + ptr[1] = __alloc_percpu(mibsize); + if (!ptr[1]) + goto err1; + return 0; +err1: + free_percpu(ptr[0]); + ptr[0] = NULL; +err0: + return -ENOMEM; +} + +void snmp_mib_free(void *ptr[2]) +{ + BUG_ON(ptr == NULL); + free_percpu(ptr[0]); + free_percpu(ptr[1]); + ptr[0] = ptr[1] = NULL; +} + -- cgit v1.2.3-59-g8ed1b From 80feaacb8a6400a9540a961b6743c69a5896b937 Mon Sep 17 00:00:00 2001 From: "Peter P. Waskiewicz Jr" Date: Fri, 20 Apr 2007 16:05:39 -0700 Subject: [AF_PACKET]: Add option to return orig_dev to userspace. Add a packet socket option to allow the orig_dev index to be returned to userspace when passing traffic through a decapsulated device, such as the bonding driver. This is very useful for layer 2 traffic being able to report which physical device actually received the traffic, instead of having the encapsulating device hide that information. The new option is called PACKET_ORIGDEV. Signed-off-by: Peter P. Waskiewicz Jr. Signed-off-by: David S. 
Miller --- include/linux/if_packet.h | 1 + net/packet/af_packet.c | 32 +++++++++++++++++++++++++++++--- 2 files changed, 30 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h index f3de05c30678..ad09609227ff 100644 --- a/include/linux/if_packet.h +++ b/include/linux/if_packet.h @@ -42,6 +42,7 @@ struct sockaddr_ll #define PACKET_STATISTICS 6 #define PACKET_COPY_THRESH 7 #define PACKET_AUXDATA 8 +#define PACKET_ORIGDEV 9 struct tpacket_stats { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 36388b2f32f9..02e401cd683f 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -201,7 +201,8 @@ struct packet_sock { struct packet_type prot_hook; spinlock_t bind_lock; unsigned int running:1, /* prot_hook is attached*/ - auxdata:1; + auxdata:1, + origdev:1; int ifindex; /* bound device */ __be16 num; #ifdef CONFIG_PACKET_MULTICAST @@ -528,7 +529,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; - sll->sll_ifindex = dev->ifindex; + if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST) + sll->sll_ifindex = orig_dev->ifindex; + else + sll->sll_ifindex = dev->ifindex; sll->sll_halen = 0; if (dev->hard_header_parse) @@ -673,7 +677,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; - sll->sll_ifindex = dev->ifindex; + if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST) + sll->sll_ifindex = orig_dev->ifindex; + else + sll->sll_ifindex = dev->ifindex; h->tp_status = status; smp_mb(); @@ -1413,6 +1420,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv po->auxdata = !!val; return 0; } + case PACKET_ORIGDEV: + { + int val; + + if (optlen < sizeof(val)) + return -EINVAL; + if (copy_from_user(&val, optval, sizeof(val))) + return -EFAULT; + + po->origdev = !!val; + return 0; + } default: return -ENOPROTOOPT; } @@ -1454,6 +1473,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, len = sizeof(int); val = po->auxdata; + data = &val; + break; + case PACKET_ORIGDEV: + if (len > sizeof(int)) + len = sizeof(int); + val = po->origdev; + data = &val; break; default: -- cgit v1.2.3-59-g8ed1b From 0c6fcc8a8cfcc737d05b6be8b2c3e931ef99cfc2 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 20 Apr 2007 16:40:01 -0700 Subject: [NET] skbuff: skb_store_bits const is backwards Getting warnings becuase skb_store_bits has skb as constant, but the function overwrites it. Looks like const was on the wrong side. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 4 ++-- net/core/skbuff.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c413afbe0b9c..50f6f6a094cf 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1494,8 +1494,8 @@ extern __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); -extern int skb_store_bits(const struct sk_buff *skb, int offset, - void *from, int len); +extern int skb_store_bits(struct sk_buff *skb, int offset, + const void *from, int len); extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, __wsum csum); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c7a1b24b7374..6b50d58cce1e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1129,7 +1129,7 @@ fault: * traversing fragment lists and such. */ -int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len) +int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) { int i, copy; int start = skb_headlen(skb); -- cgit v1.2.3-59-g8ed1b From 4ac02bab77438b484a5cf855a002fb6a1d592894 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 20 Apr 2007 17:11:46 -0700 Subject: [TCP]: Uninline tcp_done(). The function is quite big and has several call sites and nothing to collapse by compiler optimization on inlining. Besides it's nicer to read in a in .c file. Signed-off-by: Andi Kleen Signed-off-by: David S. Miller --- include/net/tcp.h | 16 +--------------- net/ipv4/tcp.c | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 07f724e02f84..e79803353c83 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -921,21 +921,7 @@ static inline void tcp_set_state(struct sock *sk, int state) #endif } -static inline void tcp_done(struct sock *sk) -{ - if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); - - tcp_set_state(sk, TCP_CLOSE); - tcp_clear_xmit_timers(sk); - - sk->sk_shutdown = SHUTDOWN_MASK; - - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_state_change(sk); - else - inet_csk_destroy_sock(sk); -} +extern void tcp_done(struct sock *sk); static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) { diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4664733f139c..99ad52c00c96 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2375,6 +2375,23 @@ void __tcp_put_md5sig_pool(void) EXPORT_SYMBOL(__tcp_put_md5sig_pool); #endif +void tcp_done(struct sock *sk) +{ + if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) + TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); + + tcp_set_state(sk, TCP_CLOSE); + tcp_clear_xmit_timers(sk); + + sk->sk_shutdown = SHUTDOWN_MASK; + + if (!sock_flag(sk, SOCK_DEAD)) + sk->sk_state_change(sk); + else + inet_csk_destroy_sock(sk); +} +EXPORT_SYMBOL_GPL(tcp_done); + extern void __skb_cb_too_small_for_tcp(int, int); extern struct tcp_congestion_ops tcp_reno; -- cgit v1.2.3-59-g8ed1b From 9958089a43ae8a9af07402461c0b2b7548c7341e Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 20 Apr 2007 17:12:43 -0700 Subject: [NET]: Move sk_setup_caps() out of line. It is far too large to be an inline and not in any hot paths. Signed-off-by: Andi Kleen Signed-off-by: David S. 
Miller --- include/net/sock.h | 14 +------------- net/core/sock.c | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index 390c04700590..25c37e34bfdc 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1075,19 +1075,7 @@ static inline int sk_can_gso(const struct sock *sk) return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); } -static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst) -{ - __sk_dst_set(sk, dst); - sk->sk_route_caps = dst->dev->features; - if (sk->sk_route_caps & NETIF_F_GSO) - sk->sk_route_caps |= NETIF_F_GSO_MASK; - if (sk_can_gso(sk)) { - if (dst->header_len) - sk->sk_route_caps &= ~NETIF_F_GSO_MASK; - else - sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; - } -} +extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb) { diff --git a/net/core/sock.c b/net/core/sock.c index 73a8018029a8..043bdc05d211 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -979,6 +979,21 @@ out: EXPORT_SYMBOL_GPL(sk_clone); +void sk_setup_caps(struct sock *sk, struct dst_entry *dst) +{ + __sk_dst_set(sk, dst); + sk->sk_route_caps = dst->dev->features; + if (sk->sk_route_caps & NETIF_F_GSO) + sk->sk_route_caps |= NETIF_F_GSO_MASK; + if (sk_can_gso(sk)) { + if (dst->header_len) + sk->sk_route_caps &= ~NETIF_F_GSO_MASK; + else + sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; + } +} +EXPORT_SYMBOL_GPL(sk_setup_caps); + void __init sk_init(void) { if (num_physpages <= 4096) { -- cgit v1.2.3-59-g8ed1b From 9e412ba7632f71259a53085665d4983b78257b7c Mon Sep 17 00:00:00 2001 From: Ilpo Järvinen Date: Fri, 20 Apr 2007 22:18:02 -0700 Subject: [TCP]: Sed magic converts func(sk, tp, ...) -> func(sk, ...) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is (mostly) automated change using magic: sed -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N' -e 's|struct sock \*sk,[\n\t ]*struct tcp_sock \*tp\([^{]*\n{\n\)| struct sock \*sk\1\tstruct tcp_sock *tp = tcp_sk(sk);\n|g' -e 's|struct sock \*sk, struct tcp_sock \*tp| struct sock \*sk|g' -e 's|sk, tp\([^-]\)|sk\1|g' Fixed four unused variable (tp) warnings that were introduced. In addition, manually added newlines after local variables and tweaked function arguments positioning. $ gcc --version gcc (GCC) 4.1.1 20060525 (Red Hat 4.1.1-1) ... $ codiff -fV built-in.o.old built-in.o.new net/ipv4/route.c: rt_cache_flush | +14 1 function changed, 14 bytes added net/ipv4/tcp.c: tcp_setsockopt | -5 tcp_sendpage | -25 tcp_sendmsg | -16 3 functions changed, 46 bytes removed net/ipv4/tcp_input.c: tcp_try_undo_recovery | +3 tcp_try_undo_dsack | +2 tcp_mark_head_lost | -12 tcp_ack | -15 tcp_event_data_recv | -32 tcp_rcv_state_process | -10 tcp_rcv_established | +1 7 functions changed, 6 bytes added, 69 bytes removed, diff: -63 net/ipv4/tcp_output.c: update_send_head | -9 tcp_transmit_skb | +19 tcp_cwnd_validate | +1 tcp_write_wakeup | -17 __tcp_push_pending_frames | -25 tcp_push_one | -8 tcp_send_fin | -4 7 functions changed, 20 bytes added, 63 bytes removed, diff: -43 built-in.o.new: 18 functions changed, 40 bytes added, 178 bytes removed, diff: -138 Signed-off-by: Ilpo Järvinen Signed-off-by: David S. 
Miller --- include/net/tcp.h | 25 +++++---- include/net/tcp_ecn.h | 11 ++-- net/ipv4/tcp.c | 39 +++++++------- net/ipv4/tcp_input.c | 145 ++++++++++++++++++++++++++++++-------------------- net/ipv4/tcp_output.c | 54 ++++++++++--------- 5 files changed, 158 insertions(+), 116 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index e79803353c83..43910fe3c448 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -420,9 +420,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, /* tcp_output.c */ -extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, - unsigned int cur_mss, int nonagle); -extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp); +extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, + int nonagle); +extern int tcp_may_send_now(struct sock *sk); extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); extern void tcp_xmit_retransmit_queue(struct sock *); extern void tcp_simple_retransmit(struct sock *); @@ -479,8 +479,10 @@ static inline void tcp_fast_path_on(struct tcp_sock *tp) __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale); } -static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp) +static inline void tcp_fast_path_check(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + if (skb_queue_empty(&tp->out_of_order_queue) && tp->rcv_wnd && atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && @@ -591,10 +593,10 @@ static inline void tcp_dec_pcount_approx(__u32 *count, } } -static inline void tcp_packets_out_inc(struct sock *sk, - struct tcp_sock *tp, +static inline void tcp_packets_out_inc(struct sock *sk, const struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); int orig = tp->packets_out; tp->packets_out += tcp_skb_pcount(skb); @@ -778,18 +780,21 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, int mss, tp->snd_sml = TCP_SKB_CB(skb)->end_seq; } -static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp) +static inline void tcp_check_probe_timer(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); + if (!tp->packets_out && !icsk->icsk_pending) inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, icsk->icsk_rto, TCP_RTO_MAX); } -static inline void tcp_push_pending_frames(struct sock *sk, - struct tcp_sock *tp) +static inline void tcp_push_pending_frames(struct sock *sk) { - __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle); + struct tcp_sock *tp = tcp_sk(sk); + + __tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle); } static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq) diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h index b5f7c6ac0880..89eb3e05116d 100644 --- a/include/net/tcp_ecn.h +++ b/include/net/tcp_ecn.h @@ -27,9 +27,10 @@ static inline void TCP_ECN_send_synack(struct tcp_sock *tp, TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; } -static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp, - struct sk_buff *skb) +static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); + tp->ecn_flags = 0; if (sysctl_tcp_ecn) { TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; @@ -44,9 +45,11 @@ TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) th->ece = 1; } -static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp, - struct sk_buff *skb, int tcp_header_len) +static inline 
void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, + int tcp_header_len) { + struct tcp_sock *tp = tcp_sk(sk); + if (tp->ecn_flags & TCP_ECN_OK) { /* Not-retransmitted data segment: set ECT and inject CWR. */ if (skb->len != tcp_header_len && diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 99ad52c00c96..2cf9a898ce50 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -460,9 +460,9 @@ static inline int forced_push(struct tcp_sock *tp) return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); } -static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, - struct sk_buff *skb) +static inline void skb_entail(struct sock *sk, struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); skb->csum = 0; @@ -486,15 +486,17 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, } } -static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags, - int mss_now, int nonagle) +static inline void tcp_push(struct sock *sk, int flags, int mss_now, + int nonagle) { + struct tcp_sock *tp = tcp_sk(sk); + if (tcp_send_head(sk)) { struct sk_buff *skb = tcp_write_queue_tail(sk); if (!(flags & MSG_MORE) || forced_push(tp)) tcp_mark_push(tp, skb); tcp_mark_urg(tp, flags, skb); - __tcp_push_pending_frames(sk, tp, mss_now, + __tcp_push_pending_frames(sk, mss_now, (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); } } @@ -540,7 +542,7 @@ new_segment: if (!skb) goto wait_for_memory; - skb_entail(sk, tp, skb); + skb_entail(sk, skb); copy = size_goal; } @@ -586,7 +588,7 @@ new_segment: if (forced_push(tp)) { tcp_mark_push(tp, skb); - __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); + __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); } else if (skb == tcp_send_head(sk)) tcp_push_one(sk, mss_now); continue; @@ -595,7 +597,7 @@ wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: if (copied) - tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); + tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) goto do_error; @@ -606,7 +608,7 @@ wait_for_memory: out: if (copied) - tcp_push(sk, tp, flags, mss_now, tp->nonagle); + tcp_push(sk, flags, mss_now, tp->nonagle); return copied; do_error: @@ -637,8 +639,9 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, #define TCP_PAGE(sk) (sk->sk_sndmsg_page) #define TCP_OFF(sk) (sk->sk_sndmsg_off) -static inline int select_size(struct sock *sk, struct tcp_sock *tp) +static inline int select_size(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); int tmp = tp->mss_cache; if (sk->sk_route_caps & NETIF_F_SG) { @@ -714,7 +717,7 @@ new_segment: if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; - skb = sk_stream_alloc_pskb(sk, select_size(sk, tp), + skb = sk_stream_alloc_pskb(sk, select_size(sk), 0, sk->sk_allocation); if (!skb) goto wait_for_memory; @@ -725,7 +728,7 @@ new_segment: if (sk->sk_route_caps & NETIF_F_ALL_CSUM) skb->ip_summed = CHECKSUM_PARTIAL; - skb_entail(sk, tp, skb); + skb_entail(sk, skb); copy = size_goal; } @@ -830,7 +833,7 @@ new_segment: if (forced_push(tp)) { tcp_mark_push(tp, skb); - __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); + __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); } else if (skb == tcp_send_head(sk)) tcp_push_one(sk, mss_now); continue; @@ -839,7 +842,7 @@ wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: if (copied) - tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, 
TCP_NAGLE_PUSH); + tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) goto do_error; @@ -851,7 +854,7 @@ wait_for_memory: out: if (copied) - tcp_push(sk, tp, flags, mss_now, tp->nonagle); + tcp_push(sk, flags, mss_now, tp->nonagle); TCP_CHECK_TIMER(sk); release_sock(sk); return copied; @@ -1389,7 +1392,7 @@ do_prequeue: skip_copy: if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { tp->urg_data = 0; - tcp_fast_path_check(sk, tp); + tcp_fast_path_check(sk); } if (used + offset < skb->len) continue; @@ -1830,7 +1833,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, * for currently queued segments. */ tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; - tcp_push_pending_frames(sk, tp); + tcp_push_pending_frames(sk); } else { tp->nonagle &= ~TCP_NAGLE_OFF; } @@ -1854,7 +1857,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, tp->nonagle &= ~TCP_NAGLE_CORK; if (tp->nonagle&TCP_NAGLE_OFF) tp->nonagle |= TCP_NAGLE_PUSH; - tcp_push_pending_frames(sk, tp); + tcp_push_pending_frames(sk); } break; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2fbfc2e4209c..633389390788 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -235,9 +235,9 @@ static void tcp_fixup_sndbuf(struct sock *sk) */ /* Slow part of check#2. */ -static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp, - const struct sk_buff *skb) +static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); /* Optimize this! */ int truesize = tcp_win_from_space(skb->truesize)/2; int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2; @@ -252,9 +252,11 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp, return 0; } -static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp, +static void tcp_grow_window(struct sock *sk, struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); + /* Check #1 */ if (tp->rcv_ssthresh < tp->window_clamp && (int)tp->rcv_ssthresh < tcp_space(sk) && @@ -267,7 +269,7 @@ static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp, if (tcp_win_from_space(skb->truesize) <= skb->len) incr = 2*tp->advmss; else - incr = __tcp_grow_window(sk, tp, skb); + incr = __tcp_grow_window(sk, skb); if (incr) { tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); @@ -330,8 +332,9 @@ static void tcp_init_buffer_space(struct sock *sk) } /* 5. Recalculate window clamp after socket hit its memory bounds. */ -static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) +static void tcp_clamp_window(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ack.quick = 0; @@ -503,8 +506,9 @@ new_measure: * each ACK we send, he increments snd_cwnd and transmits more of his * queue. -DaveM */ -static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) +static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); u32 now; @@ -545,7 +549,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_ TCP_ECN_check_ce(tp, skb); if (skb->len >= 128) - tcp_grow_window(sk, tp, skb); + tcp_grow_window(sk, skb); } /* Called to compute a smoothed rtt estimate. 
The data fed to this @@ -1541,8 +1545,10 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); } -static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) +static inline int tcp_head_timedout(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + return tp->packets_out && tcp_skb_timedout(sk, tcp_write_queue_head(sk)); } @@ -1640,8 +1646,9 @@ static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) * Main question: may we further continue forward transmission * with the same cwnd? */ -static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp) +static int tcp_time_to_recover(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out; /* Do not perform any recovery during FRTO algorithm */ @@ -1659,7 +1666,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp) /* Trick#3 : when we use RFC2988 timer restart, fast * retransmit can be triggered by timeout of queue head. */ - if (tcp_head_timedout(sk, tp)) + if (tcp_head_timedout(sk)) return 1; /* Trick#4: It is still not OK... But will it be useful to delay @@ -1668,7 +1675,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp) packets_out = tp->packets_out; if (packets_out <= tp->reordering && tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && - !tcp_may_send_now(sk, tp)) { + !tcp_may_send_now(sk)) { /* We have nothing to send. This connection is limited * either by receiver window or by application. */ @@ -1708,8 +1715,10 @@ static void tcp_add_reno_sack(struct sock *sk) /* Account for ACK, ACKing some data in Reno Recovery phase. */ -static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked) +static void tcp_remove_reno_sacks(struct sock *sk, int acked) { + struct tcp_sock *tp = tcp_sk(sk); + if (acked > 0) { /* One ACK acked hole. The rest eat duplicate ACKs. */ if (acked-1 >= tp->sacked_out) @@ -1728,9 +1737,10 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp) } /* Mark head of queue up as lost. */ -static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp, +static void tcp_mark_head_lost(struct sock *sk, int packets, u32 high_seq) { + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int cnt; @@ -1771,15 +1781,17 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp, /* Account newly detected lost packet(s) */ -static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) +static void tcp_update_scoreboard(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + if (IsFack(tp)) { int lost = tp->fackets_out - tp->reordering; if (lost <= 0) lost = 1; - tcp_mark_head_lost(sk, tp, lost, tp->high_seq); + tcp_mark_head_lost(sk, lost, tp->high_seq); } else { - tcp_mark_head_lost(sk, tp, 1, tp->high_seq); + tcp_mark_head_lost(sk, 1, tp->high_seq); } /* New heuristics: it is possible only after we switched @@ -1787,7 +1799,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) * Hence, we can detect timed out packets during fast * retransmit without falling to slow start. */ - if (!IsReno(tp) && tcp_head_timedout(sk, tp)) { + if (!IsReno(tp) && tcp_head_timedout(sk)) { struct sk_buff *skb; skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint @@ -1867,9 +1879,11 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp) /* Undo procedures. 
*/ #if FASTRETRANS_DEBUG > 1 -static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg) +static void DBGUNDO(struct sock *sk, const char *msg) { + struct tcp_sock *tp = tcp_sk(sk); struct inet_sock *inet = inet_sk(sk); + printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n", msg, NIPQUAD(inet->daddr), ntohs(inet->dport), @@ -1915,13 +1929,15 @@ static inline int tcp_may_undo(struct tcp_sock *tp) } /* People celebrate: "We love our President!" */ -static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp) +static int tcp_try_undo_recovery(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + if (tcp_may_undo(tp)) { /* Happy end! We did not retransmit anything * or our original transmission succeeded. */ - DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); + DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); tcp_undo_cwr(sk, 1); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); @@ -1941,10 +1957,12 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp) } /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ -static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp) +static void tcp_try_undo_dsack(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + if (tp->undo_marker && !tp->undo_retrans) { - DBGUNDO(sk, tp, "D-SACK"); + DBGUNDO(sk, "D-SACK"); tcp_undo_cwr(sk, 1); tp->undo_marker = 0; NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO); @@ -1953,9 +1971,9 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp) /* Undo during fast recovery after partial ACK. */ -static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp, - int acked) +static int tcp_try_undo_partial(struct sock *sk, int acked) { + struct tcp_sock *tp = tcp_sk(sk); /* Partial ACK arrived. Force Hoe's retransmit. */ int failed = IsReno(tp) || tp->fackets_out>tp->reordering; @@ -1968,7 +1986,7 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp, tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); - DBGUNDO(sk, tp, "Hoe"); + DBGUNDO(sk, "Hoe"); tcp_undo_cwr(sk, 0); NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO); @@ -1982,8 +2000,10 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp, } /* Undo during loss recovery after partial ACK. 
*/ -static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) +static int tcp_try_undo_loss(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + if (tcp_may_undo(tp)) { struct sk_buff *skb; tcp_for_write_queue(skb, sk) { @@ -1994,7 +2014,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) clear_all_retrans_hints(tp); - DBGUNDO(sk, tp, "partial loss"); + DBGUNDO(sk, "partial loss"); tp->lost_out = 0; tp->left_out = tp->sacked_out; tcp_undo_cwr(sk, 1); @@ -2016,8 +2036,10 @@ static inline void tcp_complete_cwr(struct sock *sk) tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); } -static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag) +static void tcp_try_to_open(struct sock *sk, int flag) { + struct tcp_sock *tp = tcp_sk(sk); + tp->left_out = tp->sacked_out; if (tp->retrans_out == 0) @@ -2111,7 +2133,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, before(tp->snd_una, tp->high_seq) && icsk->icsk_ca_state != TCP_CA_Open && tp->fackets_out > tp->reordering) { - tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq); + tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, tp->high_seq); NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); } @@ -2127,7 +2149,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, switch (icsk->icsk_ca_state) { case TCP_CA_Loss: icsk->icsk_retransmits = 0; - if (tcp_try_undo_recovery(sk, tp)) + if (tcp_try_undo_recovery(sk)) return; break; @@ -2141,7 +2163,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, break; case TCP_CA_Disorder: - tcp_try_undo_dsack(sk, tp); + tcp_try_undo_dsack(sk); if (!tp->undo_marker || /* For SACK case do not Open to allow to undo * catching for all duplicate ACKs. */ @@ -2154,7 +2176,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, case TCP_CA_Recovery: if (IsReno(tp)) tcp_reset_reno_sack(tp); - if (tcp_try_undo_recovery(sk, tp)) + if (tcp_try_undo_recovery(sk)) return; tcp_complete_cwr(sk); break; @@ -2170,14 +2192,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, } else { int acked = prior_packets - tp->packets_out; if (IsReno(tp)) - tcp_remove_reno_sacks(sk, tp, acked); - is_dupack = tcp_try_undo_partial(sk, tp, acked); + tcp_remove_reno_sacks(sk, acked); + is_dupack = tcp_try_undo_partial(sk, acked); } break; case TCP_CA_Loss: if (flag&FLAG_DATA_ACKED) icsk->icsk_retransmits = 0; - if (!tcp_try_undo_loss(sk, tp)) { + if (!tcp_try_undo_loss(sk)) { tcp_moderate_cwnd(tp); tcp_xmit_retransmit_queue(sk); return; @@ -2194,10 +2216,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, } if (icsk->icsk_ca_state == TCP_CA_Disorder) - tcp_try_undo_dsack(sk, tp); + tcp_try_undo_dsack(sk); - if (!tcp_time_to_recover(sk, tp)) { - tcp_try_to_open(sk, tp, flag); + if (!tcp_time_to_recover(sk)) { + tcp_try_to_open(sk, flag); return; } @@ -2236,8 +2258,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, tcp_set_ca_state(sk, TCP_CA_Recovery); } - if (is_dupack || tcp_head_timedout(sk, tp)) - tcp_update_scoreboard(sk, tp); + if (is_dupack || tcp_head_timedout(sk)) + tcp_update_scoreboard(sk); tcp_cwnd_down(sk); tcp_xmit_retransmit_queue(sk); } @@ -2313,8 +2335,10 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, * RFC2988 recommends to restart timer to now+rto. 
*/ -static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) +static void tcp_ack_packets_out(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { @@ -2471,7 +2495,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) if (acked&FLAG_ACKED) { tcp_ack_update_rtt(sk, acked, seq_rtt); - tcp_ack_packets_out(sk, tp); + tcp_ack_packets_out(sk); if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED)) (*rtt_sample)(sk, tcp_usrtt(&tv)); @@ -2556,9 +2580,10 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 * and in FreeBSD. NetBSD's one is even worse.) is wrong. */ -static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, - struct sk_buff *skb, u32 ack, u32 ack_seq) +static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack, + u32 ack_seq) { + struct tcp_sock *tp = tcp_sk(sk); int flag = 0; u32 nwin = ntohs(tcp_hdr(skb)->window); @@ -2576,7 +2601,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, * fast path is recovered for sending TCP. */ tp->pred_flags = 0; - tcp_fast_path_check(sk, tp); + tcp_fast_path_check(sk); if (nwin > tp->max_window) { tp->max_window = nwin; @@ -2762,7 +2787,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) else NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS); - flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq); + flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); if (TCP_SKB_CB(skb)->sacked) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); @@ -3426,7 +3451,7 @@ queue_and_out: } tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (skb->len) - tcp_event_data_recv(sk, tp, skb); + tcp_event_data_recv(sk, skb); if (th->fin) tcp_fin(skb, sk, th); @@ -3443,7 +3468,7 @@ queue_and_out: if (tp->rx_opt.num_sacks) tcp_sack_remove(tp); - tcp_fast_path_check(sk, tp); + tcp_fast_path_check(sk); if (eaten > 0) __kfree_skb(skb); @@ -3734,7 +3759,7 @@ static int tcp_prune_queue(struct sock *sk) NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) - tcp_clamp_window(sk, tp); + tcp_clamp_window(sk); else if (tcp_memory_pressure) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); @@ -3803,8 +3828,10 @@ void tcp_cwnd_application_limited(struct sock *sk) tp->snd_cwnd_stamp = tcp_time_stamp; } -static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp) +static int tcp_should_expand_sndbuf(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + /* If the user specified a specific send buffer setting, do * not modify it. 
*/ @@ -3836,7 +3863,7 @@ static void tcp_new_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - if (tcp_should_expand_sndbuf(sk, tp)) { + if (tcp_should_expand_sndbuf(sk)) { int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), demanded = max_t(unsigned int, tp->snd_cwnd, @@ -3860,9 +3887,9 @@ static void tcp_check_space(struct sock *sk) } } -static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp) +static inline void tcp_data_snd_check(struct sock *sk) { - tcp_push_pending_frames(sk, tp); + tcp_push_pending_frames(sk); tcp_check_space(sk); } @@ -4196,7 +4223,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, */ tcp_ack(sk, skb, 0); __kfree_skb(skb); - tcp_data_snd_check(sk, tp); + tcp_data_snd_check(sk); return 0; } else { /* Header too small */ TCP_INC_STATS_BH(TCP_MIB_INERRS); @@ -4267,12 +4294,12 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; } - tcp_event_data_recv(sk, tp, skb); + tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { /* Well, only one small jumplet in fast path... */ tcp_ack(sk, skb, FLAG_DATA); - tcp_data_snd_check(sk, tp); + tcp_data_snd_check(sk); if (!inet_csk_ack_scheduled(sk)) goto no_ack; } @@ -4355,7 +4382,7 @@ step5: /* step 7: process the segment text */ tcp_data_queue(sk, skb); - tcp_data_snd_check(sk, tp); + tcp_data_snd_check(sk); tcp_ack_snd_check(sk); return 0; @@ -4672,7 +4699,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* Do step6 onward by hand. */ tcp_urg(sk, skb, th); __kfree_skb(skb); - tcp_data_snd_check(sk, tp); + tcp_data_snd_check(sk); return 0; } @@ -4864,7 +4891,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* tcp_data could move socket to TIME-WAIT */ if (sk->sk_state != TCP_CLOSE) { - tcp_data_snd_check(sk, tp); + tcp_data_snd_check(sk); tcp_ack_snd_check(sk); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 94d9f0c63682..3a60aea744ae 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -62,12 +62,13 @@ int sysctl_tcp_base_mss __read_mostly = 512; /* By default, RFC2861 behavior. */ int sysctl_tcp_slow_start_after_idle __read_mostly = 1; -static void update_send_head(struct sock *sk, struct tcp_sock *tp, - struct sk_buff *skb) +static void update_send_head(struct sock *sk, struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); + tcp_advance_send_head(sk, skb); tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; - tcp_packets_out_inc(sk, tp, skb); + tcp_packets_out_inc(sk, skb); } /* SND.NXT, if window was not shrunk. @@ -76,8 +77,10 @@ static void update_send_head(struct sock *sk, struct tcp_sock *tp, * Anything in between SND.UNA...SND.UNA+SND.WND also can be already * invalid. OK, let's make this for now: */ -static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp) +static inline __u32 tcp_acceptable_seq(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); + if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt)) return tp->snd_nxt; else @@ -516,7 +519,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, md5 ? &md5_hash_location : #endif NULL); - TCP_ECN_send(sk, tp, skb, tcp_header_size); + TCP_ECN_send(sk, skb, tcp_header_size); } #ifdef CONFIG_TCP_MD5SIG @@ -927,8 +930,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) /* Congestion window validation. 
(RFC2861) */ -static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) +static void tcp_cwnd_validate(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out = tp->packets_out; if (packets_out >= tp->snd_cwnd) { @@ -1076,8 +1080,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, return cwnd_quota; } -int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp) +int tcp_may_send_now(struct sock *sk) { + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = tcp_send_head(sk); return (skb && @@ -1144,8 +1149,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, * * This algorithm is from John Heffner. */ -static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) +static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) { + struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); u32 send_win, cong_win, limit, in_flight; @@ -1324,7 +1330,7 @@ static int tcp_mtu_probe(struct sock *sk) /* Decrement cwnd here because we are sending * effectively two packets. */ tp->snd_cwnd--; - update_send_head(sk, tp, nskb); + update_send_head(sk, nskb); icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; @@ -1387,7 +1393,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) nonagle : TCP_NAGLE_PUSH)))) break; } else { - if (tcp_tso_should_defer(sk, tp, skb)) + if (tcp_tso_should_defer(sk, skb)) break; } @@ -1416,14 +1422,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) /* Advance the send_head. This one is sent out. * This call will increment packets_out. */ - update_send_head(sk, tp, skb); + update_send_head(sk, skb); tcp_minshall_update(tp, mss_now, skb); sent_pkts++; } if (likely(sent_pkts)) { - tcp_cwnd_validate(sk, tp); + tcp_cwnd_validate(sk); return 0; } return !tp->packets_out && tcp_send_head(sk); @@ -1433,14 +1439,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) * TCP_CORK or attempt at coalescing tiny packets. * The socket must be locked by the caller. */ -void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, - unsigned int cur_mss, int nonagle) +void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, + int nonagle) { struct sk_buff *skb = tcp_send_head(sk); if (skb) { if (tcp_write_xmit(sk, cur_mss, nonagle)) - tcp_check_probe_timer(sk, tp); + tcp_check_probe_timer(sk); } } @@ -1484,8 +1490,8 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) TCP_SKB_CB(skb)->when = tcp_time_stamp; if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) { - update_send_head(sk, tp, skb); - tcp_cwnd_validate(sk, tp); + update_send_head(sk, skb); + tcp_cwnd_validate(sk); return; } } @@ -1933,7 +1939,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) * segments to send. 
*/ - if (tcp_may_send_now(sk, tp)) + if (tcp_may_send_now(sk)) return; if (tp->forward_skb_hint) { @@ -2023,7 +2029,7 @@ void tcp_send_fin(struct sock *sk) TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; tcp_queue_skb(sk, skb); } - __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF); + __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); } /* We get here when a process closes a file descriptor (either due to @@ -2033,7 +2039,6 @@ void tcp_send_fin(struct sock *sk) */ void tcp_send_active_reset(struct sock *sk, gfp_t priority) { - struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; /* NOTE: No TCP options attached and we never retransmit this. */ @@ -2053,7 +2058,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) skb_shinfo(skb)->gso_type = 0; /* Send it off. */ - TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); + TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk); TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(skb)->when = tcp_time_stamp; if (tcp_transmit_skb(sk, skb, 0, priority)) @@ -2271,7 +2276,7 @@ int tcp_connect(struct sock *sk) skb_reserve(buff, MAX_TCP_HEADER); TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; - TCP_ECN_send_syn(sk, tp, buff); + TCP_ECN_send_syn(sk, buff); TCP_SKB_CB(buff)->sacked = 0; skb_shinfo(buff)->gso_segs = 1; skb_shinfo(buff)->gso_size = 0; @@ -2363,7 +2368,6 @@ void tcp_send_ack(struct sock *sk) { /* If we have been reset, we may not send again. */ if (sk->sk_state != TCP_CLOSE) { - struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *buff; /* We are not putting this on the write queue, so @@ -2389,7 +2393,7 @@ void tcp_send_ack(struct sock *sk) skb_shinfo(buff)->gso_type = 0; /* Send it off, this clears delayed acks for us. */ - TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp); + TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk); TCP_SKB_CB(buff)->when = tcp_time_stamp; tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); } @@ -2467,7 +2471,7 @@ int tcp_write_wakeup(struct sock *sk) TCP_SKB_CB(skb)->when = tcp_time_stamp; err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); if (!err) { - update_send_head(sk, tp, skb); + update_send_head(sk, skb); } return err; } else { -- cgit v1.2.3-59-g8ed1b From 2334e973559e119fa4161047035f03ad97a8676a Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki Date: Sat, 21 Apr 2007 20:12:43 +0900 Subject: [IPV6] SNMP: Avoid unaligned accesses. Because stats pointer may not be aligned for u64, use memcpy to fill u64 values. Issue reported by David Miller . 
Signed-off-by: YOSHIFUJI Hideaki --- include/net/ipv6.h | 2 +- net/ipv6/proc.c | 22 ++++++++++++++++------ 2 files changed, 17 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4408def379bf..1df360eb0791 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -172,7 +172,7 @@ int snmp6_alloc_dev(struct inet6_dev *idev); int snmp6_free_dev(struct inet6_dev *idev); int snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign); void snmp6_mib_free(void *ptr[2]); -void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes); +void snmp6_fill_stats(void *stats, struct inet6_dev *idev, int attrtype, int bytes); struct ip6_ra_chain { diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index c847cef626a8..aba94316b773 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -210,20 +210,30 @@ static const struct file_operations snmp6_seq_fops = { }; #endif /* CONFIG_PROC_FS */ +/* + * Stats may not be aligned for u64, so use memcpy to avoid + * unaligned accesses. + */ +static inline void __set_u64(void *p, u64 v) +{ + memcpy(p, &v, sizeof(u64)); +} + static inline void -__snmp6_fill_stats(u64 *stats, void **mib, int items, int bytes) +__snmp6_fill_stats(void *stats, void **mib, int items, int bytes) { int i; + u8 *p = stats; int pad = bytes - sizeof(u64) * items; BUG_ON(pad < 0); - stats[0] = items; - for (i = 1; i < items; i++) - stats[i] = (u64)fold_field(mib, i); - memset(&stats[items], 0, pad); + __set_u64(p, items); + for (i = 1, p += sizeof(u64); i < items; i++, p += sizeof(u64)) + __set_u64(p, fold_field(mib, i)); + memset(p, 0, pad); } void -snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes) +snmp6_fill_stats(void *stats, struct inet6_dev *idev, int attrtype, int bytes) { switch(attrtype) { case IFLA_INET6_STATS: -- cgit v1.2.3-59-g8ed1b From 97fc8d0bc58cd09e62dc06ea5a64b58841738934 Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki Date: Sat, 21 Apr 2007 19:52:04 -0700 Subject: [IPV6] SNMP: Use put_unaligned() instead of memcpy(). Hint from David Miller . Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- include/net/ipv6.h | 2 +- net/ipv6/proc.c | 26 ++++++++++---------------- 2 files changed, 11 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 1df360eb0791..4408def379bf 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -172,7 +172,7 @@ int snmp6_alloc_dev(struct inet6_dev *idev); int snmp6_free_dev(struct inet6_dev *idev); int snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign); void snmp6_mib_free(void *ptr[2]); -void snmp6_fill_stats(void *stats, struct inet6_dev *idev, int attrtype, int bytes); +void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes); struct ip6_ra_chain { diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index aba94316b773..7a00bedb6b93 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -210,30 +211,23 @@ static const struct file_operations snmp6_seq_fops = { }; #endif /* CONFIG_PROC_FS */ -/* - * Stats may not be aligned for u64, so use memcpy to avoid - * unaligned accesses. 
- */ -static inline void __set_u64(void *p, u64 v) -{ - memcpy(p, &v, sizeof(u64)); -} - static inline void -__snmp6_fill_stats(void *stats, void **mib, int items, int bytes) +__snmp6_fill_stats(u64 *stats, void **mib, int items, int bytes) { int i; - u8 *p = stats; int pad = bytes - sizeof(u64) * items; BUG_ON(pad < 0); - __set_u64(p, items); - for (i = 1, p += sizeof(u64); i < items; i++, p += sizeof(u64)) - __set_u64(p, fold_field(mib, i)); - memset(p, 0, pad); + + /* Use put_unaligned() because stats may not be aligned for u64. */ + put_unaligned(items, &stats[0]); + for (i = 1; i < items; i++) + put_unaligned(fold_field(mib, i), &stats[i]); + + memset(&stats[items], 0, pad); } void -snmp6_fill_stats(void *stats, struct inet6_dev *idev, int attrtype, int bytes) +snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes) { switch(attrtype) { case IFLA_INET6_STATS: -- cgit v1.2.3-59-g8ed1b From 704232c2718c9d4b3375ec15a14fc0397970c449 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Apr 2007 12:20:05 -0700 Subject: [WIRELESS] cfg80211: New wireless config infrastructure. This patch creates the core cfg80211 code along with some sysfs bits. This is a stripped down version to allow mac80211 to function, but doesn't include any configuration yet except for creating and removing virtual interfaces. This patch includes the nl80211 header file but it only contains the interface types which the cfg80211 interface for creating virtual interfaces relies on. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville Signed-off-by: David S. Miller --- CREDITS | 6 ++ MAINTAINERS | 6 ++ include/linux/Kbuild | 1 + include/linux/netdevice.h | 4 + include/linux/nl80211.h | 38 +++++++++ include/net/cfg80211.h | 36 ++++++++ include/net/wireless.h | 139 ++++++++++++++++++++++++++++++ net/wireless/Kconfig | 3 + net/wireless/Makefile | 4 +- net/wireless/core.c | 209 ++++++++++++++++++++++++++++++++++++++++++++++ net/wireless/core.h | 49 +++++++++++ net/wireless/sysfs.c | 80 ++++++++++++++++++ net/wireless/sysfs.h | 9 ++ 13 files changed, 583 insertions(+), 1 deletion(-) create mode 100644 include/linux/nl80211.h create mode 100644 include/net/cfg80211.h create mode 100644 include/net/wireless.h create mode 100644 net/wireless/core.c create mode 100644 net/wireless/core.h create mode 100644 net/wireless/sysfs.c create mode 100644 net/wireless/sysfs.h (limited to 'include') diff --git a/CREDITS b/CREDITS index e3e7271ace0c..dede114d046e 100644 --- a/CREDITS +++ b/CREDITS @@ -317,6 +317,12 @@ S: 2322 37th Ave SW S: Seattle, Washington 98126-2010 S: USA +N: Johannes Berg +E: johannes@sipsolutions.net +W: http://johannes.sipsolutions.net/ +P: 1024D/9AB78CA5 AD02 0176 4E29 C137 1DF6 08D2 FC44 CF86 9AB7 8CA5 +D: powerpc & 802.11 hacker + N: Stephen R. 
van den Berg (AKA BuGless) E: berg@pool.informatik.rwth-aachen.de D: General kernel, gcc, and libc hacker diff --git a/MAINTAINERS b/MAINTAINERS index 7adebcc99cb4..f56c7e172cee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -894,6 +894,12 @@ M: maxextreme@gmail.com L: linux-kernel@vger.kernel.org S: Maintained +CFG80211 and NL80211 +P: Johannes Berg +M: johannes@sipsolutions.net +L: linux-wireless@vger.kernel.org +S: Maintained + COMMON INTERNET FILE SYSTEM (CIFS) P: Steve French M: sfrench@samba.org diff --git a/include/linux/Kbuild b/include/linux/Kbuild index ea86f2e02716..4ff0f57d0add 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -113,6 +113,7 @@ header-y += netrom.h header-y += nfs2.h header-y += nfs4_mount.h header-y += nfs_mount.h +header-y += nl80211.h header-y += oom.h header-y += param.h header-y += pci_regs.h diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 71fc8ff4888b..584c199ec2d5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -42,6 +42,8 @@ struct vlan_group; struct ethtool_ops; struct netpoll_info; +/* 802.11 specific */ +struct wireless_dev; /* source back-compat hooks */ #define SET_ETHTOOL_OPS(netdev,ops) \ ( (netdev)->ethtool_ops = (ops) ) @@ -400,6 +402,8 @@ struct net_device void *ip6_ptr; /* IPv6 specific data */ void *ec_ptr; /* Econet specific data */ void *ax25_ptr; /* AX.25 specific data */ + struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, + assign before registering */ /* * Cache line mostly used on receive path (including eth_type_trans()) diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h new file mode 100644 index 000000000000..9a30ba2ca75e --- /dev/null +++ b/include/linux/nl80211.h @@ -0,0 +1,38 @@ +#ifndef __LINUX_NL80211_H +#define __LINUX_NL80211_H +/* + * 802.11 netlink interface public header + * + * Copyright 2006, 2007 Johannes Berg + */ + +/** + * enum nl80211_iftype - (virtual) interface types + * @NL80211_IFTYPE_UNSPECIFIED: unspecified type, driver decides + * @NL80211_IFTYPE_ADHOC: independent BSS member + * @NL80211_IFTYPE_STATION: managed BSS member + * @NL80211_IFTYPE_AP: access point + * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points + * @NL80211_IFTYPE_WDS: wireless distribution interface + * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames + * @__NL80211_IFTYPE_AFTER_LAST: internal use + * + * These values are used with the NL80211_ATTR_IFTYPE + * to set the type of an interface. + * + */ +enum nl80211_iftype { + NL80211_IFTYPE_UNSPECIFIED, + NL80211_IFTYPE_ADHOC, + NL80211_IFTYPE_STATION, + NL80211_IFTYPE_AP, + NL80211_IFTYPE_AP_VLAN, + NL80211_IFTYPE_WDS, + NL80211_IFTYPE_MONITOR, + + /* keep last */ + __NL80211_IFTYPE_AFTER_LAST +}; +#define NL80211_IFTYPE_MAX (__NL80211_IFTYPE_AFTER_LAST - 1) + +#endif /* __LINUX_NL80211_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h new file mode 100644 index 000000000000..783a11437a57 --- /dev/null +++ b/include/net/cfg80211.h @@ -0,0 +1,36 @@ +#ifndef __NET_CFG80211_H +#define __NET_CFG80211_H + +#include +#include +#include + +/* + * 802.11 configuration in-kernel interface + * + * Copyright 2006 Johannes Berg + */ + +/* from net/wireless.h */ +struct wiphy; + +/** + * struct cfg80211_ops - backend description for wireless configuration + * + * This struct is registered by fullmac card drivers and/or wireless stacks + * in order to handle configuration requests on their interfaces. 
+ * + * All callbacks except where otherwise noted should return 0 + * on success or a negative error code. + * + * @add_virtual_intf: create a new virtual interface with the given name + * + * @del_virtual_intf: remove the virtual interface determined by ifindex. + */ +struct cfg80211_ops { + int (*add_virtual_intf)(struct wiphy *wiphy, char *name, + unsigned int type); + int (*del_virtual_intf)(struct wiphy *wiphy, int ifindex); +}; + +#endif /* __NET_CFG80211_H */ diff --git a/include/net/wireless.h b/include/net/wireless.h new file mode 100644 index 000000000000..d30c4ba8fd99 --- /dev/null +++ b/include/net/wireless.h @@ -0,0 +1,139 @@ +#ifndef __NET_WIRELESS_H +#define __NET_WIRELESS_H + +/* + * 802.11 device management + * + * Copyright 2007 Johannes Berg + */ + +#include +#include +#include +#include + +/** + * struct wiphy - wireless hardware description + * @idx: the wiphy index assigned to this item + * @class_dev: the class device representing /sys/class/ieee80211/ + */ +struct wiphy { + /* assign these fields before you register the wiphy */ + + /* permanent MAC address */ + u8 perm_addr[ETH_ALEN]; + + /* If multiple wiphys are registered and you're handed e.g. + * a regular netdev with assigned ieee80211_ptr, you won't + * know whether it points to a wiphy your driver has registered + * or not. Assign this to something global to your driver to + * help determine whether you own this wiphy or not. */ + void *privid; + + /* fields below are read-only, assigned by cfg80211 */ + + /* the item in /sys/class/ieee80211/ points to this, + * you need use set_wiphy_dev() (see below) */ + struct device dev; + + /* dir in debugfs: ieee80211/ */ + struct dentry *debugfsdir; + + char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); +}; + +/** struct wireless_dev - wireless per-netdev state + * + * This structure must be allocated by the driver/stack + * that uses the ieee80211_ptr field in struct net_device + * (this is intentional so it can be allocated along with + * the netdev.) + * + * @wiphy: pointer to hardware description + */ +struct wireless_dev { + struct wiphy *wiphy; + + /* private to the generic wireless code */ + struct list_head list; + struct net_device *netdev; +}; + +/** + * wiphy_priv - return priv from wiphy + */ +static inline void *wiphy_priv(struct wiphy *wiphy) +{ + BUG_ON(!wiphy); + return &wiphy->priv; +} + +/** + * set_wiphy_dev - set device pointer for wiphy + */ +static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev) +{ + wiphy->dev.parent = dev; +} + +/** + * wiphy_dev - get wiphy dev pointer + */ +static inline struct device *wiphy_dev(struct wiphy *wiphy) +{ + return wiphy->dev.parent; +} + +/** + * wiphy_name - get wiphy name + */ +static inline char *wiphy_name(struct wiphy *wiphy) +{ + return wiphy->dev.bus_id; +} + +/** + * wdev_priv - return wiphy priv from wireless_dev + */ +static inline void *wdev_priv(struct wireless_dev *wdev) +{ + BUG_ON(!wdev); + return wiphy_priv(wdev->wiphy); +} + +/** + * wiphy_new - create a new wiphy for use with cfg80211 + * + * create a new wiphy and associate the given operations with it. + * @sizeof_priv bytes are allocated for private use. + * + * the returned pointer must be assigned to each netdev's + * ieee80211_ptr for proper operation. + */ +struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv); + +/** + * wiphy_register - register a wiphy with cfg80211 + * + * register the given wiphy + * + * Returns a non-negative wiphy index or a negative error code. 
+ */ +extern int wiphy_register(struct wiphy *wiphy); + +/** + * wiphy_unregister - deregister a wiphy from cfg80211 + * + * unregister a device with the given priv pointer. + * After this call, no more requests can be made with this priv + * pointer, but the call may sleep to wait for an outstanding + * request that is being handled. + */ +extern void wiphy_unregister(struct wiphy *wiphy); + +/** + * wiphy_free - free wiphy + */ +extern void wiphy_free(struct wiphy *wiphy); + +#endif /* __NET_WIRELESS_H */ diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index ca2f05c29760..1863c0b07d45 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -1,3 +1,6 @@ +config CFG80211 + tristate "Improved wireless configuration API" + config WIRELESS_EXT bool "Wireless extensions" default n diff --git a/net/wireless/Makefile b/net/wireless/Makefile index cf4e3d9726b8..3f082ffae387 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile @@ -1 +1,3 @@ -# dummy file for now +obj-$(CONFIG_CFG80211) += cfg80211.o + +cfg80211-y += core.o sysfs.o diff --git a/net/wireless/core.c b/net/wireless/core.c new file mode 100644 index 000000000000..532e1e09e028 --- /dev/null +++ b/net/wireless/core.c @@ -0,0 +1,209 @@ +/* + * This is the linux wireless configuration interface. + * + * Copyright 2006, 2007 Johannes Berg + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "core.h" +#include "sysfs.h" + +/* name for sysfs, %d is appended */ +#define PHY_NAME "phy" + +MODULE_AUTHOR("Johannes Berg"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("wireless configuration support"); + +/* RCU might be appropriate here since we usually + * only read the list, and that can happen quite + * often because we need to do it for each command */ +LIST_HEAD(cfg80211_drv_list); +DEFINE_MUTEX(cfg80211_drv_mutex); +static int wiphy_counter; + +/* for debugfs */ +static struct dentry *ieee80211_debugfs_dir; + +/* exported functions */ + +struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv) +{ + struct cfg80211_registered_device *drv; + int alloc_size; + + alloc_size = sizeof(*drv) + sizeof_priv; + + drv = kzalloc(alloc_size, GFP_KERNEL); + if (!drv) + return NULL; + + drv->ops = ops; + + mutex_lock(&cfg80211_drv_mutex); + + if (unlikely(wiphy_counter<0)) { + /* ugh, wrapped! 
*/ + kfree(drv); + return NULL; + } + drv->idx = wiphy_counter; + + /* give it a proper name */ + snprintf(drv->wiphy.dev.bus_id, BUS_ID_SIZE, + PHY_NAME "%d", drv->idx); + + /* now increase counter for the next time */ + wiphy_counter++; + mutex_unlock(&cfg80211_drv_mutex); + + mutex_init(&drv->mtx); + mutex_init(&drv->devlist_mtx); + INIT_LIST_HEAD(&drv->netdev_list); + + device_initialize(&drv->wiphy.dev); + drv->wiphy.dev.class = &ieee80211_class; + drv->wiphy.dev.platform_data = drv; + + return &drv->wiphy; +} +EXPORT_SYMBOL(wiphy_new); + +int wiphy_register(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); + int res; + + mutex_lock(&cfg80211_drv_mutex); + + + res = device_add(&drv->wiphy.dev); + if (res) + goto out_unlock; + + list_add(&drv->list, &cfg80211_drv_list); + + /* add to debugfs */ + drv->wiphy.debugfsdir = + debugfs_create_dir(wiphy_name(&drv->wiphy), + ieee80211_debugfs_dir); + + res = 0; +out_unlock: + mutex_unlock(&cfg80211_drv_mutex); + return res; +} +EXPORT_SYMBOL(wiphy_register); + +void wiphy_unregister(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); + + mutex_lock(&cfg80211_drv_mutex); + + /* hold registered driver mutex during list removal as well + * to make sure no commands are in progress at the moment */ + mutex_lock(&drv->mtx); + list_del(&drv->list); + mutex_unlock(&drv->mtx); + + device_del(&drv->wiphy.dev); + debugfs_remove(drv->wiphy.debugfsdir); + + mutex_unlock(&cfg80211_drv_mutex); +} +EXPORT_SYMBOL(wiphy_unregister); + +void cfg80211_dev_free(struct cfg80211_registered_device *drv) +{ + mutex_destroy(&drv->mtx); + mutex_destroy(&drv->devlist_mtx); + kfree(drv); +} + +void wiphy_free(struct wiphy *wiphy) +{ + put_device(&wiphy->dev); +} +EXPORT_SYMBOL(wiphy_free); + +static int cfg80211_netdev_notifier_call(struct notifier_block * nb, + unsigned long state, + void *ndev) +{ + struct net_device *dev = ndev; + struct cfg80211_registered_device *rdev; + + if (!dev->ieee80211_ptr) + return 0; + + rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); + + switch (state) { + case NETDEV_REGISTER: + mutex_lock(&rdev->devlist_mtx); + list_add(&dev->ieee80211_ptr->list, &rdev->netdev_list); + if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, + "phy80211")) { + printk(KERN_ERR "wireless: failed to add phy80211 " + "symlink to netdev!\n"); + } + dev->ieee80211_ptr->netdev = dev; + mutex_unlock(&rdev->devlist_mtx); + break; + case NETDEV_UNREGISTER: + mutex_lock(&rdev->devlist_mtx); + if (!list_empty(&dev->ieee80211_ptr->list)) { + sysfs_remove_link(&dev->dev.kobj, "phy80211"); + list_del_init(&dev->ieee80211_ptr->list); + } + mutex_unlock(&rdev->devlist_mtx); + break; + } + + return 0; +} + +static struct notifier_block cfg80211_netdev_notifier = { + .notifier_call = cfg80211_netdev_notifier_call, +}; + +static int cfg80211_init(void) +{ + int err = wiphy_sysfs_init(); + if (err) + goto out_fail_sysfs; + + err = register_netdevice_notifier(&cfg80211_netdev_notifier); + if (err) + goto out_fail_notifier; + + ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); + + return 0; + +out_fail_notifier: + wiphy_sysfs_exit(); +out_fail_sysfs: + return err; +} +module_init(cfg80211_init); + +static void cfg80211_exit(void) +{ + debugfs_remove(ieee80211_debugfs_dir); + unregister_netdevice_notifier(&cfg80211_netdev_notifier); + wiphy_sysfs_exit(); +} +module_exit(cfg80211_exit); diff --git a/net/wireless/core.h b/net/wireless/core.h new file mode 100644 index 
000000000000..158db1edb92a --- /dev/null +++ b/net/wireless/core.h @@ -0,0 +1,49 @@ +/* + * Wireless configuration interface internals. + * + * Copyright 2006, 2007 Johannes Berg + */ +#ifndef __NET_WIRELESS_CORE_H +#define __NET_WIRELESS_CORE_H +#include +#include +#include +#include +#include +#include + +struct cfg80211_registered_device { + struct cfg80211_ops *ops; + struct list_head list; + /* we hold this mutex during any call so that + * we cannot do multiple calls at once, and also + * to avoid the deregister call to proceed while + * any call is in progress */ + struct mutex mtx; + + /* wiphy index, internal only */ + int idx; + + /* associate netdev list */ + struct mutex devlist_mtx; + struct list_head netdev_list; + + /* must be last because of the way we do wiphy_priv(), + * and it should at least be aligned to NETDEV_ALIGN */ + struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); +}; + +static inline +struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy) +{ + BUG_ON(!wiphy); + return container_of(wiphy, struct cfg80211_registered_device, wiphy); +} + +extern struct mutex cfg80211_drv_mutex; +extern struct list_head cfg80211_drv_list; + +/* free object */ +extern void cfg80211_dev_free(struct cfg80211_registered_device *drv); + +#endif /* __NET_WIRELESS_CORE_H */ diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c new file mode 100644 index 000000000000..3ebae1442963 --- /dev/null +++ b/net/wireless/sysfs.c @@ -0,0 +1,80 @@ +/* + * This file provides /sys/class/ieee80211// + * and some default attributes. + * + * Copyright 2005-2006 Jiri Benc + * Copyright 2006 Johannes Berg + * + * This file is GPLv2 as found in COPYING. + */ + +#include +#include +#include +#include +#include +#include +#include "sysfs.h" +#include "core.h" + +static inline struct cfg80211_registered_device *dev_to_rdev( + struct device *dev) +{ + return container_of(dev, struct cfg80211_registered_device, wiphy.dev); +} + +static ssize_t _show_index(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", dev_to_rdev(dev)->idx); +} + +static ssize_t _show_permaddr(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + char *addr = dev_to_rdev(dev)->wiphy.perm_addr; + + return sprintf(buf, "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); +} + +static struct device_attribute ieee80211_dev_attrs[] = { + __ATTR(index, S_IRUGO, _show_index, NULL), + __ATTR(macaddress, S_IRUGO, _show_permaddr, NULL), + {} +}; + +static void wiphy_dev_release(struct device *dev) +{ + struct cfg80211_registered_device *rdev = dev_to_rdev(dev); + + cfg80211_dev_free(rdev); +} + +static int wiphy_uevent(struct device *dev, char **envp, + int num_envp, char *buf, int size) +{ + /* TODO, we probably need stuff here */ + return 0; +} + +struct class ieee80211_class = { + .name = "ieee80211", + .owner = THIS_MODULE, + .dev_release = wiphy_dev_release, + .dev_attrs = ieee80211_dev_attrs, +#ifdef CONFIG_HOTPLUG + .dev_uevent = wiphy_uevent, +#endif +}; + +int wiphy_sysfs_init(void) +{ + return class_register(&ieee80211_class); +} + +void wiphy_sysfs_exit(void) +{ + class_unregister(&ieee80211_class); +} diff --git a/net/wireless/sysfs.h b/net/wireless/sysfs.h new file mode 100644 index 000000000000..65acbebd3711 --- /dev/null +++ b/net/wireless/sysfs.h @@ -0,0 +1,9 @@ +#ifndef __WIRELESS_SYSFS_H +#define __WIRELESS_SYSFS_H + +extern int wiphy_sysfs_init(void); +extern void wiphy_sysfs_exit(void); + +extern 
struct class ieee80211_class; + +#endif /* __WIRELESS_SYSFS_H */ -- cgit v1.2.3-59-g8ed1b From 9e101eab153073d8a1fc7ea22b20af65de8ab44b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 23 Apr 2007 12:20:55 -0700 Subject: [WIRELESS]: Remove wext over netlink. As scheduled, this patch removes the pointless wext over netlink code. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville Signed-off-by: David S. Miller --- Documentation/feature-removal-schedule.txt | 12 - include/net/iw_handler.h | 10 - net/core/rtnetlink.c | 31 -- net/core/wireless.c | 735 ----------------------------- net/wireless/Kconfig | 10 - 5 files changed, 798 deletions(-) (limited to 'include') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 9817b60e70a3..976c8a1bd7cd 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -285,18 +285,6 @@ Who: Richard Purdie --------------------------- -What: Wireless extensions over netlink (CONFIG_NET_WIRELESS_RTNETLINK) -When: with the merge of wireless-dev, 2.6.22 or later -Why: The option/code is - * not enabled on most kernels - * not required by any userspace tools (except an experimental one, - and even there only for some parts, others use ioctl) - * pointless since wext is no longer evolving and the ioctl - interface needs to be kept -Who: Johannes Berg - ---------------------------- - What: i8xx_tco watchdog driver When: in 2.6.22 Why: the i8xx_tco watchdog driver has been replaced by the iTCO_wdt diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index 8a830188354d..909ca87d13b7 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -440,16 +440,6 @@ extern int dev_get_wireless_info(char * buffer, char **start, off_t offset, /* Handle IOCTLs, called in net/core/dev.c */ extern int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd); -/* Handle RtNetlink requests, called in net/core/rtnetlink.c */ -extern int wireless_rtnetlink_set(struct net_device * dev, - char * data, - int len); -extern int wireless_rtnetlink_get(struct net_device * dev, - char * data, - int len, - char ** p_buf, - int * p_len); - /* Second : functions that may be called by driver modules */ /* Send a single event to user space */ diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 75cea8ea4cf3..4fe0f4b3a345 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -51,10 +51,6 @@ #include #include #include -#ifdef CONFIG_NET_WIRELESS_RTNETLINK -#include -#include -#endif /* CONFIG_NET_WIRELESS_RTNETLINK */ struct rtnl_link { @@ -684,17 +680,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) modified = 1; } -#ifdef CONFIG_NET_WIRELESS_RTNETLINK - if (tb[IFLA_WIRELESS]) { - /* Call Wireless Extensions. - * Various stuff checked in there... */ - err = wireless_rtnetlink_set(dev, nla_data(tb[IFLA_WIRELESS]), - nla_len(tb[IFLA_WIRELESS])); - if (err < 0) - goto errout_dev; - } -#endif /* CONFIG_NET_WIRELESS_RTNETLINK */ - if (tb[IFLA_BROADCAST]) { nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); send_addr_notify = 1; @@ -758,22 +743,6 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) } else return -EINVAL; - -#ifdef CONFIG_NET_WIRELESS_RTNETLINK - if (tb[IFLA_WIRELESS]) { - /* Call Wireless Extensions. We need to know the size before - * we can alloc. Various stuff checked in there... 
*/ - err = wireless_rtnetlink_get(dev, nla_data(tb[IFLA_WIRELESS]), - nla_len(tb[IFLA_WIRELESS]), - &iw_buf, &iw_buf_len); - if (err < 0) - goto errout; - - /* Payload is at an offset in buffer */ - iw = iw_buf + IW_EV_POINT_OFF; - } -#endif /* CONFIG_NET_WIRELESS_RTNETLINK */ - nskb = nlmsg_new(if_nlmsg_size(iw_buf_len), GFP_KERNEL); if (nskb == NULL) { err = -ENOBUFS; diff --git a/net/core/wireless.c b/net/core/wireless.c index 86db63d7f760..fba295e05e7a 100644 --- a/net/core/wireless.c +++ b/net/core/wireless.c @@ -104,12 +104,10 @@ /* Debugging stuff */ #undef WE_IOCTL_DEBUG /* Debug IOCTL API */ -#undef WE_RTNETLINK_DEBUG /* Debug RtNetlink API */ #undef WE_EVENT_DEBUG /* Debug Event dispatcher */ #undef WE_SPY_DEBUG /* Debug enhanced spy support */ /* Options */ -//CONFIG_NET_WIRELESS_RTNETLINK /* Wireless requests over RtNetlink */ #define WE_EVENT_RTNETLINK /* Propagate events using RtNetlink */ #define WE_SET_EVENT /* Generate an event on some set commands */ @@ -1145,739 +1143,6 @@ int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd) return -EINVAL; } -/********************** RTNETLINK REQUEST API **********************/ -/* - * The alternate user space API to configure all those Wireless Extensions - * is through RtNetlink. - * This API support only the new driver API (iw_handler). - * - * This RtNetlink API use the same query/reply model as the ioctl API. - * Maximum effort has been done to fit in the RtNetlink model, and - * we support both RtNetlink Set and RtNelink Get operations. - * On the other hand, we don't offer Dump operations because of the - * following reasons : - * o Large number of parameters, most optional - * o Large size of some parameters (> 100 bytes) - * o Each parameters need to be extracted from hardware - * o Scan requests can take seconds and disable network activity. - * Because of this high cost/overhead, we want to return only the - * parameters the user application is really interested in. - * We could offer partial Dump using the IW_DESCR_FLAG_DUMP flag. - * - * The API uses the standard RtNetlink socket. When the RtNetlink code - * find a IFLA_WIRELESS field in a RtNetlink SET_LINK request, - * it calls here. - */ - -#ifdef CONFIG_NET_WIRELESS_RTNETLINK -/* ---------------------------------------------------------------- */ -/* - * Wrapper to call a standard Wireless Extension GET handler. - * We do various checks and call the handler with the proper args. 
- */ -static int rtnetlink_standard_get(struct net_device * dev, - struct iw_event * request, - int request_len, - iw_handler handler, - char ** p_buf, - int * p_len) -{ - const struct iw_ioctl_description * descr = NULL; - unsigned int cmd; - union iwreq_data * wrqu; - int hdr_len; - struct iw_request_info info; - char * buffer = NULL; - int buffer_size = 0; - int ret = -EINVAL; - - /* Get the description of the Request */ - cmd = request->cmd; - if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) - return -EOPNOTSUPP; - descr = &(standard_ioctl[cmd - SIOCIWFIRST]); - -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Found standard handler for 0x%04X\n", - dev->name, cmd); - printk(KERN_DEBUG "%s (WE.r) : Header type : %d, Token type : %d, size : %d, token : %d\n", dev->name, descr->header_type, descr->token_type, descr->token_size, descr->max_tokens); -#endif /* WE_RTNETLINK_DEBUG */ - - /* Check if wrqu is complete */ - hdr_len = event_type_size[descr->header_type]; - if (request_len < hdr_len) { -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG - "%s (WE.r) : Wireless request too short (%d)\n", - dev->name, request_len); -#endif /* WE_RTNETLINK_DEBUG */ - return -EINVAL; - } - - /* Prepare the call */ - info.cmd = cmd; - info.flags = 0; - - /* Check if we have extra data in the reply or not */ - if (descr->header_type != IW_HEADER_TYPE_POINT) { - - /* Create the kernel buffer that we will return. - * It's at an offset to match the TYPE_POINT case... */ - buffer_size = request_len + IW_EV_POINT_OFF; - buffer = kmalloc(buffer_size, GFP_KERNEL); - if (buffer == NULL) { - return -ENOMEM; - } - /* Copy event data */ - memcpy(buffer + IW_EV_POINT_OFF, request, request_len); - /* Use our own copy of wrqu */ - wrqu = (union iwreq_data *) (buffer + IW_EV_POINT_OFF - + IW_EV_LCP_PK_LEN); - - /* No extra arguments. Trivial to handle */ - ret = handler(dev, &info, wrqu, NULL); - - } else { - union iwreq_data wrqu_point; - char * extra = NULL; - int extra_size = 0; - - /* Get a temp copy of wrqu (skip pointer) */ - memcpy(((char *) &wrqu_point) + IW_EV_POINT_OFF, - ((char *) request) + IW_EV_LCP_PK_LEN, - IW_EV_POINT_LEN - IW_EV_LCP_PK_LEN); - - /* Calculate space needed by arguments. Always allocate - * for max space. Easier, and won't last long... */ - extra_size = descr->max_tokens * descr->token_size; - /* Support for very large requests */ - if ((descr->flags & IW_DESCR_FLAG_NOMAX) && - (wrqu_point.data.length > descr->max_tokens)) - extra_size = (wrqu_point.data.length - * descr->token_size); - buffer_size = extra_size + IW_EV_POINT_PK_LEN + IW_EV_POINT_OFF; -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Malloc %d bytes (%d bytes)\n", - dev->name, extra_size, buffer_size); -#endif /* WE_RTNETLINK_DEBUG */ - - /* Create the kernel buffer that we will return */ - buffer = kmalloc(buffer_size, GFP_KERNEL); - if (buffer == NULL) { - return -ENOMEM; - } - - /* Put wrqu in the right place (just before extra). - * Leave space for IWE header and dummy pointer... - * Note that IW_EV_LCP_PK_LEN==4 bytes, so it's still aligned. - */ - memcpy(buffer + IW_EV_LCP_PK_LEN + IW_EV_POINT_OFF, - ((char *) &wrqu_point) + IW_EV_POINT_OFF, - IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - wrqu = (union iwreq_data *) (buffer + IW_EV_LCP_PK_LEN); - - /* Extra comes logically after that. Offset +12 bytes. 
*/ - extra = buffer + IW_EV_POINT_OFF + IW_EV_POINT_PK_LEN; - - /* Call the handler */ - ret = handler(dev, &info, wrqu, extra); - - /* Calculate real returned length */ - extra_size = (wrqu->data.length * descr->token_size); - /* Re-adjust reply size */ - request->len = extra_size + IW_EV_POINT_PK_LEN; - - /* Put the iwe header where it should, i.e. scrap the - * dummy pointer. */ - memcpy(buffer + IW_EV_POINT_OFF, request, IW_EV_LCP_PK_LEN); - -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Reply 0x%04X, hdr_len %d, tokens %d, extra_size %d, buffer_size %d\n", dev->name, cmd, hdr_len, wrqu->data.length, extra_size, buffer_size); -#endif /* WE_RTNETLINK_DEBUG */ - - /* Check if there is enough buffer up there */ - if (wrqu_point.data.length < wrqu->data.length) - ret = -E2BIG; - } - - /* Return the buffer to the caller */ - if (!ret) { - *p_buf = buffer; - *p_len = request->len; - } else { - /* Cleanup */ - if (buffer) - kfree(buffer); - } - - return ret; -} - -/* ---------------------------------------------------------------- */ -/* - * Wrapper to call a standard Wireless Extension SET handler. - * We do various checks and call the handler with the proper args. - */ -static inline int rtnetlink_standard_set(struct net_device * dev, - struct iw_event * request, - int request_len, - iw_handler handler) -{ - const struct iw_ioctl_description * descr = NULL; - unsigned int cmd; - union iwreq_data * wrqu; - union iwreq_data wrqu_point; - int hdr_len; - char * extra = NULL; - int extra_size = 0; - struct iw_request_info info; - int ret = -EINVAL; - - /* Get the description of the Request */ - cmd = request->cmd; - if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) - return -EOPNOTSUPP; - descr = &(standard_ioctl[cmd - SIOCIWFIRST]); - -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Found standard SET handler for 0x%04X\n", - dev->name, cmd); - printk(KERN_DEBUG "%s (WE.r) : Header type : %d, Token type : %d, size : %d, token : %d\n", dev->name, descr->header_type, descr->token_type, descr->token_size, descr->max_tokens); -#endif /* WE_RTNETLINK_DEBUG */ - - /* Extract fixed header from request. This is properly aligned. */ - wrqu = (union iwreq_data *) (((char *) request) + IW_EV_LCP_PK_LEN); - - /* Check if wrqu is complete */ - hdr_len = event_type_pk_size[descr->header_type]; - if (request_len < hdr_len) { -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG - "%s (WE.r) : Wireless request too short (%d)\n", - dev->name, request_len); -#endif /* WE_RTNETLINK_DEBUG */ - return -EINVAL; - } - - /* Prepare the call */ - info.cmd = cmd; - info.flags = 0; - - /* Check if we have extra data in the request or not */ - if (descr->header_type != IW_HEADER_TYPE_POINT) { - - /* No extra arguments. Trivial to handle */ - ret = handler(dev, &info, wrqu, NULL); - - } else { - int extra_len; - - /* Put wrqu in the right place (skip pointer) */ - memcpy(((char *) &wrqu_point) + IW_EV_POINT_OFF, - wrqu, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - /* Don't forget about the event code... 
*/ - wrqu = &wrqu_point; - - /* Check if number of token fits within bounds */ - if (wrqu_point.data.length > descr->max_tokens) - return -E2BIG; - if (wrqu_point.data.length < descr->min_tokens) - return -EINVAL; - - /* Real length of payload */ - extra_len = wrqu_point.data.length * descr->token_size; - - /* Check if request is self consistent */ - if ((request_len - hdr_len) < extra_len) { -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Wireless request data too short (%d)\n", - dev->name, extra_size); -#endif /* WE_RTNETLINK_DEBUG */ - return -EINVAL; - } - -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Malloc %d bytes\n", - dev->name, extra_size); -#endif /* WE_RTNETLINK_DEBUG */ - - /* Always allocate for max space. Easier, and won't last - * long... */ - extra_size = descr->max_tokens * descr->token_size; - extra = kmalloc(extra_size, GFP_KERNEL); - if (extra == NULL) - return -ENOMEM; - - /* Copy extra in aligned buffer */ - memcpy(extra, ((char *) request) + hdr_len, extra_len); - - /* Call the handler */ - ret = handler(dev, &info, &wrqu_point, extra); - } - -#ifdef WE_SET_EVENT - /* Generate an event to notify listeners of the change */ - if ((descr->flags & IW_DESCR_FLAG_EVENT) && - ((ret == 0) || (ret == -EIWCOMMIT))) { - if (descr->flags & IW_DESCR_FLAG_RESTRICT) - /* If the event is restricted, don't - * export the payload */ - wireless_send_event(dev, cmd, wrqu, NULL); - else - wireless_send_event(dev, cmd, wrqu, extra); - } -#endif /* WE_SET_EVENT */ - - /* Cleanup - I told you it wasn't that long ;-) */ - if (extra) - kfree(extra); - - /* Call commit handler if needed and defined */ - if (ret == -EIWCOMMIT) - ret = call_commit_handler(dev); - - return ret; -} - -/* ---------------------------------------------------------------- */ -/* - * Wrapper to call a private Wireless Extension GET handler. - * Same as above... - * It's not as nice and slimline as the standard wrapper. The cause - * is struct iw_priv_args, which was not really designed for the - * job we are going here. - * - * IMPORTANT : This function prevent to set and get data on the same - * IOCTL and enforce the SET/GET convention. Not doing it would be - * far too hairy... - * If you need to set and get data at the same time, please don't use - * a iw_handler but process it in your ioctl handler (i.e. use the - * old driver API). - */ -static inline int rtnetlink_private_get(struct net_device * dev, - struct iw_event * request, - int request_len, - iw_handler handler, - char ** p_buf, - int * p_len) -{ - const struct iw_priv_args * descr = NULL; - unsigned int cmd; - union iwreq_data * wrqu; - int hdr_len; - struct iw_request_info info; - int extra_size = 0; - int i; - char * buffer = NULL; - int buffer_size = 0; - int ret = -EINVAL; - - /* Get the description of the Request */ - cmd = request->cmd; - for (i = 0; i < dev->wireless_handlers->num_private_args; i++) - if (cmd == dev->wireless_handlers->private_args[i].cmd) { - descr = &(dev->wireless_handlers->private_args[i]); - break; - } - if (descr == NULL) - return -EOPNOTSUPP; - -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Found private handler for 0x%04X\n", - dev->name, cmd); - printk(KERN_DEBUG "%s (WE.r) : Name %s, set %X, get %X\n", - dev->name, descr->name, descr->set_args, descr->get_args); -#endif /* WE_RTNETLINK_DEBUG */ - - /* Compute the max size of the get arguments */ - extra_size = get_priv_size(descr->get_args); - - /* Does it fits in wrqu ? 
*/ - if ((descr->get_args & IW_PRIV_SIZE_FIXED) && - (extra_size <= IFNAMSIZ)) { - hdr_len = extra_size; - extra_size = 0; - } else { - hdr_len = IW_EV_POINT_PK_LEN; - } - - /* Check if wrqu is complete */ - if (request_len < hdr_len) { -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG - "%s (WE.r) : Wireless request too short (%d)\n", - dev->name, request_len); -#endif /* WE_RTNETLINK_DEBUG */ - return -EINVAL; - } - - /* Prepare the call */ - info.cmd = cmd; - info.flags = 0; - - /* Check if we have a pointer to user space data or not. */ - if (extra_size == 0) { - - /* Create the kernel buffer that we will return. - * It's at an offset to match the TYPE_POINT case... */ - buffer_size = request_len + IW_EV_POINT_OFF; - buffer = kmalloc(buffer_size, GFP_KERNEL); - if (buffer == NULL) { - return -ENOMEM; - } - /* Copy event data */ - memcpy(buffer + IW_EV_POINT_OFF, request, request_len); - /* Use our own copy of wrqu */ - wrqu = (union iwreq_data *) (buffer + IW_EV_POINT_OFF - + IW_EV_LCP_PK_LEN); - - /* No extra arguments. Trivial to handle */ - ret = handler(dev, &info, wrqu, (char *) wrqu); - - } else { - char * extra; - - /* Buffer for full reply */ - buffer_size = extra_size + IW_EV_POINT_PK_LEN + IW_EV_POINT_OFF; - -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Malloc %d bytes (%d bytes)\n", - dev->name, extra_size, buffer_size); -#endif /* WE_RTNETLINK_DEBUG */ - - /* Create the kernel buffer that we will return */ - buffer = kmalloc(buffer_size, GFP_KERNEL); - if (buffer == NULL) { - return -ENOMEM; - } - - /* Put wrqu in the right place (just before extra). - * Leave space for IWE header and dummy pointer... - * Note that IW_EV_LCP_PK_LEN==4 bytes, so it's still aligned. - */ - memcpy(buffer + IW_EV_LCP_PK_LEN + IW_EV_POINT_OFF, - ((char *) request) + IW_EV_LCP_PK_LEN, - IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - wrqu = (union iwreq_data *) (buffer + IW_EV_LCP_PK_LEN); - - /* Extra comes logically after that. Offset +12 bytes. */ - extra = buffer + IW_EV_POINT_OFF + IW_EV_POINT_PK_LEN; - - /* Call the handler */ - ret = handler(dev, &info, wrqu, extra); - - /* Adjust for the actual length if it's variable, - * avoid leaking kernel bits outside. */ - if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) - extra_size = adjust_priv_size(descr->get_args, wrqu); - /* Re-adjust reply size */ - request->len = extra_size + IW_EV_POINT_PK_LEN; - - /* Put the iwe header where it should, i.e. scrap the - * dummy pointer. */ - memcpy(buffer + IW_EV_POINT_OFF, request, IW_EV_LCP_PK_LEN); - -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Reply 0x%04X, hdr_len %d, tokens %d, extra_size %d, buffer_size %d\n", dev->name, cmd, hdr_len, wrqu->data.length, extra_size, buffer_size); -#endif /* WE_RTNETLINK_DEBUG */ - } - - /* Return the buffer to the caller */ - if (!ret) { - *p_buf = buffer; - *p_len = request->len; - } else { - /* Cleanup */ - if (buffer) - kfree(buffer); - } - - return ret; -} - -/* ---------------------------------------------------------------- */ -/* - * Wrapper to call a private Wireless Extension SET handler. - * Same as above... - * It's not as nice and slimline as the standard wrapper. The cause - * is struct iw_priv_args, which was not really designed for the - * job we are going here. - * - * IMPORTANT : This function prevent to set and get data on the same - * IOCTL and enforce the SET/GET convention. Not doing it would be - * far too hairy... 
- * If you need to set and get data at the same time, please don't use - * a iw_handler but process it in your ioctl handler (i.e. use the - * old driver API). - */ -static inline int rtnetlink_private_set(struct net_device * dev, - struct iw_event * request, - int request_len, - iw_handler handler) -{ - const struct iw_priv_args * descr = NULL; - unsigned int cmd; - union iwreq_data * wrqu; - union iwreq_data wrqu_point; - int hdr_len; - char * extra = NULL; - int extra_size = 0; - int offset = 0; /* For sub-ioctls */ - struct iw_request_info info; - int i; - int ret = -EINVAL; - - /* Get the description of the Request */ - cmd = request->cmd; - for (i = 0; i < dev->wireless_handlers->num_private_args; i++) - if (cmd == dev->wireless_handlers->private_args[i].cmd) { - descr = &(dev->wireless_handlers->private_args[i]); - break; - } - if (descr == NULL) - return -EOPNOTSUPP; - -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Found private handler for 0x%04X\n", - ifr->ifr_name, cmd); - printk(KERN_DEBUG "%s (WE.r) : Name %s, set %X, get %X\n", - dev->name, descr->name, descr->set_args, descr->get_args); -#endif /* WE_RTNETLINK_DEBUG */ - - /* Compute the size of the set arguments */ - /* Check for sub-ioctl handler */ - if (descr->name[0] == '\0') - /* Reserve one int for sub-ioctl index */ - offset = sizeof(__u32); - - /* Size of set arguments */ - extra_size = get_priv_size(descr->set_args); - - /* Does it fits in wrqu ? */ - if ((descr->set_args & IW_PRIV_SIZE_FIXED) && - (extra_size <= IFNAMSIZ)) { - hdr_len = IW_EV_LCP_PK_LEN + extra_size; - extra_size = 0; - } else { - hdr_len = IW_EV_POINT_PK_LEN; - } - - /* Extract fixed header from request. This is properly aligned. */ - wrqu = (union iwreq_data *) (((char *) request) + IW_EV_LCP_PK_LEN); - - /* Check if wrqu is complete */ - if (request_len < hdr_len) { -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG - "%s (WE.r) : Wireless request too short (%d)\n", - dev->name, request_len); -#endif /* WE_RTNETLINK_DEBUG */ - return -EINVAL; - } - - /* Prepare the call */ - info.cmd = cmd; - info.flags = 0; - - /* Check if we have a pointer to user space data or not. */ - if (extra_size == 0) { - - /* No extra arguments. Trivial to handle */ - ret = handler(dev, &info, wrqu, (char *) wrqu); - - } else { - int extra_len; - - /* Put wrqu in the right place (skip pointer) */ - memcpy(((char *) &wrqu_point) + IW_EV_POINT_OFF, - wrqu, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - - /* Does it fits within bounds ? */ - if (wrqu_point.data.length > (descr->set_args & - IW_PRIV_SIZE_MASK)) - return -E2BIG; - - /* Real length of payload */ - extra_len = adjust_priv_size(descr->set_args, &wrqu_point); - - /* Check if request is self consistent */ - if ((request_len - hdr_len) < extra_len) { -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Wireless request data too short (%d)\n", - dev->name, extra_size); -#endif /* WE_RTNETLINK_DEBUG */ - return -EINVAL; - } - -#ifdef WE_RTNETLINK_DEBUG - printk(KERN_DEBUG "%s (WE.r) : Malloc %d bytes\n", - dev->name, extra_size); -#endif /* WE_RTNETLINK_DEBUG */ - - /* Always allocate for max space. Easier, and won't last - * long... 
*/ - extra = kmalloc(extra_size, GFP_KERNEL); - if (extra == NULL) - return -ENOMEM; - - /* Copy extra in aligned buffer */ - memcpy(extra, ((char *) request) + hdr_len, extra_len); - - /* Call the handler */ - ret = handler(dev, &info, &wrqu_point, extra); - - /* Cleanup - I told you it wasn't that long ;-) */ - kfree(extra); - } - - /* Call commit handler if needed and defined */ - if (ret == -EIWCOMMIT) - ret = call_commit_handler(dev); - - return ret; -} - -/* ---------------------------------------------------------------- */ -/* - * Main RtNetlink dispatcher. Called from the main networking code - * (do_getlink() in net/core/rtnetlink.c). - * Check the type of Request and call the appropriate wrapper... - */ -int wireless_rtnetlink_get(struct net_device * dev, - char * data, - int len, - char ** p_buf, - int * p_len) -{ - struct iw_event * request = (struct iw_event *) data; - iw_handler handler; - - /* Check length */ - if (len < IW_EV_LCP_PK_LEN) { - printk(KERN_DEBUG "%s (WE.r) : RtNetlink request too short (%d)\n", - dev->name, len); - return -EINVAL; - } - - /* ReCheck length (len may have padding) */ - if (request->len > len) { - printk(KERN_DEBUG "%s (WE.r) : RtNetlink request len invalid (%d-%d)\n", - dev->name, request->len, len); - return -EINVAL; - } - - /* Only accept GET requests in here */ - if (!IW_IS_GET(request->cmd)) - return -EOPNOTSUPP; - - /* If command is `get the encoding parameters', check if - * the user has the right to do it */ - if (request->cmd == SIOCGIWENCODE || - request->cmd == SIOCGIWENCODEEXT) { - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - } - - /* Special cases */ - if (request->cmd == SIOCGIWSTATS) - /* Get Wireless Stats */ - return rtnetlink_standard_get(dev, - request, - request->len, - &iw_handler_get_iwstats, - p_buf, p_len); - if (request->cmd == SIOCGIWPRIV) { - /* Check if we have some wireless handlers defined */ - if (dev->wireless_handlers == NULL) - return -EOPNOTSUPP; - /* Get Wireless Stats */ - return rtnetlink_standard_get(dev, - request, - request->len, - &iw_handler_get_private, - p_buf, p_len); - } - - /* Basic check */ - if (!netif_device_present(dev)) - return -ENODEV; - - /* Try to find the handler */ - handler = get_handler(dev, request->cmd); - if (handler != NULL) { - /* Standard and private are not the same */ - if (request->cmd < SIOCIWFIRSTPRIV) - return rtnetlink_standard_get(dev, - request, - request->len, - handler, - p_buf, p_len); - else - return rtnetlink_private_get(dev, - request, - request->len, - handler, - p_buf, p_len); - } - - return -EOPNOTSUPP; -} - -/* ---------------------------------------------------------------- */ -/* - * Main RtNetlink dispatcher. Called from the main networking code - * (do_setlink() in net/core/rtnetlink.c). - * Check the type of Request and call the appropriate wrapper... 
- */ -int wireless_rtnetlink_set(struct net_device * dev, - char * data, - int len) -{ - struct iw_event * request = (struct iw_event *) data; - iw_handler handler; - - /* Check length */ - if (len < IW_EV_LCP_PK_LEN) { - printk(KERN_DEBUG "%s (WE.r) : RtNetlink request too short (%d)\n", - dev->name, len); - return -EINVAL; - } - - /* ReCheck length (len may have padding) */ - if (request->len > len) { - printk(KERN_DEBUG "%s (WE.r) : RtNetlink request len invalid (%d-%d)\n", - dev->name, request->len, len); - return -EINVAL; - } - - /* Only accept SET requests in here */ - if (!IW_IS_SET(request->cmd)) - return -EOPNOTSUPP; - - /* Basic check */ - if (!netif_device_present(dev)) - return -ENODEV; - - /* New driver API : try to find the handler */ - handler = get_handler(dev, request->cmd); - if (handler != NULL) { - /* Standard and private are not the same */ - if (request->cmd < SIOCIWFIRSTPRIV) - return rtnetlink_standard_set(dev, - request, - request->len, - handler); - else - return rtnetlink_private_set(dev, - request, - request->len, - handler); - } - - return -EOPNOTSUPP; -} -#endif /* CONFIG_NET_WIRELESS_RTNETLINK */ - /************************* EVENT PROCESSING *************************/ /* diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 1863c0b07d45..a228d56a91b8 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -14,13 +14,3 @@ config WIRELESS_EXT Say N (if you can) unless you know you need wireless extensions for external modules. - -config NET_WIRELESS_RTNETLINK - bool "Wireless Extension API over RtNetlink" - depends on WIRELESS_EXT - ---help--- - Support the Wireless Extension API over the RtNetlink socket - in addition to the traditional ioctl interface (selected above). - - For now, few tools use this facility, but it might grow in the - future. The only downside is that it adds 4.5 kB to your kernel. -- cgit v1.2.3-59-g8ed1b From 164891aadf1721fca4dce473bb0e0998181537c6 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Mon, 23 Apr 2007 22:26:16 -0700 Subject: [TCP]: Congestion control API update. Do some simple changes to make congestion control API faster/cleaner. * use ktime_t rather than timeval * merge rtt sampling into existing ack callback this means one indirect call versus two per ack. * use flags bits to store options/settings Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 5 +++++ include/net/tcp.h | 9 +++++---- net/ipv4/tcp_bic.c | 2 +- net/ipv4/tcp_cong.c | 14 +++++++------- net/ipv4/tcp_cubic.c | 2 +- net/ipv4/tcp_htcp.c | 2 +- net/ipv4/tcp_illinois.c | 16 +++++++--------- net/ipv4/tcp_input.c | 25 ++++++++----------------- net/ipv4/tcp_lp.c | 8 +++++--- net/ipv4/tcp_output.c | 2 +- net/ipv4/tcp_vegas.c | 10 +++++++--- net/ipv4/tcp_veno.c | 10 +++++++--- net/ipv4/tcp_westwood.c | 2 +- net/ipv4/tcp_yeah.c | 6 ++++-- net/ipv4/tcp_yeah.h | 7 +++++-- 15 files changed, 65 insertions(+), 55 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 50f6f6a094cf..2694cb3ca763 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1569,6 +1569,11 @@ static inline void __net_timestamp(struct sk_buff *skb) skb->tstamp = ktime_get_real(); } +static inline ktime_t net_timedelta(ktime_t t) +{ + return ktime_sub(ktime_get_real(), t); +} + extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); diff --git a/include/net/tcp.h b/include/net/tcp.h index 43910fe3c448..a385797f160a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -629,9 +629,12 @@ enum tcp_ca_event { #define TCP_CA_MAX 128 #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX) +#define TCP_CONG_NON_RESTRICTED 0x1 +#define TCP_CONG_RTT_STAMP 0x2 + struct tcp_congestion_ops { struct list_head list; - int non_restricted; + unsigned long flags; /* initialize private data (optional) */ void (*init)(struct sock *sk); @@ -645,8 +648,6 @@ struct tcp_congestion_ops { /* do new cwnd calculation (required) */ void (*cong_avoid)(struct sock *sk, u32 ack, u32 rtt, u32 in_flight, int good_ack); - /* round trip time sample per acked packet (optional) */ - void (*rtt_sample)(struct sock *sk, u32 usrtt); /* call before changing ca_state (optional) */ void (*set_state)(struct sock *sk, u8 new_state); /* call when cwnd event occurs (optional) */ @@ -654,7 +655,7 @@ struct tcp_congestion_ops { /* new value of cwnd after loss (optional) */ u32 (*undo_cwnd)(struct sock *sk); /* hook for packet ack accounting (optional) */ - void (*pkts_acked)(struct sock *sk, u32 num_acked); + void (*pkts_acked)(struct sock *sk, u32 num_acked, ktime_t last); /* get info for inet_diag (optional) */ void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb); diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index 5730333cd0ac..281c9f913257 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -206,7 +206,7 @@ static void bictcp_state(struct sock *sk, u8 new_state) /* Track delayed acknowledgment ratio using sliding window * ratio = (15*ratio + sample) / 16 */ -static void bictcp_acked(struct sock *sk, u32 cnt) +static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last) { const struct inet_connection_sock *icsk = inet_csk(sk); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index ccd88407e0cd..86b26539e54b 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -126,7 +126,7 @@ int tcp_set_default_congestion_control(const char *name) #endif if (ca) { - ca->non_restricted = 1; /* default is always allowed */ + ca->flags |= TCP_CONG_NON_RESTRICTED; /* default is always allowed */ list_move(&ca->list, &tcp_cong_list); ret = 0; } @@ -181,7 +181,7 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen) *buf = '\0'; rcu_read_lock(); list_for_each_entry_rcu(ca, &tcp_cong_list, list) { - if (!ca->non_restricted) + if (!(ca->flags & 
TCP_CONG_NON_RESTRICTED)) continue; offs += snprintf(buf + offs, maxlen - offs, "%s%s", @@ -212,16 +212,16 @@ int tcp_set_allowed_congestion_control(char *val) } } - /* pass 2 clear */ + /* pass 2 clear old values */ list_for_each_entry_rcu(ca, &tcp_cong_list, list) - ca->non_restricted = 0; + ca->flags &= ~TCP_CONG_NON_RESTRICTED; /* pass 3 mark as allowed */ while ((name = strsep(&val, " ")) && *name) { ca = tcp_ca_find(name); WARN_ON(!ca); if (ca) - ca->non_restricted = 1; + ca->flags |= TCP_CONG_NON_RESTRICTED; } out: spin_unlock(&tcp_cong_list_lock); @@ -256,7 +256,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) if (!ca) err = -ENOENT; - else if (!(ca->non_restricted || capable(CAP_NET_ADMIN))) + else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN))) err = -EPERM; else if (!try_module_get(ca->owner)) @@ -371,8 +371,8 @@ u32 tcp_reno_min_cwnd(const struct sock *sk) EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd); struct tcp_congestion_ops tcp_reno = { + .flags = TCP_CONG_NON_RESTRICTED, .name = "reno", - .non_restricted = 1, .owner = THIS_MODULE, .ssthresh = tcp_reno_ssthresh, .cong_avoid = tcp_reno_cong_avoid, diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 296845be912b..14224487b16b 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -334,7 +334,7 @@ static void bictcp_state(struct sock *sk, u8 new_state) /* Track delayed acknowledgment ratio using sliding window * ratio = (15*ratio + sample) / 16 */ -static void bictcp_acked(struct sock *sk, u32 cnt) +static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last) { const struct inet_connection_sock *icsk = inet_csk(sk); diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 1020eb48d8d1..4ba4a7ae0a85 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -98,7 +98,7 @@ static inline void measure_rtt(struct sock *sk) } } -static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked) +static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t last) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index ae6298600886..8e3165917f72 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -83,9 +83,14 @@ static void tcp_illinois_init(struct sock *sk) } /* Measure RTT for each ack. 
*/ -static void tcp_illinois_rtt_sample(struct sock *sk, u32 rtt) +static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last) { struct illinois *ca = inet_csk_ca(sk); + u32 rtt; + + ca->acked = pkts_acked; + + rtt = ktime_to_ns(net_timedelta(last)) / NSEC_PER_USEC; /* ignore bogus values, this prevents wraparound in alpha math */ if (rtt > RTT_MAX) @@ -103,13 +108,6 @@ static void tcp_illinois_rtt_sample(struct sock *sk, u32 rtt) ca->sum_rtt += rtt; } -/* Capture count of packets covered by ack, to adjust for delayed acks */ -static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked) -{ - struct illinois *ca = inet_csk_ca(sk); - ca->acked = pkts_acked; -} - /* Maximum queuing delay */ static inline u32 max_delay(const struct illinois *ca) { @@ -325,12 +323,12 @@ static void tcp_illinois_info(struct sock *sk, u32 ext, } static struct tcp_congestion_ops tcp_illinois = { + .flags = TCP_CONG_RTT_STAMP, .init = tcp_illinois_init, .ssthresh = tcp_illinois_ssthresh, .min_cwnd = tcp_reno_min_cwnd, .cong_avoid = tcp_illinois_cong_avoid, .set_state = tcp_illinois_state, - .rtt_sample = tcp_illinois_rtt_sample, .get_info = tcp_illinois_info, .pkts_acked = tcp_illinois_acked, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 633389390788..051f0f815f17 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2402,14 +2402,6 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, return acked; } -static u32 tcp_usrtt(struct timeval *tv) -{ - struct timeval now; - - do_gettimeofday(&now); - return (now.tv_sec - tv->tv_sec) * 1000000 + (now.tv_usec - tv->tv_usec); -} - /* Remove acknowledged frames from the retransmission queue. */ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) { @@ -2420,9 +2412,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) int acked = 0; __s32 seq_rtt = -1; u32 pkts_acked = 0; - void (*rtt_sample)(struct sock *sk, u32 usrtt) - = icsk->icsk_ca_ops->rtt_sample; - struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; + ktime_t last_ackt = ktime_set(0,0); while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { @@ -2471,7 +2461,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) seq_rtt = -1; } else if (seq_rtt < 0) { seq_rtt = now - scb->when; - skb_get_timestamp(skb, &tv); + last_ackt = skb->tstamp; } if (sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= tcp_skb_pcount(skb); @@ -2484,7 +2474,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) } } else if (seq_rtt < 0) { seq_rtt = now - scb->when; - skb_get_timestamp(skb, &tv); + last_ackt = skb->tstamp; } tcp_dec_pcount_approx(&tp->fackets_out, skb); tcp_packets_out_dec(tp, skb); @@ -2494,13 +2484,14 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) } if (acked&FLAG_ACKED) { + const struct tcp_congestion_ops *ca_ops + = inet_csk(sk)->icsk_ca_ops; + tcp_ack_update_rtt(sk, acked, seq_rtt); tcp_ack_packets_out(sk); - if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED)) - (*rtt_sample)(sk, tcp_usrtt(&tv)); - if (icsk->icsk_ca_ops->pkts_acked) - icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked); + if (ca_ops->pkts_acked) + ca_ops->pkts_acked(sk, pkts_acked, last_ackt); } #if FASTRETRANS_DEBUG > 0 diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index f0ebaf0e21cb..b4e062ab24a1 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -218,7 +218,7 @@ static u32 tcp_lp_owd_calculator(struct sock *sk) * 3. calc smoothed OWD (SOWD). 
* Most ideas come from the original TCP-LP implementation. */ -static void tcp_lp_rtt_sample(struct sock *sk, u32 usrtt) +static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt) { struct lp *lp = inet_csk_ca(sk); s64 mowd = tcp_lp_owd_calculator(sk); @@ -261,11 +261,13 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 usrtt) * newReno in increase case. * We work it out by following the idea from TCP-LP's paper directly */ -static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked) +static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last) { struct tcp_sock *tp = tcp_sk(sk); struct lp *lp = inet_csk_ca(sk); + tcp_lp_rtt_sample(sk, ktime_to_ns(net_timedelta(last)) / NSEC_PER_USEC); + /* calc inference */ if (tcp_time_stamp > tp->rx_opt.rcv_tsecr) lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr); @@ -312,11 +314,11 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked) } static struct tcp_congestion_ops tcp_lp = { + .flags = TCP_CONG_RTT_STAMP, .init = tcp_lp_init, .ssthresh = tcp_reno_ssthresh, .cong_avoid = tcp_lp_cong_avoid, .min_cwnd = tcp_reno_min_cwnd, - .rtt_sample = tcp_lp_rtt_sample, .pkts_acked = tcp_lp_pkts_acked, .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3a60aea744ae..e70a6840cb64 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -409,7 +409,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, /* If congestion control is doing timestamping, we must * take such a timestamp before we potentially clone/copy. */ - if (icsk->icsk_ca_ops->rtt_sample) + if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) __net_timestamp(skb); if (likely(clone_it)) { diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index 87e72bef6d08..f4104eeb5f26 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -120,10 +120,13 @@ static void tcp_vegas_init(struct sock *sk) * o min-filter RTT samples from a much longer window (forever for now) * to find the propagation delay (baseRTT) */ -static void tcp_vegas_rtt_calc(struct sock *sk, u32 usrtt) +static void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last) { struct vegas *vegas = inet_csk_ca(sk); - u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */ + u32 vrtt; + + /* Never allow zero rtt or baseRTT */ + vrtt = (ktime_to_ns(net_timedelta(last)) / NSEC_PER_USEC) + 1; /* Filter to find propagation delay: */ if (vrtt < vegas->baseRTT) @@ -353,11 +356,12 @@ static void tcp_vegas_get_info(struct sock *sk, u32 ext, } static struct tcp_congestion_ops tcp_vegas = { + .flags = TCP_CONG_RTT_STAMP, .init = tcp_vegas_init, .ssthresh = tcp_reno_ssthresh, .cong_avoid = tcp_vegas_cong_avoid, .min_cwnd = tcp_reno_min_cwnd, - .rtt_sample = tcp_vegas_rtt_calc, + .pkts_acked = tcp_vegas_pkts_acked, .set_state = tcp_vegas_state, .cwnd_event = tcp_vegas_cwnd_event, .get_info = tcp_vegas_get_info, diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index ce57bf302f6c..0b50d0607a0e 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -69,10 +69,13 @@ static void tcp_veno_init(struct sock *sk) } /* Do rtt sampling needed for Veno. 
*/ -static void tcp_veno_rtt_calc(struct sock *sk, u32 usrtt) +static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last) { struct veno *veno = inet_csk_ca(sk); - u32 vrtt = usrtt + 1; /* Never allow zero rtt or basertt */ + u32 vrtt; + + /* Never allow zero rtt or baseRTT */ + vrtt = (ktime_to_ns(net_timedelta(last)) / NSEC_PER_USEC) + 1; /* Filter to find propagation delay: */ if (vrtt < veno->basertt) @@ -199,10 +202,11 @@ static u32 tcp_veno_ssthresh(struct sock *sk) } static struct tcp_congestion_ops tcp_veno = { + .flags = TCP_CONG_RTT_STAMP, .init = tcp_veno_init, .ssthresh = tcp_veno_ssthresh, .cong_avoid = tcp_veno_cong_avoid, - .rtt_sample = tcp_veno_rtt_calc, + .pkts_acked = tcp_veno_pkts_acked, .set_state = tcp_veno_state, .cwnd_event = tcp_veno_cwnd_event, diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index ae1026a67720..e61e09dd513e 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c @@ -100,7 +100,7 @@ static void westwood_filter(struct westwood *w, u32 delta) * Called after processing group of packets. * but all westwood needs is the last sample of srtt. */ -static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt) +static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, ktime_t last) { struct westwood *w = inet_csk_ca(sk); if (cnt > 0) diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index 46dd1bee583a..81ef02c1649a 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -64,13 +64,15 @@ static void tcp_yeah_init(struct sock *sk) } -static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked) +static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last) { const struct inet_connection_sock *icsk = inet_csk(sk); struct yeah *yeah = inet_csk_ca(sk); if (icsk->icsk_ca_state == TCP_CA_Open) yeah->pkts_acked = pkts_acked; + + tcp_vegas_pkts_acked(sk, pkts_acked, last); } static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, @@ -237,11 +239,11 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) { } static struct tcp_congestion_ops tcp_yeah = { + .flags = TCP_CONG_RTT_STAMP, .init = tcp_yeah_init, .ssthresh = tcp_yeah_ssthresh, .cong_avoid = tcp_yeah_cong_avoid, .min_cwnd = tcp_reno_min_cwnd, - .rtt_sample = tcp_vegas_rtt_calc, .set_state = tcp_vegas_state, .cwnd_event = tcp_vegas_cwnd_event, .get_info = tcp_vegas_get_info, diff --git a/net/ipv4/tcp_yeah.h b/net/ipv4/tcp_yeah.h index a62d82038fd0..33ad5385c188 100644 --- a/net/ipv4/tcp_yeah.h +++ b/net/ipv4/tcp_yeah.h @@ -81,10 +81,13 @@ static void tcp_vegas_state(struct sock *sk, u8 ca_state) * o min-filter RTT samples from a much longer window (forever for now) * to find the propagation delay (baseRTT) */ -static void tcp_vegas_rtt_calc(struct sock *sk, u32 usrtt) +static void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last) { struct vegas *vegas = inet_csk_ca(sk); - u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */ + u32 vrtt; + + /* Never allow zero rtt or baseRTT */ + vrtt = (ktime_to_ns(net_timedelta(last)) / NSEC_PER_USEC) + 1; /* Filter to find propagation delay: */ if (vrtt < vegas->baseRTT) -- cgit v1.2.3-59-g8ed1b From 43fb45cb79e9441a79ece206cf741774500dd627 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 24 Apr 2007 14:07:27 -0700 Subject: [WIRELESS] cfg80211: Update comment for locking. This patch adds a comment that was part of my rtnl locking patch for cfg80211 but which I forgot for the merge. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville Signed-off-by: David S. 
Miller --- include/net/cfg80211.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 783a11437a57..88171f8ce58a 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -23,6 +23,10 @@ struct wiphy; * All callbacks except where otherwise noted should return 0 * on success or a negative error code. * + * All operations are currently invoked under rtnl for consistency with the + * wireless extensions but this is subject to reevaluation as soon as this + * code is used more widely and we have a first user without wext. + * * @add_virtual_intf: create a new virtual interface with the given name * * @del_virtual_intf: remove the virtual interface determined by ifindex. -- cgit v1.2.3-59-g8ed1b From 84299b3bc4eaedc0734fcc9052b01291e44445fc Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki Date: Tue, 24 Apr 2007 16:21:38 -0700 Subject: [TCP]: Fix linkage errors on i386. To avoid raw division, use ktime_to_timeval() to get usec. Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- include/linux/ktime.h | 6 ++++++ net/ipv4/tcp_illinois.c | 2 +- net/ipv4/tcp_lp.c | 2 +- net/ipv4/tcp_vegas.c | 2 +- net/ipv4/tcp_veno.c | 2 +- 5 files changed, 10 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 248305bb9a18..81bb9c7a4eb3 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -259,6 +259,12 @@ static inline s64 ktime_to_ns(const ktime_t kt) #endif +static inline s64 ktime_to_us(const ktime_t kt) +{ + struct timeval tv = ktime_to_timeval(kt); + return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec; +} + /* * The resolution of the clocks. The resolution value is returned in * the clock_getres() system call to give application programmers an diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 8e3165917f72..4adc47c55351 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -90,7 +90,7 @@ static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last) ca->acked = pkts_acked; - rtt = ktime_to_ns(net_timedelta(last)) / NSEC_PER_USEC; + rtt = ktime_to_us(net_timedelta(last)); /* ignore bogus values, this prevents wraparound in alpha math */ if (rtt > RTT_MAX) diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index b4e062ab24a1..43294ad9f63e 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -266,7 +266,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last) struct tcp_sock *tp = tcp_sk(sk); struct lp *lp = inet_csk_ca(sk); - tcp_lp_rtt_sample(sk, ktime_to_ns(net_timedelta(last)) / NSEC_PER_USEC); + tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last))); /* calc inference */ if (tcp_time_stamp > tp->rx_opt.rcv_tsecr) diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index 0f0ee7f732c3..73e19cf7df21 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -118,7 +118,7 @@ void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last) u32 vrtt; /* Never allow zero rtt or baseRTT */ - vrtt = (ktime_to_ns(net_timedelta(last)) / NSEC_PER_USEC) + 1; + vrtt = ktime_to_us(net_timedelta(last)) + 1; /* Filter to find propagation delay: */ if (vrtt < vegas->baseRTT) diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index 0b50d0607a0e..9edb340f2f95 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -75,7 +75,7 @@ static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last) u32 vrtt; /* Never allow zero rtt or baseRTT */ - 
vrtt = (ktime_to_ns(net_timedelta(last)) / NSEC_PER_USEC) + 1; + vrtt = ktime_to_us(net_timedelta(last)) + 1; /* Filter to find propagation delay: */ if (vrtt < veno->basertt) -- cgit v1.2.3-59-g8ed1b From 5e0f04351d11e07a23b5ab4914282cbb78027e50 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 24 Apr 2007 21:53:35 -0700 Subject: [IPV4]: Consolidate common SNMP code This patch moves the SNMP code shared between IPv4/IPv6 from proc.c into net/ipv4/af_inet.c. This makes sense because these functions aren't specific to /proc. As a result we can again skip proc.o if /proc is disabled. Signed-off-by: Herbert Xu Acked-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- include/net/ip.h | 1 + net/ipv4/Makefile | 4 ++-- net/ipv4/af_inet.c | 40 ++++++++++++++++++++++++++++++++ net/ipv4/proc.c | 68 +++++++++++------------------------------------------- 4 files changed, 57 insertions(+), 56 deletions(-) (limited to 'include') diff --git a/include/net/ip.h b/include/net/ip.h index f41ce07f6700..bb207db03675 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -166,6 +166,7 @@ DECLARE_SNMP_STAT(struct linux_mib, net_statistics); #define NET_ADD_STATS_BH(field, adnd) SNMP_ADD_STATS_BH(net_statistics, field, adnd) #define NET_ADD_STATS_USER(field, adnd) SNMP_ADD_STATS_USER(net_statistics, field, adnd) +extern unsigned long snmp_fold_field(void *mib[], int offt); extern int snmp_mib_init(void *ptr[2], size_t mibsize, size_t mibalign); extern void snmp_mib_free(void *ptr[2]); diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 3bd25f56f5da..4ff6c151d7f3 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -10,11 +10,11 @@ obj-y := route.o inetpeer.o protocol.o \ tcp_minisocks.o tcp_cong.o \ datagram.o raw.o udp.o udplite.o \ arp.o icmp.o devinet.o af_inet.o igmp.o \ - sysctl_net_ipv4.o fib_frontend.o fib_semantics.o \ - proc.o + sysctl_net_ipv4.o fib_frontend.o fib_semantics.o obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o +obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o obj-$(CONFIG_IP_MROUTE) += ipmr.o obj-$(CONFIG_NET_IPIP) += ipip.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index a33ca7e7e8f8..16aae8ef5555 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1219,6 +1219,46 @@ out: return segs; } +unsigned long snmp_fold_field(void *mib[], int offt) +{ + unsigned long res = 0; + int i; + + for_each_possible_cpu(i) { + res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); + res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); + } + return res; +} +EXPORT_SYMBOL_GPL(snmp_fold_field); + +int snmp_mib_init(void *ptr[2], size_t mibsize, size_t mibalign) +{ + BUG_ON(ptr == NULL); + ptr[0] = __alloc_percpu(mibsize); + if (!ptr[0]) + goto err0; + ptr[1] = __alloc_percpu(mibsize); + if (!ptr[1]) + goto err1; + return 0; +err1: + free_percpu(ptr[0]); + ptr[0] = NULL; +err0: + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(snmp_mib_init); + +void snmp_mib_free(void *ptr[2]) +{ + BUG_ON(ptr == NULL); + free_percpu(ptr[0]); + free_percpu(ptr[1]); + ptr[0] = ptr[1] = NULL; +} +EXPORT_SYMBOL_GPL(snmp_mib_free); + #ifdef CONFIG_IP_MULTICAST static struct net_protocol igmp_protocol = { .handler = igmp_rcv, diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index a236154591fa..37ab5802ca08 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -45,7 +45,6 @@ #include #include -#ifdef CONFIG_PROC_FS static int fold_prot_inuse(struct proto *proto) { int res = 0; @@ -88,19 +87,6 @@ static const struct file_operations 
sockstat_seq_fops = { .release = single_release, }; -static unsigned long -fold_field(void *mib[], int offt) -{ - unsigned long res = 0; - int i; - - for_each_possible_cpu(i) { - res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); - res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); - } - return res; -} - /* snmp items */ static const struct snmp_mib snmp4_ipstats_list[] = { SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INRECEIVES), @@ -267,8 +253,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v) for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) seq_printf(seq, " %lu", - fold_field((void **) ip_statistics, - snmp4_ipstats_list[i].entry)); + snmp_fold_field((void **)ip_statistics, + snmp4_ipstats_list[i].entry)); seq_puts(seq, "\nIcmp:"); for (i = 0; snmp4_icmp_list[i].name != NULL; i++) @@ -277,8 +263,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v) seq_puts(seq, "\nIcmp:"); for (i = 0; snmp4_icmp_list[i].name != NULL; i++) seq_printf(seq, " %lu", - fold_field((void **) icmp_statistics, - snmp4_icmp_list[i].entry)); + snmp_fold_field((void **)icmp_statistics, + snmp4_icmp_list[i].entry)); seq_puts(seq, "\nTcp:"); for (i = 0; snmp4_tcp_list[i].name != NULL; i++) @@ -289,12 +275,12 @@ static int snmp_seq_show(struct seq_file *seq, void *v) /* MaxConn field is signed, RFC 2012 */ if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) seq_printf(seq, " %ld", - fold_field((void **) tcp_statistics, - snmp4_tcp_list[i].entry)); + snmp_fold_field((void **)tcp_statistics, + snmp4_tcp_list[i].entry)); else seq_printf(seq, " %lu", - fold_field((void **) tcp_statistics, - snmp4_tcp_list[i].entry)); + snmp_fold_field((void **)tcp_statistics, + snmp4_tcp_list[i].entry)); } seq_puts(seq, "\nUdp:"); @@ -304,8 +290,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v) seq_puts(seq, "\nUdp:"); for (i = 0; snmp4_udp_list[i].name != NULL; i++) seq_printf(seq, " %lu", - fold_field((void **) udp_statistics, - snmp4_udp_list[i].entry)); + snmp_fold_field((void **)udp_statistics, + snmp4_udp_list[i].entry)); /* the UDP and UDP-Lite MIBs are the same */ seq_puts(seq, "\nUdpLite:"); @@ -315,8 +301,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v) seq_puts(seq, "\nUdpLite:"); for (i = 0; snmp4_udp_list[i].name != NULL; i++) seq_printf(seq, " %lu", - fold_field((void **) udplite_statistics, - snmp4_udp_list[i].entry) ); + snmp_fold_field((void **)udplite_statistics, + snmp4_udp_list[i].entry)); seq_putc(seq, '\n'); return 0; @@ -349,8 +335,8 @@ static int netstat_seq_show(struct seq_file *seq, void *v) seq_puts(seq, "\nTcpExt:"); for (i = 0; snmp4_net_list[i].name != NULL; i++) seq_printf(seq, " %lu", - fold_field((void **) net_statistics, - snmp4_net_list[i].entry)); + snmp_fold_field((void **)net_statistics, + snmp4_net_list[i].entry)); seq_putc(seq, '\n'); return 0; @@ -391,30 +377,4 @@ out_netstat: rc = -ENOMEM; goto out; } -#endif - -int snmp_mib_init(void *ptr[2], size_t mibsize, size_t mibalign) -{ - BUG_ON(ptr == NULL); - ptr[0] = __alloc_percpu(mibsize); - if (!ptr[0]) - goto err0; - ptr[1] = __alloc_percpu(mibsize); - if (!ptr[1]) - goto err1; - return 0; -err1: - free_percpu(ptr[0]); - ptr[0] = NULL; -err0: - return -ENOMEM; -} - -void snmp_mib_free(void *ptr[2]) -{ - BUG_ON(ptr == NULL); - free_percpu(ptr[0]); - free_percpu(ptr[1]); - ptr[0] = ptr[1] = NULL; -} -- cgit v1.2.3-59-g8ed1b From 7f7d9a6b96c5708c5184cbed61bbc15b163a0f08 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 24 Apr 2007 21:54:09 -0700 Subject: [IPV6]: Consolidate common SNMP code 
This patch moves the non-proc SNMP code into addrconf.c and reuses IPv4 SNMP code where applicable. As a result we can skip proc.o if /proc is disabled. Note that I've made a number of functions static since they're only used by addrconf.c for now. If they ever get used elsewhere we can always remove the static. Signed-off-by: Herbert Xu Acked-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- include/net/ipv6.h | 20 +++++---- net/ipv6/Makefile | 3 +- net/ipv6/addrconf.c | 60 +++++++++++++++++++++++++ net/ipv6/af_inet6.c | 30 ++++++------- net/ipv6/proc.c | 125 +--------------------------------------------------- 5 files changed, 91 insertions(+), 147 deletions(-) (limited to 'include') diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4408def379bf..f70afef9c3cc 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -166,14 +166,6 @@ DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6); if (is_udplite) SNMP_INC_STATS_USER(udplite_stats_in6, field); \ else SNMP_INC_STATS_USER(udp_stats_in6, field); } while(0) -int snmp6_register_dev(struct inet6_dev *idev); -int snmp6_unregister_dev(struct inet6_dev *idev); -int snmp6_alloc_dev(struct inet6_dev *idev); -int snmp6_free_dev(struct inet6_dev *idev); -int snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign); -void snmp6_mib_free(void *ptr[2]); -void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes); - struct ip6_ra_chain { struct ip6_ra_chain *next; @@ -606,8 +598,20 @@ extern int udplite6_proc_init(void); extern void udplite6_proc_exit(void); extern int ipv6_misc_proc_init(void); extern void ipv6_misc_proc_exit(void); +extern int snmp6_register_dev(struct inet6_dev *idev); +extern int snmp6_unregister_dev(struct inet6_dev *idev); extern struct rt6_statistics rt6_stats; +#else +static inline int snmp6_register_dev(struct inet6_dev *idev) +{ + return 0; +} + +static inline int snmp6_unregister_dev(struct inet6_dev *idev) +{ + return 0; +} #endif #ifdef CONFIG_SYSCTL diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index e9478688c3d3..0acf64dc5775 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_IPV6) += ipv6.o ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \ raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ - exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \ + exthdrs.o sysctl_net_ipv6.o datagram.o \ ip6_flowlabel.o inet6_connection_sock.o ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \ @@ -28,6 +28,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o obj-$(CONFIG_NETFILTER) += netfilter/ +obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_IPV6_SIT) += sit.o obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 33ccc95c349b..ea86bf4bfe0a 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -81,6 +81,7 @@ #endif #include +#include #include #include @@ -246,6 +247,37 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp, add_timer(&ifp->timer); } +static int snmp6_alloc_dev(struct inet6_dev *idev) +{ + int err = -ENOMEM; + + if (!idev || !idev->dev) + return -EINVAL; + + if (snmp_mib_init((void **)idev->stats.ipv6, + sizeof(struct ipstats_mib), + __alignof__(struct ipstats_mib)) < 0) + goto err_ip; + if (snmp_mib_init((void **)idev->stats.icmpv6, + 
sizeof(struct icmpv6_mib), + __alignof__(struct icmpv6_mib)) < 0) + goto err_icmp; + + return 0; + +err_icmp: + snmp_mib_free((void **)idev->stats.ipv6); +err_ip: + return err; +} + +static int snmp6_free_dev(struct inet6_dev *idev) +{ + snmp_mib_free((void **)idev->stats.icmpv6); + snmp_mib_free((void **)idev->stats.ipv6); + return 0; +} + /* Nobody refers to this device, we may destroy it. */ static void in6_dev_finish_destroy_rcu(struct rcu_head *head) @@ -3438,6 +3470,34 @@ static inline size_t inet6_if_nlmsg_size(void) ); } +static inline void __snmp6_fill_stats(u64 *stats, void **mib, int items, + int bytes) +{ + int i; + int pad = bytes - sizeof(u64) * items; + BUG_ON(pad < 0); + + /* Use put_unaligned() because stats may not be aligned for u64. */ + put_unaligned(items, &stats[0]); + for (i = 1; i < items; i++) + put_unaligned(snmp_fold_field(mib, i), &stats[i]); + + memset(&stats[items], 0, pad); +} + +static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, + int bytes) +{ + switch(attrtype) { + case IFLA_INET6_STATS: + __snmp6_fill_stats(stats, (void **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); + break; + case IFLA_INET6_ICMP6STATS: + __snmp6_fill_stats(stats, (void **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); + break; + } +} + static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, u32 pid, u32 seq, int event, unsigned int flags) { diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 825d03e87ae0..18cb928c8d92 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -714,26 +714,26 @@ EXPORT_SYMBOL_GPL(ipv6_opt_accepted); static int __init init_ipv6_mibs(void) { - if (snmp6_mib_init((void **)ipv6_statistics, sizeof (struct ipstats_mib), - __alignof__(struct ipstats_mib)) < 0) + if (snmp_mib_init((void **)ipv6_statistics, sizeof (struct ipstats_mib), + __alignof__(struct ipstats_mib)) < 0) goto err_ip_mib; - if (snmp6_mib_init((void **)icmpv6_statistics, sizeof (struct icmpv6_mib), - __alignof__(struct icmpv6_mib)) < 0) + if (snmp_mib_init((void **)icmpv6_statistics, sizeof (struct icmpv6_mib), + __alignof__(struct icmpv6_mib)) < 0) goto err_icmp_mib; - if (snmp6_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib), - __alignof__(struct udp_mib)) < 0) + if (snmp_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib), + __alignof__(struct udp_mib)) < 0) goto err_udp_mib; - if (snmp6_mib_init((void **)udplite_stats_in6, sizeof (struct udp_mib), - __alignof__(struct udp_mib)) < 0) + if (snmp_mib_init((void **)udplite_stats_in6, sizeof (struct udp_mib), + __alignof__(struct udp_mib)) < 0) goto err_udplite_mib; return 0; err_udplite_mib: - snmp6_mib_free((void **)udp_stats_in6); + snmp_mib_free((void **)udp_stats_in6); err_udp_mib: - snmp6_mib_free((void **)icmpv6_statistics); + snmp_mib_free((void **)icmpv6_statistics); err_icmp_mib: - snmp6_mib_free((void **)ipv6_statistics); + snmp_mib_free((void **)ipv6_statistics); err_ip_mib: return -ENOMEM; @@ -741,10 +741,10 @@ err_ip_mib: static void cleanup_ipv6_mibs(void) { - snmp6_mib_free((void **)ipv6_statistics); - snmp6_mib_free((void **)icmpv6_statistics); - snmp6_mib_free((void **)udp_stats_in6); - snmp6_mib_free((void **)udplite_stats_in6); + snmp_mib_free((void **)ipv6_statistics); + snmp_mib_free((void **)icmpv6_statistics); + snmp_mib_free((void **)udp_stats_in6); + snmp_mib_free((void **)udplite_stats_in6); } static int __init inet6_init(void) diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 7a00bedb6b93..acb306a5dd56 100644 --- a/net/ipv6/proc.c +++ 
b/net/ipv6/proc.c @@ -23,13 +23,12 @@ #include #include #include -#include +#include #include #include #include #include -#ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_net_devsnmp6; static int fold_prot_inuse(struct proto *proto) @@ -142,29 +141,14 @@ static struct snmp_mib snmp6_udplite6_list[] = { SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), SNMP_MIB_SENTINEL }; -#endif /* CONFIG_PROC_FS */ -static unsigned long -fold_field(void *mib[], int offt) -{ - unsigned long res = 0; - int i; - - for_each_possible_cpu(i) { - res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); - res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); - } - return res; -} - -#ifdef CONFIG_PROC_FS static inline void snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist) { int i; for (i=0; itemlist[i].name; i++) seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, - fold_field(mib, itemlist[i].entry)); + snmp_fold_field(mib, itemlist[i].entry)); } static int snmp6_seq_show(struct seq_file *seq, void *v) @@ -209,37 +193,7 @@ static const struct file_operations snmp6_seq_fops = { .llseek = seq_lseek, .release = single_release, }; -#endif /* CONFIG_PROC_FS */ -static inline void -__snmp6_fill_stats(u64 *stats, void **mib, int items, int bytes) -{ - int i; - int pad = bytes - sizeof(u64) * items; - BUG_ON(pad < 0); - - /* Use put_unaligned() because stats may not be aligned for u64. */ - put_unaligned(items, &stats[0]); - for (i = 1; i < items; i++) - put_unaligned(fold_field(mib, i), &stats[i]); - - memset(&stats[items], 0, pad); -} - -void -snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, int bytes) -{ - switch(attrtype) { - case IFLA_INET6_STATS: - __snmp6_fill_stats(stats, (void **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); - break; - case IFLA_INET6_ICMP6STATS: - __snmp6_fill_stats(stats, (void **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); - break; - } -} - -#ifdef CONFIG_PROC_FS int snmp6_register_dev(struct inet6_dev *idev) { struct proc_dir_entry *p; @@ -304,78 +258,3 @@ void ipv6_misc_proc_exit(void) proc_net_remove("snmp6"); } -#else /* CONFIG_PROC_FS */ - - -int snmp6_register_dev(struct inet6_dev *idev) -{ - return 0; -} - -int snmp6_unregister_dev(struct inet6_dev *idev) -{ - return 0; -} - -#endif /* CONFIG_PROC_FS */ - -int snmp6_alloc_dev(struct inet6_dev *idev) -{ - int err = -ENOMEM; - - if (!idev || !idev->dev) - return -EINVAL; - - if (snmp6_mib_init((void **)idev->stats.ipv6, sizeof(struct ipstats_mib), - __alignof__(struct ipstats_mib)) < 0) - goto err_ip; - if (snmp6_mib_init((void **)idev->stats.icmpv6, sizeof(struct icmpv6_mib), - __alignof__(struct icmpv6_mib)) < 0) - goto err_icmp; - - return 0; - -err_icmp: - snmp6_mib_free((void **)idev->stats.ipv6); -err_ip: - return err; -} - -int snmp6_free_dev(struct inet6_dev *idev) -{ - snmp6_mib_free((void **)idev->stats.icmpv6); - snmp6_mib_free((void **)idev->stats.ipv6); - return 0; -} - -int snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign) -{ - if (ptr == NULL) - return -EINVAL; - - ptr[0] = __alloc_percpu(mibsize); - if (!ptr[0]) - goto err0; - - ptr[1] = __alloc_percpu(mibsize); - if (!ptr[1]) - goto err1; - - return 0; - -err1: - free_percpu(ptr[0]); - ptr[0] = NULL; -err0: - return -ENOMEM; -} - -void snmp6_mib_free(void *ptr[2]) -{ - if (ptr == NULL) - return; - free_percpu(ptr[0]); - free_percpu(ptr[1]); - ptr[0] = ptr[1] = NULL; -} - -- cgit v1.2.3-59-g8ed1b From df8981dc1928f3a231d91f27c2b3dc373fb4d410 Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki 
Date: Tue, 24 Apr 2007 20:44:49 +0900 Subject: [IPV6]: Export in6addr_any for future use. Signed-off-by: YOSHIFUJI Hideaki --- include/linux/in6.h | 2 -- net/ipv6/addrconf.c | 2 -- 2 files changed, 4 deletions(-) (limited to 'include') diff --git a/include/linux/in6.h b/include/linux/in6.h index d559fac4a26d..2a61c82af115 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h @@ -44,10 +44,8 @@ struct in6_addr * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined * in network byte order, not in host byte order as are the IPv4 equivalents */ -#if 0 extern const struct in6_addr in6addr_any; #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } } -#endif extern const struct in6_addr in6addr_loopback; #define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ea86bf4bfe0a..e04e49373505 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -209,9 +209,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { }; /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ -#if 0 const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; -#endif const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; static void addrconf_del_timer(struct inet6_ifaddr *ifp) -- cgit v1.2.3-59-g8ed1b From 2111f8b9e58fd04b87b8b07d66485f255a57b0bb Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 25 Apr 2007 22:05:55 -0700 Subject: [BRIDGE]: drop PAUSE frames Pause frames should never make it out of the network device into the stack. But if a device was misconfigured, it might happen. So drop pause frames in bridge. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- include/linux/if_ether.h | 1 + net/bridge/br_input.c | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index f6863fbcf334..1db774cf9dc2 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -61,6 +61,7 @@ #define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ #define ETH_P_IPX 0x8137 /* IPX over DIX */ #define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ +#define ETH_P_PAUSE 0x8808 /* IEEE Pause frames. See 802.3 31B */ #define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */ #define ETH_P_WCCP 0x883E /* Web-cache coordination protocol * defined in draft-wilson-wrec-wccp-v2-00.txt */ diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 364e0ba44158..5662567c8aed 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -131,9 +131,14 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) goto drop; - if (unlikely(is_link_local(dest))) + if (unlikely(is_link_local(dest))) { + /* Pause frames shouldn't be passed up by driver anyway */ + if (skb->protocol == htons(ETH_P_PAUSE)) + goto drop; + return (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, NULL, br_handle_local_finish) == 0) ? skb : NULL; + } switch (p->state) { case BR_STATE_FORWARDING: -- cgit v1.2.3-59-g8ed1b From 28d8909bc790d936ce33f4402adf7577533bbd4b Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Thu, 26 Apr 2007 00:10:29 -0700 Subject: [XFRM]: Export SAD info. 
On a system with a lot of SAs, counting SAD entries chews useful CPU time since you need to dump the whole SAD to user space; i.e something like ip xfrm state ls | grep -i src | wc -l I have seen taking literally minutes on a 40K SAs when the system is swapping. With this patch, some of the SAD info (that was already being tracked) is exposed to user space. i.e you do: ip xfrm state count And you get the count; you can also pass -s to the command line and get the hash info. Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- include/linux/xfrm.h | 25 +++++++++++++++++++++++ include/net/xfrm.h | 8 ++++++++ net/xfrm/xfrm_state.c | 10 +++++++++ net/xfrm/xfrm_user.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 99 insertions(+) (limited to 'include') diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index 15ca89e9961b..9c656a5cf842 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -181,6 +181,10 @@ enum { XFRM_MSG_MIGRATE, #define XFRM_MSG_MIGRATE XFRM_MSG_MIGRATE + XFRM_MSG_NEWSADINFO, +#define XFRM_MSG_NEWSADINFO XFRM_MSG_NEWSADINFO + XFRM_MSG_GETSADINFO, +#define XFRM_MSG_GETSADINFO XFRM_MSG_GETSADINFO __XFRM_MSG_MAX }; #define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1) @@ -234,6 +238,17 @@ enum xfrm_ae_ftype_t { #define XFRM_AE_MAX (__XFRM_AE_MAX - 1) }; +/* SAD Table filter flags */ +enum xfrm_sad_ftype_t { + XFRM_SAD_UNSPEC, + XFRM_SAD_HMASK=1, + XFRM_SAD_HMAX=2, + XFRM_SAD_CNT=4, + __XFRM_SAD_MAX + +#define XFRM_SAD_MAX (__XFRM_SAD_MAX - 1) +}; + struct xfrm_userpolicy_type { __u8 type; __u16 reserved1; @@ -265,6 +280,16 @@ enum xfrm_attr_type_t { #define XFRMA_MAX (__XFRMA_MAX - 1) }; +enum xfrm_sadattr_type_t { + XFRMA_SAD_UNSPEC, + XFRMA_SADHMASK, + XFRMA_SADHMAX, + XFRMA_SADCNT, + __XFRMA_SAD_MAX + +#define XFRMA_SAD_MAX (__XFRMA_SAD_MAX - 1) +}; + struct xfrm_usersa_info { struct xfrm_selector sel; struct xfrm_id id; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index e144a25814bd..8287081d77f2 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -416,6 +416,13 @@ struct xfrm_audit u32 secid; }; +/* SAD metadata, add more later */ +struct xfrm_sadinfo +{ + u32 sadhcnt; /* current hash bkts */ + u32 sadhmcnt; /* max allowed hash bkts */ + u32 sadcnt; /* current running count */ +}; #ifdef CONFIG_AUDITSYSCALL extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result, struct xfrm_policy *xp, struct xfrm_state *x); @@ -938,6 +945,7 @@ static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **s extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq); extern int xfrm_state_delete(struct xfrm_state *x); extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info); +extern void xfrm_sad_getinfo(struct xfrm_sadinfo *si); extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq); extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq); extern void xfrm_replay_notify(struct xfrm_state *x, int event); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 743f07e7f698..f3a61ebd8d65 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -421,6 +421,16 @@ restart: } EXPORT_SYMBOL(xfrm_state_flush); +void xfrm_sad_getinfo(struct xfrm_sadinfo *si) +{ + spin_lock_bh(&xfrm_state_lock); + si->sadcnt = xfrm_state_num; + si->sadhcnt = xfrm_state_hmask; + si->sadhmcnt = xfrm_state_hashmax; + spin_unlock_bh(&xfrm_state_lock); +} +EXPORT_SYMBOL(xfrm_sad_getinfo); + static int xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl, struct xfrm_tmpl *tmpl, diff 
--git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index f91521d5f2ab..cb4cc1bde5d1 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -672,6 +672,61 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, return skb; } +static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) +{ + struct xfrm_sadinfo si; + struct nlmsghdr *nlh; + u32 *f; + + nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); + if (nlh == NULL) /* shouldnt really happen ... */ + return -EMSGSIZE; + + f = nlmsg_data(nlh); + *f = flags; + xfrm_sad_getinfo(&si); + + if (flags & XFRM_SAD_HMASK) + NLA_PUT_U32(skb, XFRMA_SADHMASK, si.sadhcnt); + if (flags & XFRM_SAD_HMAX) + NLA_PUT_U32(skb, XFRMA_SADHMAX, si.sadhmcnt); + if (flags & XFRM_SAD_CNT) + NLA_PUT_U32(skb, XFRMA_SADCNT, si.sadcnt); + + return nlmsg_end(skb, nlh); + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, + struct rtattr **xfrma) +{ + struct sk_buff *r_skb; + u32 *flags = NLMSG_DATA(nlh); + u32 spid = NETLINK_CB(skb).pid; + u32 seq = nlh->nlmsg_seq; + int len = NLMSG_LENGTH(sizeof(u32)); + + if (*flags & XFRM_SAD_HMASK) + len += RTA_SPACE(sizeof(u32)); + if (*flags & XFRM_SAD_HMAX) + len += RTA_SPACE(sizeof(u32)); + if (*flags & XFRM_SAD_CNT) + len += RTA_SPACE(sizeof(u32)); + + r_skb = alloc_skb(len, GFP_ATOMIC); + + if (r_skb == NULL) + return -ENOMEM; + + if (build_sadinfo(r_skb, spid, seq, *flags) < 0) + BUG(); + + return nlmsg_unicast(xfrm_nl, r_skb, spid); +} + static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct rtattr **xfrma) { @@ -1850,6 +1905,7 @@ static struct xfrm_link { [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae }, [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, + [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo }, }; static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) -- cgit v1.2.3-59-g8ed1b From 42bad1da506cafa7041a02ab84033a724afe88ac Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Thu, 26 Apr 2007 00:57:41 -0700 Subject: [NETLINK]: Possible cleanups. - make the following needlessly global variables static: - core/rtnetlink.c: struct rtnl_msg_handlers[] - netfilter/nf_conntrack_proto.c: struct nf_ct_protos[] - make the following needlessly global functions static: - core/rtnetlink.c: rtnl_dump_all() - netlink/af_netlink.c: netlink_queue_skip() Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: David S. 
Miller --- include/net/netfilter/nf_conntrack_l4proto.h | 1 - include/net/netlink.h | 2 -- include/net/rtnetlink.h | 1 - net/core/rtnetlink.c | 6 ++---- net/netfilter/nf_conntrack_proto.c | 2 +- net/netlink/af_netlink.c | 4 ++-- 6 files changed, 5 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 8415182ec126..f46cb930414c 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -97,7 +97,6 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; #define MAX_NF_CT_PROTO 256 -extern struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX]; extern struct nf_conntrack_l4proto * __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto); diff --git a/include/net/netlink.h b/include/net/netlink.h index 2e4c90a98a7f..0bf325c29aff 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -217,8 +217,6 @@ struct nl_info { extern void netlink_run_queue(struct sock *sk, unsigned int *qlen, int (*cb)(struct sk_buff *, struct nlmsghdr *)); -extern void netlink_queue_skip(struct nlmsghdr *nlh, - struct sk_buff *skb); extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid, unsigned int group, int report, gfp_t flags); diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 086fa9e89509..3b3d4745618d 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -13,7 +13,6 @@ extern void rtnl_register(int protocol, int msgtype, rtnl_doit_func, rtnl_dumpit_func); extern int rtnl_unregister(int protocol, int msgtype); extern void rtnl_unregister_all(int protocol); -extern int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb); static inline int rtnl_msg_family(struct nlmsghdr *nlh) { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4fe0f4b3a345..cec111109155 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -97,7 +97,7 @@ int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len) return 0; } -struct rtnl_link *rtnl_msg_handlers[NPROTO]; +static struct rtnl_link *rtnl_msg_handlers[NPROTO]; static inline int rtm_msgindex(int msgtype) { @@ -765,7 +765,7 @@ errout: return err; } -int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) +static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) { int idx; int s_idx = cb->family; @@ -789,8 +789,6 @@ int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -EXPORT_SYMBOL_GPL(rtnl_dump_all); - void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) { struct sk_buff *skb; diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index e4aad2087f4d..6d947068c58f 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -28,7 +28,7 @@ #include #include -struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly; +static struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly; struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX] __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_l3protos); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 64d4b27f25ab..42d2fb94eff1 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -121,6 +121,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); static void 
netlink_destroy_callback(struct netlink_callback *cb); +static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb); static DEFINE_RWLOCK(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); @@ -1568,7 +1569,7 @@ void netlink_run_queue(struct sock *sk, unsigned int *qlen, * Pulls the given netlink message off the socket buffer so the next * call to netlink_queue_run() will not reconsider the message. */ -void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb) +static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb) { int msglen = NLMSG_ALIGN(nlh->nlmsg_len); @@ -1851,7 +1852,6 @@ core_initcall(netlink_proto_init); EXPORT_SYMBOL(netlink_ack); EXPORT_SYMBOL(netlink_run_queue); -EXPORT_SYMBOL(netlink_queue_skip); EXPORT_SYMBOL(netlink_broadcast); EXPORT_SYMBOL(netlink_dump_start); EXPORT_SYMBOL(netlink_kernel_create); -- cgit v1.2.3-59-g8ed1b From c1a068f6b0c38665c079e8d4ca241e24020eff36 Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Thu, 26 Apr 2007 00:58:39 -0700 Subject: [NET]: Delete unused header file linux/sdla_fr.h. Delete the unreferenced header file include/linux/sdla_fr.h. Signed-off-by: Robert P. J. Day Signed-off-by: Andrew Morton --- include/linux/sdla_fr.h | 638 ------------------------------------------------ 1 file changed, 638 deletions(-) delete mode 100644 include/linux/sdla_fr.h (limited to 'include') diff --git a/include/linux/sdla_fr.h b/include/linux/sdla_fr.h deleted file mode 100644 index cdfa77fcb06b..000000000000 --- a/include/linux/sdla_fr.h +++ /dev/null @@ -1,638 +0,0 @@ -/***************************************************************************** -* sdla_fr.h Sangoma frame relay firmware API definitions. -* -* Author: Gideon Hack -* Nenad Corbic -* -* Copyright: (c) 1995-2000 Sangoma Technologies Inc. -* -* This program is free software; you can redistribute it and/or -* modify it under the terms of the GNU General Public License -* as published by the Free Software Foundation; either version -* 2 of the License, or (at your option) any later version. -* ============================================================================ -* Oct 04, 1999 Gideon Hack Updated API structures -* Jun 02, 1999 Gideon Hack Modifications for S514 support -* Oct 12, 1997 Jaspreet Singh Added FR_READ_DLCI_IB_MAPPING -* Jul 21, 1997 Jaspreet Singh Changed FRRES_TOO_LONG and FRRES_TOO_MANY to -* 0x05 and 0x06 respectively. -* Dec 23, 1996 Gene Kozin v2.0 -* Apr 29, 1996 Gene Kozin v1.0 (merged version S502 & S508 definitions). -* Sep 26, 1995 Gene Kozin Initial version. -*****************************************************************************/ -#ifndef _SDLA_FR_H -#define _SDLA_FR_H - -/*---------------------------------------------------------------------------- - * Notes: - * ------ - * 1. All structures defined in this file are byte-alined. 
- * - * Compiler Platform - * -------- -------- - * GNU C Linux - */ - -#ifndef PACKED -# define PACKED __attribute__((packed)) -#endif /* PACKED */ - -/* Adapter memory layout */ -#define FR_MB_VECTOR 0xE000 /* mailbox window vector */ -#define FR502_RX_VECTOR 0xA000 /* S502 direct receive window vector */ -#define FR502_MBOX_OFFS 0xF60 /* S502 mailbox offset */ -#define FR508_MBOX_OFFS 0 /* S508 mailbox offset */ -#define FR502_FLAG_OFFS 0x1FF0 /* S502 status flags offset */ -#define FR508_FLAG_OFFS 0x1000 /* S508 status flags offset */ -#define FR502_RXMB_OFFS 0x900 /* S502 direct receive mailbox offset */ -#define FR508_TXBC_OFFS 0x1100 /* S508 Tx buffer info offset */ -#define FR508_RXBC_OFFS 0x1120 /* S508 Rx buffer info offset */ - -/* Important constants */ -#define FR502_MAX_DATA 4096 /* maximum data buffer length */ -#define FR508_MAX_DATA 4080 /* maximum data buffer length */ -#define MIN_LGTH_FR_DATA_CFG 300 /* min Information frame length -(for configuration purposes) */ -#define FR_MAX_NO_DATA_BYTES_IN_FRAME 15354 /* max Information frame length */ - -#define HIGHEST_VALID_DLCI 991 - -/****** Data Structures *****************************************************/ - -/*---------------------------------------------------------------------------- - * Frame relay command block. - */ -typedef struct fr_cmd -{ - unsigned char command PACKED; /* command code */ - unsigned short length PACKED; /* length of data buffer */ - unsigned char result PACKED; /* return code */ - unsigned short dlci PACKED; /* DLCI number */ - unsigned char attr PACKED; /* FECN, BECN, DE and C/R bits */ - unsigned short rxlost1 PACKED; /* frames discarded at int. level */ - unsigned long rxlost2 PACKED; /* frames discarded at app. level */ - unsigned char rsrv[2] PACKED; /* reserved for future use */ -} fr_cmd_t; - -/* 'command' field defines */ -#define FR_WRITE 0x01 -#define FR_READ 0x02 -#define FR_ISSUE_IS_FRAME 0x03 -#define FR_SET_CONFIG 0x10 -#define FR_READ_CONFIG 0x11 -#define FR_COMM_DISABLE 0x12 -#define FR_COMM_ENABLE 0x13 -#define FR_READ_STATUS 0x14 -#define FR_READ_STATISTICS 0x15 -#define FR_FLUSH_STATISTICS 0x16 -#define FR_LIST_ACTIVE_DLCI 0x17 -#define FR_FLUSH_DATA_BUFFERS 0x18 -#define FR_READ_ADD_DLC_STATS 0x19 -#define FR_ADD_DLCI 0x20 -#define FR_DELETE_DLCI 0x21 -#define FR_ACTIVATE_DLCI 0x22 -#define FR_DEACTIVATE_DLCI 0x22 -#define FR_READ_MODEM_STATUS 0x30 -#define FR_SET_MODEM_STATUS 0x31 -#define FR_READ_ERROR_STATS 0x32 -#define FR_FLUSH_ERROR_STATS 0x33 -#define FR_READ_DLCI_IB_MAPPING 0x34 -#define FR_READ_CODE_VERSION 0x40 -#define FR_SET_INTR_MODE 0x50 -#define FR_READ_INTR_MODE 0x51 -#define FR_SET_TRACE_CONFIG 0x60 -#define FR_FT1_STATUS_CTRL 0x80 -#define FR_SET_FT1_MODE 0x81 - -/* Special UDP drivers management commands */ -#define FPIPE_ENABLE_TRACING 0x41 -#define FPIPE_DISABLE_TRACING 0x42 -#define FPIPE_GET_TRACE_INFO 0x43 -#define FPIPE_FT1_READ_STATUS 0x44 -#define FPIPE_DRIVER_STAT_IFSEND 0x45 -#define FPIPE_DRIVER_STAT_INTR 0x46 -#define FPIPE_DRIVER_STAT_GEN 0x47 -#define FPIPE_FLUSH_DRIVER_STATS 0x48 -#define FPIPE_ROUTER_UP_TIME 0x49 - -/* 'result' field defines */ -#define FRRES_OK 0x00 /* command executed successfully */ -#define FRRES_DISABLED 0x01 /* communications not enabled */ -#define FRRES_INOPERATIVE 0x02 /* channel inoperative */ -#define FRRES_DLCI_INACTIVE 0x03 /* DLCI is inactive */ -#define FRRES_DLCI_INVALID 0x04 /* DLCI is not configured */ -#define FRRES_TOO_LONG 0x05 -#define FRRES_TOO_MANY 0x06 -#define FRRES_CIR_OVERFLOW 0x07 /* Tx 
throughput has exceeded CIR */ -#define FRRES_BUFFER_OVERFLOW 0x08 -#define FRRES_MODEM_FAILURE 0x10 /* DCD and/or CTS dropped */ -#define FRRES_CHANNEL_DOWN 0x11 /* channel became inoperative */ -#define FRRES_CHANNEL_UP 0x12 /* channel became operative */ -#define FRRES_DLCI_CHANGE 0x13 /* DLCI status (or number) changed */ -#define FRRES_DLCI_MISMATCH 0x14 -#define FRRES_INVALID_CMD 0x1F /* invalid command */ - -/* 'attr' field defines */ -#define FRATTR_ - -/*---------------------------------------------------------------------------- - * Frame relay mailbox. - * This structure is located at offset FR50?_MBOX_OFFS into FR_MB_VECTOR. - * For S502 it is also located at offset FR502_RXMB_OFFS into - * FR502_RX_VECTOR. - */ -typedef struct fr_mbox -{ - unsigned char opflag PACKED; /* 00h: execution flag */ - fr_cmd_t cmd PACKED; /* 01h: command block */ - unsigned char data[1] PACKED; /* 10h: variable length data buffer */ -} fr_mbox_t; - -/*---------------------------------------------------------------------------- - * S502 frame relay status flags. - * This structure is located at offset FR502_FLAG_OFFS into FR_MB_VECTOR. - */ -typedef struct fr502_flags -{ - unsigned char rsrv1[1] PACKED; /* 00h: */ - unsigned char tx_ready PACKED; /* 01h: Tx buffer available */ - unsigned char rx_ready PACKED; /* 02h: Rx frame available */ - unsigned char event PACKED; /* 03h: asynchronous event */ - unsigned char mstatus PACKED; /* 04h: modem status */ - unsigned char rsrv2[8] PACKED; /* 05h: */ - unsigned char iflag PACKED; /* 0Dh: interrupt flag */ - unsigned char imask PACKED; /* 0Eh: interrupt mask */ -} fr502_flags_t; - -/*---------------------------------------------------------------------------- - * S508 frame relay status flags. - * This structure is located at offset FR508_FLAG_OFFS into FR_MB_VECTOR. - */ -typedef struct fr508_flags -{ - unsigned char rsrv1[3] PACKED; /* 00h: reserved */ - unsigned char event PACKED; /* 03h: asynchronous event */ - unsigned char mstatus PACKED; /* 04h: modem status */ - unsigned char rsrv2[11] PACKED; /* 05h: reserved */ - unsigned char iflag PACKED; /* 10h: interrupt flag */ - unsigned char imask PACKED; /* 11h: interrupt mask */ - unsigned long tse_offs PACKED; /* 12h: Tx status element */ - unsigned short dlci PACKED; /* 16h: DLCI NUMBER */ -} fr508_flags_t; - -/* 'event' field defines */ -#define FR_EVENT_STATUS 0x01 /* channel status change */ -#define FR_EVENT_DLC_STATUS 0x02 /* DLC status change */ -#define FR_EVENT_BAD_DLCI 0x04 /* FSR included wrong DLCI */ -#define FR_EVENT_LINK_DOWN 0x40 /* DCD or CTS low */ - -/* 'mstatus' field defines */ -#define FR_MDM_DCD 0x08 /* mdm_status: DCD */ -#define FR_MDM_CTS 0x20 /* mdm_status: CTS */ - -/* 'iflag' & 'imask' fields defines */ -#define FR_INTR_RXRDY 0x01 /* Rx ready */ -#define FR_INTR_TXRDY 0x02 /* Tx ready */ -#define FR_INTR_MODEM 0x04 /* modem status change (DCD, CTS) */ -#define FR_INTR_READY 0x08 /* interface command completed */ -#define FR_INTR_DLC 0x10 /* DLC status change */ -#define FR_INTR_TIMER 0x20 /* millisecond timer */ -#define FR_INTR_TX_MULT_DLCIs 0x80 /* Tx interrupt on multiple DLCIs */ - - -/*---------------------------------------------------------------------------- - * Receive Buffer Configuration Info. S508 only! - * This structure is located at offset FR508_RXBC_OFFS into FR_MB_VECTOR. 
- */ -typedef struct fr_buf_info -{ - unsigned short rse_num PACKED; /* 00h: number of status elements */ - unsigned long rse_base PACKED; /* 02h: receive status array base */ - unsigned long rse_next PACKED; /* 06h: next status element */ - unsigned long buf_base PACKED; /* 0Ah: rotational buffer base */ - unsigned short reserved PACKED; /* 0Eh: */ - unsigned long buf_top PACKED; /* 10h: rotational buffer top */ -} fr_buf_info_t; - -/*---------------------------------------------------------------------------- - * Buffer Status Element. S508 only! - * Array of structures of this type is located at offset defined by the - * 'rse_base' field of the frBufInfo_t structure into absolute adapter - * memory address space. - */ -typedef struct fr_rx_buf_ctl -{ - unsigned char flag PACKED; /* 00h: ready flag */ - unsigned short length PACKED; /* 01h: frame length */ - unsigned short dlci PACKED; /* 03h: DLCI */ - unsigned char attr PACKED; /* 05h: FECN/BECN/DE/CR */ - unsigned short tmstamp PACKED; /* 06h: time stamp */ - unsigned short rsrv[2] PACKED; /* 08h: */ - unsigned long offset PACKED; /* 0Ch: buffer absolute address */ -} fr_rx_buf_ctl_t; - -typedef struct fr_tx_buf_ctl -{ - unsigned char flag PACKED; /* 00h: ready flag */ - unsigned short rsrv0[2] PACKED; /* 01h: */ - unsigned short length PACKED; /* 05h: frame length */ - unsigned short dlci PACKED; /* 07h: DLCI */ - unsigned char attr PACKED; /* 09h: FECN/BECN/DE/CR */ - unsigned short rsrv1 PACKED; /* 0Ah: */ - unsigned long offset PACKED; /* 0Ch: buffer absolute address */ -} fr_tx_buf_ctl_t; - -/*---------------------------------------------------------------------------- - * Global Configuration Block. Passed to FR_SET_CONFIG command when dlci == 0. - */ -typedef struct fr_conf -{ - unsigned short station PACKED; /* 00h: CPE/Node */ - unsigned short options PACKED; /* 02h: configuration options */ - unsigned short kbps PACKED; /* 04h: baud rate in kbps */ - unsigned short port PACKED; /* 06h: RS-232/V.35 */ - unsigned short mtu PACKED; /* 08h: max. 
transmit length */ - unsigned short t391 PACKED; /* 0Ah: */ - unsigned short t392 PACKED; /* 0Ch: */ - unsigned short n391 PACKED; /* 0Eh: */ - unsigned short n392 PACKED; /* 10h: */ - unsigned short n393 PACKED; /* 12h: */ - unsigned short cir_fwd PACKED; /* 14h: */ - unsigned short bc_fwd PACKED; /* 16h: */ - unsigned short be_fwd PACKED; /* 18h: */ - unsigned short cir_bwd PACKED; /* 1Ah: */ - unsigned short bc_bwd PACKED; /* 1Ch: */ - unsigned short be_bwd PACKED; /* 1Eh: */ - unsigned short dlci[0] PACKED; /* 20h: */ -} fr_conf_t; - -/* 'station_type' defines */ -#define FRCFG_STATION_CPE 0 -#define FRCFG_STATION_NODE 1 - -/* 'conf_flags' defines */ -#define FRCFG_IGNORE_TX_CIR 0x0001 -#define FRCFG_IGNORE_RX_CIR 0x0002 -#define FRCFG_DONT_RETRANSMIT 0x0004 -#define FRCFG_IGNORE_CBS 0x0008 -#define FRCFG_THROUGHPUT 0x0010 /* enable throughput calculation */ -#define FRCFG_DIRECT_RX 0x0080 /* enable direct receive buffer */ -#define FRCFG_AUTO_CONFIG 0x8000 /* enable auto DLCI configuration */ - -/* 'baud_rate' defines */ -#define FRCFG_BAUD_1200 12 -#define FRCFG_BAUD_2400 24 -#define FRCFG_BAUD_4800 48 -#define FRCFG_BAUD_9600 96 -#define FRCFG_BAUD_19200 19 -#define FRCFG_BAUD_38400 38 -#define FRCFG_BAUD_56000 56 -#define FRCFG_BAUD_64000 64 -#define FRCFG_BAUD_128000 128 - -/* 'port_mode' defines */ -#define FRCFG_MODE_EXT_CLK 0x0000 -#define FRCFG_MODE_INT_CLK 0x0001 -#define FRCFG_MODE_V35 0x0000 /* S508 only */ -#define FRCFG_MODE_RS232 0x0002 /* S508 only */ - -/* defines for line tracing */ - -/* the line trace status element presented by the frame relay code */ -typedef struct { - unsigned char flag PACKED; /* ready flag */ - unsigned short length PACKED; /* trace length */ - unsigned char rsrv0[2] PACKED; /* reserved */ - unsigned char attr PACKED; /* trace attributes */ - unsigned short tmstamp PACKED; /* time stamp */ - unsigned char rsrv1[4] PACKED; /* reserved */ - unsigned long offset PACKED; /* buffer absolute address */ -} fr_trc_el_t; - -typedef struct { - unsigned char status PACKED; /* status flag */ - unsigned char data_passed PACKED; /* 0 if no data passed, 1 if */ - /* data passed */ - unsigned short length PACKED; /* frame length */ - unsigned short tmstamp PACKED; /* time stamp */ -} fpipemon_trc_hdr_t; - -typedef struct { - fpipemon_trc_hdr_t fpipemon_trc_hdr PACKED; - unsigned char data[FR_MAX_NO_DATA_BYTES_IN_FRAME] PACKED; -} fpipemon_trc_t; - -/* bit settings for the 'status' byte - note that bits 1, 2 and 3 are used */ -/* for returning the number of frames being passed to fpipemon */ -#define TRC_OUTGOING_FRM 0x01 -#define TRC_ABORT_ERROR 0x10 -#define TRC_CRC_ERROR 0x20 -#define TRC_OVERRUN_ERROR 0x40 -#define MORE_TRC_DATA 0x80 - -#define MAX_FRMS_TRACED 0x07 - -#define NO_TRC_ELEMENTS_OFF 0x9000 -#define BASE_TRC_ELEMENTS_OFF 0x9002 -#define TRC_ACTIVE 0x01 -#define FLUSH_TRC_BUFFERS 0x02 -#define FLUSH_TRC_STATISTICS 0x04 -#define TRC_SIGNALLING_FRMS 0x10 -#define TRC_INFO_FRMS 0x20 -#define ACTIVATE_TRC (TRC_ACTIVE | TRC_SIGNALLING_FRMS | TRC_INFO_FRMS) -#define RESET_TRC (FLUSH_TRC_BUFFERS | FLUSH_TRC_STATISTICS) - -/*---------------------------------------------------------------------------- - * Channel configuration. - * This structure is passed to the FR_SET_CONFIG command when dlci != 0. 
- */ -typedef struct fr_dlc_conf -{ - unsigned short conf_flags PACKED; /* 00h: configuration bits */ - unsigned short cir_fwd PACKED; /* 02h: */ - unsigned short bc_fwd PACKED; /* 04h: */ - unsigned short be_fwd PACKED; /* 06h: */ - unsigned short cir_bwd PACKED; /* 08h: */ - unsigned short bc_bwd PACKED; /* 0Ah: */ - unsigned short be_bwd PACKED; /* 0Ch: */ -} fr_dlc_conf_t; - -/*---------------------------------------------------------------------------- - * S502 interrupt mode control block. - * This structure is passed to the FR_SET_INTR_FLAGS and returned by the - * FR_READ_INTR_FLAGS commands. - */ -typedef struct fr502_intr_ctl -{ - unsigned char mode PACKED; /* 00h: interrupt enable flags */ - unsigned short tx_len PACKED; /* 01h: required Tx buffer size */ -} fr502_intr_ctl_t; - -/*---------------------------------------------------------------------------- - * S508 interrupt mode control block. - * This structure is passed to the FR_SET_INTR_FLAGS and returned by the - * FR_READ_INTR_FLAGS commands. - */ -typedef struct fr508_intr_ctl -{ - unsigned char mode PACKED; /* 00h: interrupt enable flags */ - unsigned short tx_len PACKED; /* 01h: required Tx buffer size */ - unsigned char irq PACKED; /* 03h: IRQ level to activate */ - unsigned char flags PACKED; /* 04h: ?? */ - unsigned short timeout PACKED; /* 05h: ms, for timer interrupt */ -} fr508_intr_ctl_t; - -/*---------------------------------------------------------------------------- - * Channel status. - * This structure is returned by the FR_READ_STATUS command. - */ -typedef struct fr_dlc_Status -{ - unsigned char status PACKED; /* 00h: link/DLCI status */ - struct - { - unsigned short dlci PACKED; /* 01h: DLCI number */ - unsigned char status PACKED; /* 03h: DLCI status */ - } circuit[1] PACKED; -} fr_dlc_status_t; - -/* 'status' defines */ -#define FR_LINK_INOPER 0x00 /* for global status (DLCI == 0) */ -#define FR_LINK_OPER 0x01 -#define FR_DLCI_DELETED 0x01 /* for circuit status (DLCI != 0) */ -#define FR_DLCI_ACTIVE 0x02 -#define FR_DLCI_WAITING 0x04 -#define FR_DLCI_NEW 0x08 -#define FR_DLCI_REPORT 0x40 - -/*---------------------------------------------------------------------------- - * Global Statistics Block. - * This structure is returned by the FR_READ_STATISTICS command when - * dcli == 0. 
- */ -typedef struct fr_link_stat -{ - unsigned short rx_too_long PACKED; /* 00h: */ - unsigned short rx_dropped PACKED; /* 02h: */ - unsigned short rx_dropped2 PACKED; /* 04h: */ - unsigned short rx_bad_dlci PACKED; /* 06h: */ - unsigned short rx_bad_format PACKED; /* 08h: */ - unsigned short retransmitted PACKED; /* 0Ah: */ - unsigned short cpe_tx_FSE PACKED; /* 0Ch: */ - unsigned short cpe_tx_LIV PACKED; /* 0Eh: */ - unsigned short cpe_rx_FSR PACKED; /* 10h: */ - unsigned short cpe_rx_LIV PACKED; /* 12h: */ - unsigned short node_rx_FSE PACKED; /* 14h: */ - unsigned short node_rx_LIV PACKED; /* 16h: */ - unsigned short node_tx_FSR PACKED; /* 18h: */ - unsigned short node_tx_LIV PACKED; /* 1Ah: */ - unsigned short rx_ISF_err PACKED; /* 1Ch: */ - unsigned short rx_unsolicited PACKED; /* 1Eh: */ - unsigned short rx_SSN_err PACKED; /* 20h: */ - unsigned short rx_RSN_err PACKED; /* 22h: */ - unsigned short T391_timeouts PACKED; /* 24h: */ - unsigned short T392_timeouts PACKED; /* 26h: */ - unsigned short N392_reached PACKED; /* 28h: */ - unsigned short cpe_SSN_RSN PACKED; /* 2Ah: */ - unsigned short current_SSN PACKED; /* 2Ch: */ - unsigned short current_RSN PACKED; /* 2Eh: */ - unsigned short curreny_T391 PACKED; /* 30h: */ - unsigned short current_T392 PACKED; /* 32h: */ - unsigned short current_N392 PACKED; /* 34h: */ - unsigned short current_N393 PACKED; /* 36h: */ -} fr_link_stat_t; - -/*---------------------------------------------------------------------------- - * DLCI statistics. - * This structure is returned by the FR_READ_STATISTICS command when - * dlci != 0. - */ -typedef struct fr_dlci_stat -{ - unsigned long tx_frames PACKED; /* 00h: */ - unsigned long tx_bytes PACKED; /* 04h: */ - unsigned long rx_frames PACKED; /* 08h: */ - unsigned long rx_bytes PACKED; /* 0Ch: */ - unsigned long rx_dropped PACKED; /* 10h: */ - unsigned long rx_inactive PACKED; /* 14h: */ - unsigned long rx_exceed_CIR PACKED; /* 18h: */ - unsigned long rx_DE_set PACKED; /* 1Ch: */ - unsigned long tx_throughput PACKED; /* 20h: */ - unsigned long tx_calc_timer PACKED; /* 24h: */ - unsigned long rx_throughput PACKED; /* 28h: */ - unsigned long rx_calc_timer PACKED; /* 2Ch: */ -} fr_dlci_stat_t; - -/*---------------------------------------------------------------------------- - * Communications error statistics. - * This structure is returned by the FR_READ_ERROR_STATS command. - */ -typedef struct fr_comm_stat -{ - unsigned char rx_overruns PACKED; /* 00h: */ - unsigned char rx_bad_crc PACKED; /* 01h: */ - unsigned char rx_aborts PACKED; /* 02h: */ - unsigned char rx_too_long PACKED; /* 03h: */ - unsigned char tx_aborts PACKED; /* 04h: */ - unsigned char tx_underruns PACKED; /* 05h: */ - unsigned char tx_missed_undr PACKED; /* 06h: */ - unsigned char dcd_dropped PACKED; /* 07h: */ - unsigned char cts_dropped PACKED; /* 08h: */ -} fr_comm_stat_t; - -/*---------------------------------------------------------------------------- - * Defines for the FR_ISSUE_IS_FRAME command. 
- */ -#define FR_ISF_LVE 2 /* issue Link Verification Enquiry */ -#define FR_ISF_FSE 3 /* issue Full Status Enquiry */ - -/*---------------------------------------------------------------------------- - * Frame Relay ARP Header -- Used for Dynamic route creation with InvARP - */ - -typedef struct arphdr_fr - { - unsigned short ar_hrd PACKED; /* format of hardware addr */ - unsigned short ar_pro PACKED; /* format of protocol addr */ - unsigned char ar_hln PACKED; /* length of hardware addr */ - unsigned char ar_pln PACKED; /* length of protocol addr */ - unsigned short ar_op PACKED; /* ARP opcode */ - unsigned short ar_sha PACKED; /* Sender DLCI addr 2 bytes */ - unsigned long ar_sip PACKED; /* Sender IP addr 4 bytes */ - unsigned short ar_tha PACKED; /* Target DLCI addr 2 bytes */ - unsigned long ar_tip PACKED; /* Target IP addr 4 bytes */ - } arphdr_fr_t; - -/*---------------------------------------------------------------------------- - * Frame Relay RFC 1490 SNAP Header -- Used to check for ARP packets - */ -typedef struct arphdr_1490 - { - unsigned char control PACKED; /* UI, etc... */ - unsigned char pad PACKED; /* Pad */ - unsigned char NLPID PACKED; /* SNAP */ - unsigned char OUI[3] PACKED; /* Ethertype, etc... */ - unsigned short PID PACKED; /* ARP, IP, etc... */ - } arphdr_1490_t; - -/* UDP/IP packet (for UDP management) layout */ - -/* The embedded control block for UDP mgmt - This is essentially a mailbox structure, without the large data field */ - -typedef struct { - unsigned char opp_flag PACKED; /* the opp flag */ - unsigned char command PACKED; /* command code */ - unsigned short length PACKED; /* length of data buffer */ - unsigned char result PACKED; /* return code */ - unsigned short dlci PACKED; /* DLCI number */ - unsigned char attr PACKED; /* FECN, BECN, DE and C/R bits */ - unsigned short rxlost1 PACKED; /* frames discarded at int. level */ - unsigned long rxlost2 PACKED; /* frames discarded at app. 
level */ - unsigned char rsrv[2] PACKED; /* reserved for future use */ -} cblock_t; - - -/* UDP management packet layout (data area of ip packet) */ - -typedef struct { - unsigned char control PACKED; - unsigned char NLPID PACKED; -} fr_encap_hdr_t; - -typedef struct { -// fr_encap_hdr_t fr_encap_hdr PACKED; - ip_pkt_t ip_pkt PACKED; - udp_pkt_t udp_pkt PACKED; - wp_mgmt_t wp_mgmt PACKED; - cblock_t cblock PACKED; - unsigned char data[4080] PACKED; -} fr_udp_pkt_t; - - -/* valid ip_protocol for UDP management */ -#define UDPMGMT_UDP_PROTOCOL 0x11 - -#define UDPMGMT_FPIPE_SIGNATURE "FPIPE8ND" -#define UDPMGMT_DRVRSTATS_SIGNATURE "DRVSTATS" - -/* values for request/reply byte */ -#define UDPMGMT_REQUEST 0x01 -#define UDPMGMT_REPLY 0x02 -#define UDP_OFFSET 12 - -typedef struct { - unsigned long if_send_entry; - unsigned long if_send_skb_null; - unsigned long if_send_broadcast; - unsigned long if_send_multicast; - unsigned long if_send_critical_ISR; - unsigned long if_send_critical_non_ISR; - unsigned long if_send_busy; - unsigned long if_send_busy_timeout; - unsigned long if_send_DRVSTATS_request; - unsigned long if_send_FPIPE_request; - unsigned long if_send_wan_disconnected; - unsigned long if_send_dlci_disconnected; - unsigned long if_send_no_bfrs; - unsigned long if_send_adptr_bfrs_full; - unsigned long if_send_bfrs_passed_to_adptr; - unsigned long if_send_consec_send_fail; -} drvstats_if_send_t; - -typedef struct { - unsigned long rx_intr_no_socket; - unsigned long rx_intr_dev_not_started; - unsigned long rx_intr_DRVSTATS_request; - unsigned long rx_intr_FPIPE_request; - unsigned long rx_intr_bfr_not_passed_to_stack; - unsigned long rx_intr_bfr_passed_to_stack; - } drvstats_rx_intr_t; - -typedef struct { - unsigned long UDP_FPIPE_mgmt_kmalloc_err; - unsigned long UDP_FPIPE_mgmt_direction_err; - unsigned long UDP_FPIPE_mgmt_adptr_type_err; - unsigned long UDP_FPIPE_mgmt_adptr_cmnd_OK; - unsigned long UDP_FPIPE_mgmt_adptr_cmnd_timeout; - unsigned long UDP_FPIPE_mgmt_adptr_send_passed; - unsigned long UDP_FPIPE_mgmt_adptr_send_failed; - unsigned long UDP_FPIPE_mgmt_not_passed_to_stack; - unsigned long UDP_FPIPE_mgmt_passed_to_stack; - unsigned long UDP_FPIPE_mgmt_no_socket; - unsigned long UDP_DRVSTATS_mgmt_kmalloc_err; - unsigned long UDP_DRVSTATS_mgmt_adptr_cmnd_OK; - unsigned long UDP_DRVSTATS_mgmt_adptr_cmnd_timeout; - unsigned long UDP_DRVSTATS_mgmt_adptr_send_passed; - unsigned long UDP_DRVSTATS_mgmt_adptr_send_failed; - unsigned long UDP_DRVSTATS_mgmt_not_passed_to_stack; - unsigned long UDP_DRVSTATS_mgmt_passed_to_stack; - unsigned long UDP_DRVSTATS_mgmt_no_socket; -} drvstats_gen_t; - -typedef struct { - unsigned char attr PACKED; - unsigned short time_stamp PACKED; - unsigned char reserved[13] PACKED; -} api_rx_hdr_t; - -typedef struct { - api_rx_hdr_t api_rx_hdr PACKED; - void * data PACKED; -} api_rx_element_t; - -typedef struct { - unsigned char attr PACKED; - unsigned char reserved[15] PACKED; -} api_tx_hdr_t; - -typedef struct { - api_tx_hdr_t api_tx_hdr PACKED; - void * data PACKED; -} api_tx_element_t; - -#ifdef _MSC_ -# pragma pack() -#endif -#endif /* _SDLA_FR_H */ - -- cgit v1.2.3-59-g8ed1b From 48491e6bdb8fa73751cc95f740175ec799db5d55 Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Thu, 26 Apr 2007 00:59:27 -0700 Subject: [NET]: Delete unused header file linux/if_wanpipe_common.h Delete the unreferenced header file include/linux/if_wanpipe_common.h, as well as the reference to it in the Doc file. Signed-off-by: Robert P. J. 
Day Signed-off-by: Andrew Morton Signed-off-by: David S. Miller --- Documentation/networking/wan-router.txt | 1 - include/linux/if_wanpipe_common.h | 58 --------------------------------- 2 files changed, 59 deletions(-) delete mode 100644 include/linux/if_wanpipe_common.h (limited to 'include') diff --git a/Documentation/networking/wan-router.txt b/Documentation/networking/wan-router.txt index 653978dcea7f..07dd6d9930a1 100644 --- a/Documentation/networking/wan-router.txt +++ b/Documentation/networking/wan-router.txt @@ -250,7 +250,6 @@ PRODUCT COMPONENTS AND RELATED FILES sdladrv.h SDLA support module API definitions sdlasfm.h SDLA firmware module definitions if_wanpipe.h WANPIPE Socket definitions - if_wanpipe_common.h WANPIPE Socket/Driver common definitions. sdlapci.h WANPIPE PCI definitions diff --git a/include/linux/if_wanpipe_common.h b/include/linux/if_wanpipe_common.h deleted file mode 100644 index 6e5461d69fdd..000000000000 --- a/include/linux/if_wanpipe_common.h +++ /dev/null @@ -1,58 +0,0 @@ -/***************************************************************************** -* if_wanipe_common.h Sangoma Driver/Socket common area definitions. -* -* Author: Nenad Corbic -* -* Copyright: (c) 2000 Sangoma Technologies Inc. -* -* This program is free software; you can redistribute it and/or -* modify it under the terms of the GNU General Public License -* as published by the Free Software Foundation; either version -* 2 of the License, or (at your option) any later version. -* ============================================================================ -* Jan 13, 2000 Nenad Corbic Initial version -*****************************************************************************/ - - -#ifndef _WANPIPE_SOCK_DRIVER_COMMON_H -#define _WANPIPE_SOCK_DRIVER_COMMON_H - -typedef struct { - struct net_device *slave; - atomic_t packet_sent; - atomic_t receive_block; - atomic_t command; - atomic_t disconnect; - atomic_t driver_busy; - long common_critical; - struct timer_list *tx_timer; - struct sock *sk; /* Wanpipe Sock bind's here */ - int (*func)(struct sk_buff *skb, struct net_device *dev, - struct sock *sk); - - struct work_struct wanpipe_work; /* deferred keventd work */ - unsigned char rw_bind; /* Sock bind state */ - unsigned char usedby; - unsigned char state; - unsigned char svc; - unsigned short lcn; - void *mbox; -} wanpipe_common_t; - - -enum { - WANSOCK_UNCONFIGURED, /* link/channel is not configured */ - WANSOCK_DISCONNECTED, /* link/channel is disconnected */ - WANSOCK_CONNECTING, /* connection is in progress */ - WANSOCK_CONNECTED, /* link/channel is operational */ - WANSOCK_LIMIT, /* for verification only */ - WANSOCK_DUALPORT, /* for Dual Port cards */ - WANSOCK_DISCONNECTING, - WANSOCK_BINDED, - WANSOCK_BIND_LISTEN, - WANSOCK_LISTEN -}; - -#endif - - -- cgit v1.2.3-59-g8ed1b From 777a447529ad138f5fceb9c9ad28bab19848f277 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 22 Feb 2007 06:24:10 -0800 Subject: [SPARC64]: Unify timer interrupt handler. Things were scattered all over the place, split between SMP and non-SMP. Unify it all so that dyntick support is easier to add. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/irq.c | 26 ------------- arch/sparc64/kernel/smp.c | 87 +------------------------------------------ arch/sparc64/kernel/time.c | 43 +++++++++------------ arch/sparc64/kernel/ttable.S | 6 +-- include/asm-sparc64/cpudata.h | 4 +- include/asm-sparc64/smp.h | 4 +- include/asm-sparc64/ttable.h | 27 -------------- 7 files changed, 25 insertions(+), 172 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index c443db184371..d1bb3b3f2639 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -589,32 +589,6 @@ void ack_bad_irq(unsigned int virt_irq) ino, virt_irq); } -#ifndef CONFIG_SMP -extern irqreturn_t timer_interrupt(int, void *); - -void timer_irq(int irq, struct pt_regs *regs) -{ - unsigned long clr_mask = 1 << irq; - unsigned long tick_mask = tick_ops->softint_mask; - struct pt_regs *old_regs; - - if (get_softint() & tick_mask) { - irq = 0; - clr_mask = tick_mask; - } - clear_softint(clr_mask); - - old_regs = set_irq_regs(regs); - irq_enter(); - - kstat_this_cpu.irqs[0]++; - timer_interrupt(irq, NULL); - - irq_exit(); - set_irq_regs(old_regs); -} -#endif - void handler_irq(int irq, struct pt_regs *regs) { struct ino_bucket *bucket; diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index fc99f7b8012f..39deb0346eb5 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -45,7 +45,7 @@ extern void calibrate_delay(void); /* Please don't make this stuff initdata!!! --DaveM */ -static unsigned char boot_cpu_id; +unsigned char boot_cpu_id; cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; @@ -81,8 +81,6 @@ void __init smp_store_cpu_info(int id) struct device_node *dp; int def; - /* multiplier and counter set by - smp_setup_percpu_timer() */ cpu_data(id).udelay_val = loops_per_jiffy; cpu_find_by_mid(id, &dp); @@ -1180,75 +1178,10 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs) preempt_enable(); } -#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier -#define prof_counter(__cpu) cpu_data(__cpu).counter - -void smp_percpu_timer_interrupt(struct pt_regs *regs) -{ - unsigned long compare, tick, pstate; - int cpu = smp_processor_id(); - int user = user_mode(regs); - struct pt_regs *old_regs; - - /* - * Check for level 14 softint. - */ - { - unsigned long tick_mask = tick_ops->softint_mask; - - if (!(get_softint() & tick_mask)) { - extern void handler_irq(int, struct pt_regs *); - - handler_irq(14, regs); - return; - } - clear_softint(tick_mask); - } - - old_regs = set_irq_regs(regs); - do { - profile_tick(CPU_PROFILING); - if (!--prof_counter(cpu)) { - irq_enter(); - - if (cpu == boot_cpu_id) { - kstat_this_cpu.irqs[0]++; - timer_tick_interrupt(regs); - } - - update_process_times(user); - - irq_exit(); - - prof_counter(cpu) = prof_multiplier(cpu); - } - - /* Guarantee that the following sequences execute - * uninterrupted. - */ - __asm__ __volatile__("rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - - compare = tick_ops->add_compare(current_tick_offset); - tick = tick_ops->get_tick(); - - /* Restore PSTATE_IE. 
*/ - __asm__ __volatile__("wrpr %0, 0x0, %%pstate" - : /* no outputs */ - : "r" (pstate)); - } while (time_after_eq(tick, compare)); - set_irq_regs(old_regs); -} - static void __init smp_setup_percpu_timer(void) { - int cpu = smp_processor_id(); unsigned long pstate; - prof_counter(cpu) = prof_multiplier(cpu) = 1; - /* Guarantee that the following sequences execute * uninterrupted. */ @@ -1269,28 +1202,12 @@ void __init smp_tick_init(void) { boot_cpu_id = hard_smp_processor_id(); current_tick_offset = timer_tick_offset; - - prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1; } /* /proc/profile writes can call this, don't __init it please. */ -static DEFINE_SPINLOCK(prof_setup_lock); - int setup_profiling_timer(unsigned int multiplier) { - unsigned long flags; - int i; - - if ((!multiplier) || (timer_tick_offset / multiplier) < 1000) - return -EINVAL; - - spin_lock_irqsave(&prof_setup_lock, flags); - for_each_possible_cpu(i) - prof_multiplier(i) = multiplier; - current_tick_offset = (timer_tick_offset / multiplier); - spin_unlock_irqrestore(&prof_setup_lock, flags); - - return 0; + return -EINVAL; } static void __init smp_tune_scheduling(void) diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index d457079118dc..48e1217c1e42 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -423,12 +424,6 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = { .softint_mask = 1UL << 0, }; -/* timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick - * - * NOTE: On SUN5 systems the ticker interrupt comes in using 2 - * interrupts, one at level14 and one with softint bit 0. - */ unsigned long timer_tick_offset __read_mostly; static unsigned long timer_ticks_per_nsec_quotient __read_mostly; @@ -487,18 +482,27 @@ void notify_arch_cmos_timer(void) mod_timer(&sync_cmos_timer, jiffies + 1); } -irqreturn_t timer_interrupt(int irq, void *dev_id) +void timer_interrupt(int irq, struct pt_regs *regs) { + struct pt_regs *old_regs = set_irq_regs(regs); unsigned long ticks, compare, pstate; + unsigned long tick_mask = tick_ops->softint_mask; + + clear_softint(tick_mask); + + irq_enter(); - write_seqlock(&xtime_lock); + kstat_this_cpu.irqs[0]++; do { -#ifndef CONFIG_SMP profile_tick(CPU_PROFILING); update_process_times(user_mode(get_irq_regs())); -#endif - do_timer(1); + + if (smp_processor_id() == boot_cpu_id) { + write_seqlock(&xtime_lock); + do_timer(1); + write_sequnlock(&xtime_lock); + } /* Guarantee that the following sequences execute * uninterrupted. @@ -515,24 +519,13 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : /* no outputs */ : "r" (pstate)); - } while (time_after_eq(ticks, compare)); + } while (unlikely(time_after_eq(ticks, compare))); - write_sequnlock(&xtime_lock); + irq_exit(); - return IRQ_HANDLED; + set_irq_regs(old_regs); } -#ifdef CONFIG_SMP -void timer_tick_interrupt(struct pt_regs *regs) -{ - write_seqlock(&xtime_lock); - - do_timer(1); - - write_sequnlock(&xtime_lock); -} -#endif - /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). 
*/ static void __init kick_start_clock(void) { diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index d7d2a8bdc66e..7575aa371da8 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S @@ -60,11 +60,7 @@ tl0_irq4: BTRAP(0x44) tl0_irq5: TRAP_IRQ(handler_irq, 5) tl0_irq6: BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49) tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d) -#ifndef CONFIG_SMP -tl0_irq14: TRAP_IRQ(timer_irq, 14) -#else -tl0_irq14: TICK_SMP_IRQ -#endif +tl0_irq14: TRAP_IRQ(timer_interrupt, 14) tl0_irq15: TRAP_IRQ(handler_irq, 15) tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55) tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b) diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index f2cc9411b4c7..e89922d6718c 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -17,8 +17,8 @@ typedef struct { /* Dcache line 1 */ unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ - unsigned int multiplier; - unsigned int counter; + unsigned int __pad0_1; + unsigned int __pad0_2; unsigned int __pad1; unsigned long clock_tick; /* %tick's per second */ unsigned long udelay_val; diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index 388249b751c3..cca54804b722 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h @@ -42,15 +42,15 @@ extern int hard_smp_processor_id(void); #define raw_smp_processor_id() (current_thread_info()->cpu) extern void smp_setup_cpu_possible_map(void); +extern unsigned char boot_cpu_id; #endif /* !(__ASSEMBLY__) */ #else #define smp_setup_cpu_possible_map() do { } while (0) +#define boot_cpu_id (0) #endif /* !(CONFIG_SMP) */ -#define NO_PROC_ID 0xFF - #endif /* !(_SPARC64_SMP_H) */ diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index c2a16e188499..bbb9c8f13d61 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h @@ -157,23 +157,6 @@ ba,a,pt %xcc, rtrap_irq; \ .previous; -#define TICK_SMP_IRQ \ - rdpr %pil, %g2; \ - wrpr %g0, 15, %pil; \ - sethi %hi(1f-4), %g7; \ - ba,pt %xcc, etrap_irq; \ - or %g7, %lo(1f-4), %g7; \ - nop; \ - nop; \ - nop; \ - .subsection 2; \ -1: call trace_hardirqs_off; \ - nop; \ - call smp_percpu_timer_interrupt; \ - add %sp, PTREGS_OFF, %o0; \ - ba,a,pt %xcc, rtrap_irq; \ - .previous; - #else #define TRAP_IRQ(routine, level) \ @@ -186,16 +169,6 @@ add %sp, PTREGS_OFF, %o1; \ ba,a,pt %xcc, rtrap_irq; -#define TICK_SMP_IRQ \ - rdpr %pil, %g2; \ - wrpr %g0, 15, %pil; \ - sethi %hi(109f), %g7; \ - ba,pt %xcc, etrap_irq; \ -109: or %g7, %lo(109b), %g7; \ - call smp_percpu_timer_interrupt; \ - add %sp, PTREGS_OFF, %o0; \ - ba,a,pt %xcc, rtrap_irq; - #endif #define TRAP_IVEC TRAP_NOSAVE(do_ivec) -- cgit v1.2.3-59-g8ed1b From 112f48716d9f292c92a033cff9e3ce7405ed4280 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 5 Mar 2007 15:28:37 -0800 Subject: [SPARC64]: Add clocksource/clockevents support. I'd like to thank John Stul and others for helping me along the way. A lot of cleanups fell out of this. For example, the get_compare() tick_op was totally unused, so was deleted. And the most often used tick_op members were grouped together for cache-friendlyness. The sparc64 TSC is given to the kernel as a one-shot timer. tick_ops->init_timer() simply turns off the privileged bit in the tick register (when possible), and disables the interrupt by setting bit 63 in the compare register. 
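A minimal C sketch of that convention may help (TICKCMP_IRQ_BIT is the name the patch introduces; read_tick() and write_tick_compare() are illustrative stand-ins for the %tick/%tick_cmpr inline assembly, not real kernel interfaces). It mirrors the reworked tick_add_compare() in the diff below:

#define TICKCMP_IRQ_BIT	(1UL << 63)	/* bit 63 of the compare register disables the interrupt */

/* read_tick()/write_tick_compare() below are illustrative accessors only. */

/* Park the compare register on the disable bit so no tick interrupt fires. */
static void tick_irq_disable_sketch(void)
{
	write_tick_compare(TICKCMP_IRQ_BIT);
}

/* Arm a one-shot event 'delta' cycles from now.  Returns nonzero if the
 * counter had already passed the programmed deadline by the time it was
 * re-read, i.e. the event was missed and the caller must retry.
 */
static int tick_arm_oneshot_sketch(unsigned long delta)
{
	unsigned long now, deadline;

	now = read_tick() & ~TICKCMP_IRQ_BIT;
	deadline = now + delta;
	write_tick_compare(deadline);

	return ((long)((read_tick() & ~TICKCMP_IRQ_BIT) - deadline)) > 0L;
}
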
The ->disable_irq() op also sets this bit. tick_ops->add_compare() is changed to: 1) Add the given delta to "tick" not to "compare" 2) Return a boolean which, if true, means that the tick value read after writing the compare value was found to have incremented past the initial tick value. This mirrors logic used in the HPET driver's ->next_event() method. Each tick_ops implementation also now provides a name string. And we feed this into the clocksource and clockevents layers. Signed-off-by: David S. Miller --- arch/sparc64/Kconfig | 14 +- arch/sparc64/kernel/smp.c | 29 +--- arch/sparc64/kernel/time.c | 408 +++++++++++++++++++++++--------------------- include/asm-sparc64/timer.h | 17 +- 4 files changed, 238 insertions(+), 230 deletions(-) (limited to 'include') diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 1a6348b565fb..51c87fdd998c 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -19,6 +19,14 @@ config SPARC64 SPARC64 ports; its web page is available at . +config GENERIC_TIME + bool + default y + +config GENERIC_CLOCKEVENTS + bool + default y + config 64BIT def_bool y @@ -34,10 +42,6 @@ config LOCKDEP_SUPPORT bool default y -config TIME_INTERPOLATION - bool - default y - config ARCH_MAY_HAVE_PC_FDC bool default y @@ -113,6 +117,8 @@ config GENERIC_HARDIRQS menu "General machine setup" +source "kernel/time/Kconfig" + config SMP bool "Symmetric multi-processing support" ---help--- diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 39deb0346eb5..d4f0a70f4845 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -123,7 +123,7 @@ void __init smp_store_cpu_info(int id) cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); } -static void smp_setup_percpu_timer(void); +extern void setup_sparc64_timer(void); static volatile unsigned long callin_flag = 0; @@ -138,7 +138,7 @@ void __init smp_callin(void) __flush_tlb_all(); - smp_setup_percpu_timer(); + setup_sparc64_timer(); if (cheetah_pcache_forced_on) cheetah_enable_pcache(); @@ -175,8 +175,6 @@ void cpu_panic(void) panic("SMP bolixed\n"); } -static unsigned long current_tick_offset __read_mostly; - /* This tick register synchronization scheme is taken entirely from * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. * @@ -259,7 +257,7 @@ void smp_synchronize_tick_client(void) } else adj = -delta; - tick_ops->add_tick(adj, current_tick_offset); + tick_ops->add_tick(adj); } #if DEBUG_TICK_SYNC t[i].rt = rt; @@ -1178,30 +1176,9 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs) preempt_enable(); } -static void __init smp_setup_percpu_timer(void) -{ - unsigned long pstate; - - /* Guarantee that the following sequences execute - * uninterrupted. - */ - __asm__ __volatile__("rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - - tick_ops->init_tick(current_tick_offset); - - /* Restore PSTATE_IE. */ - __asm__ __volatile__("wrpr %0, 0x0, %%pstate" - : /* no outputs */ - : "r" (pstate)); -} - void __init smp_tick_init(void) { boot_cpu_id = hard_smp_processor_id(); - current_tick_offset = timer_tick_offset; } /* /proc/profile writes can call this, don't __init it please. 
*/ diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 48e1217c1e42..21e3b0b9d9ce 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include #include #include @@ -61,6 +63,7 @@ static void __iomem *mstk48t59_regs; static int set_rtc_mmss(unsigned long); #define TICK_PRIV_BIT (1UL << 63) +#define TICKCMP_IRQ_BIT (1UL << 63) #ifdef CONFIG_SMP unsigned long profile_pc(struct pt_regs *regs) @@ -94,21 +97,22 @@ static void tick_disable_protection(void) : "g2"); } -static void tick_init_tick(unsigned long offset) +static void tick_disable_irq(void) { - tick_disable_protection(); - __asm__ __volatile__( - " rd %%tick, %%g1\n" - " andn %%g1, %1, %%g1\n" " ba,pt %%xcc, 1f\n" - " add %%g1, %0, %%g1\n" + " nop\n" " .align 64\n" - "1: wr %%g1, 0x0, %%tick_cmpr\n" + "1: wr %0, 0x0, %%tick_cmpr\n" " rd %%tick_cmpr, %%g0" : /* no outputs */ - : "r" (offset), "r" (TICK_PRIV_BIT) - : "g1"); + : "r" (TICKCMP_IRQ_BIT)); +} + +static void tick_init_tick(void) +{ + tick_disable_protection(); + tick_disable_irq(); } static unsigned long tick_get_tick(void) @@ -122,20 +126,14 @@ static unsigned long tick_get_tick(void) return ret & ~TICK_PRIV_BIT; } -static unsigned long tick_get_compare(void) +static int tick_add_compare(unsigned long adj) { - unsigned long ret; + unsigned long orig_tick, new_tick, new_compare; - __asm__ __volatile__("rd %%tick_cmpr, %0\n\t" - "mov %0, %0" - : "=r" (ret)); + __asm__ __volatile__("rd %%tick, %0" + : "=r" (orig_tick)); - return ret; -} - -static unsigned long tick_add_compare(unsigned long adj) -{ - unsigned long new_compare; + orig_tick &= ~TICKCMP_IRQ_BIT; /* Workaround for Spitfire Errata (#54 I think??), I discovered * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch @@ -146,44 +144,41 @@ static unsigned long tick_add_compare(unsigned long adj) * at the start of an I-cache line, and perform a dummy * read back from %tick_cmpr right after writing to it. -DaveM */ - __asm__ __volatile__("rd %%tick_cmpr, %0\n\t" - "ba,pt %%xcc, 1f\n\t" - " add %0, %1, %0\n\t" + __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" + " add %1, %2, %0\n\t" ".align 64\n" "1:\n\t" "wr %0, 0, %%tick_cmpr\n\t" - "rd %%tick_cmpr, %%g0" - : "=&r" (new_compare) - : "r" (adj)); + "rd %%tick_cmpr, %%g0\n\t" + : "=r" (new_compare) + : "r" (orig_tick), "r" (adj)); + + __asm__ __volatile__("rd %%tick, %0" + : "=r" (new_tick)); + new_tick &= ~TICKCMP_IRQ_BIT; - return new_compare; + return ((long)(new_tick - (orig_tick+adj))) > 0L; } -static unsigned long tick_add_tick(unsigned long adj, unsigned long offset) +static unsigned long tick_add_tick(unsigned long adj) { - unsigned long new_tick, tmp; + unsigned long new_tick; /* Also need to handle Blackbird bug here too. 
*/ __asm__ __volatile__("rd %%tick, %0\n\t" - "add %0, %2, %0\n\t" + "add %0, %1, %0\n\t" "wrpr %0, 0, %%tick\n\t" - "andn %0, %4, %1\n\t" - "ba,pt %%xcc, 1f\n\t" - " add %1, %3, %1\n\t" - ".align 64\n" - "1:\n\t" - "wr %1, 0, %%tick_cmpr\n\t" - "rd %%tick_cmpr, %%g0" - : "=&r" (new_tick), "=&r" (tmp) - : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT)); + : "=&r" (new_tick) + : "r" (adj)); return new_tick; } static struct sparc64_tick_ops tick_operations __read_mostly = { + .name = "tick", .init_tick = tick_init_tick, + .disable_irq = tick_disable_irq, .get_tick = tick_get_tick, - .get_compare = tick_get_compare, .add_tick = tick_add_tick, .add_compare = tick_add_compare, .softint_mask = 1UL << 0, @@ -191,7 +186,15 @@ static struct sparc64_tick_ops tick_operations __read_mostly = { struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; -static void stick_init_tick(unsigned long offset) +static void stick_disable_irq(void) +{ + __asm__ __volatile__( + "wr %0, 0x0, %%asr25" + : /* no outputs */ + : "r" (TICKCMP_IRQ_BIT)); +} + +static void stick_init_tick(void) { /* Writes to the %tick and %stick register are not * allowed on sun4v. The Hypervisor controls that @@ -199,6 +202,7 @@ static void stick_init_tick(unsigned long offset) */ if (tlb_type != hypervisor) { tick_disable_protection(); + tick_disable_irq(); /* Let the user get at STICK too. */ __asm__ __volatile__( @@ -210,14 +214,7 @@ static void stick_init_tick(unsigned long offset) : "g1", "g2"); } - __asm__ __volatile__( - " rd %%asr24, %%g1\n" - " andn %%g1, %1, %%g1\n" - " add %%g1, %0, %%g1\n" - " wr %%g1, 0x0, %%asr25" - : /* no outputs */ - : "r" (offset), "r" (TICK_PRIV_BIT) - : "g1"); + stick_disable_irq(); } static unsigned long stick_get_tick(void) @@ -230,49 +227,43 @@ static unsigned long stick_get_tick(void) return ret & ~TICK_PRIV_BIT; } -static unsigned long stick_get_compare(void) -{ - unsigned long ret; - - __asm__ __volatile__("rd %%asr25, %0" - : "=r" (ret)); - - return ret; -} - -static unsigned long stick_add_tick(unsigned long adj, unsigned long offset) +static unsigned long stick_add_tick(unsigned long adj) { - unsigned long new_tick, tmp; + unsigned long new_tick; __asm__ __volatile__("rd %%asr24, %0\n\t" - "add %0, %2, %0\n\t" + "add %0, %1, %0\n\t" "wr %0, 0, %%asr24\n\t" - "andn %0, %4, %1\n\t" - "add %1, %3, %1\n\t" - "wr %1, 0, %%asr25" - : "=&r" (new_tick), "=&r" (tmp) - : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT)); + : "=&r" (new_tick) + : "r" (adj)); return new_tick; } -static unsigned long stick_add_compare(unsigned long adj) +static int stick_add_compare(unsigned long adj) { - unsigned long new_compare; + unsigned long orig_tick, new_tick; - __asm__ __volatile__("rd %%asr25, %0\n\t" - "add %0, %1, %0\n\t" - "wr %0, 0, %%asr25" - : "=&r" (new_compare) - : "r" (adj)); + __asm__ __volatile__("rd %%asr24, %0" + : "=r" (orig_tick)); + orig_tick &= ~TICKCMP_IRQ_BIT; + + __asm__ __volatile__("wr %0, 0, %%asr25" + : /* no outputs */ + : "r" (orig_tick + adj)); + + __asm__ __volatile__("rd %%asr24, %0" + : "=r" (new_tick)); + new_tick &= ~TICKCMP_IRQ_BIT; - return new_compare; + return ((long)(new_tick - (orig_tick+adj))) > 0L; } static struct sparc64_tick_ops stick_operations __read_mostly = { + .name = "stick", .init_tick = stick_init_tick, + .disable_irq = stick_disable_irq, .get_tick = stick_get_tick, - .get_compare = stick_get_compare, .add_tick = stick_add_tick, .add_compare = stick_add_compare, .softint_mask = 1UL << 16, @@ -321,20 +312,6 @@ static unsigned long __hbird_read_stick(void) 
return ret; } -static unsigned long __hbird_read_compare(void) -{ - unsigned long low, high; - unsigned long addr = HBIRD_STICKCMP_ADDR; - - __asm__ __volatile__("ldxa [%2] %3, %0\n\t" - "add %2, 0x8, %2\n\t" - "ldxa [%2] %3, %1" - : "=&r" (low), "=&r" (high), "=&r" (addr) - : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr)); - - return (high << 32UL) | low; -} - static void __hbird_write_stick(unsigned long val) { unsigned long low = (val & 0xffffffffUL); @@ -365,10 +342,13 @@ static void __hbird_write_compare(unsigned long val) "i" (ASI_PHYS_BYPASS_EC_E)); } -static void hbtick_init_tick(unsigned long offset) +static void hbtick_disable_irq(void) { - unsigned long val; + __hbird_write_compare(TICKCMP_IRQ_BIT); +} +static void hbtick_init_tick(void) +{ tick_disable_protection(); /* XXX This seems to be necessary to 'jumpstart' Hummingbird @@ -378,8 +358,7 @@ static void hbtick_init_tick(unsigned long offset) */ __hbird_write_stick(__hbird_read_stick()); - val = __hbird_read_stick() & ~TICK_PRIV_BIT; - __hbird_write_compare(val + offset); + hbtick_disable_irq(); } static unsigned long hbtick_get_tick(void) @@ -387,45 +366,40 @@ static unsigned long hbtick_get_tick(void) return __hbird_read_stick() & ~TICK_PRIV_BIT; } -static unsigned long hbtick_get_compare(void) -{ - return __hbird_read_compare(); -} - -static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset) +static unsigned long hbtick_add_tick(unsigned long adj) { unsigned long val; val = __hbird_read_stick() + adj; __hbird_write_stick(val); - val &= ~TICK_PRIV_BIT; - __hbird_write_compare(val + offset); - return val; } -static unsigned long hbtick_add_compare(unsigned long adj) +static int hbtick_add_compare(unsigned long adj) { - unsigned long val = __hbird_read_compare() + adj; + unsigned long val = __hbird_read_stick(); + unsigned long val2; - val &= ~TICK_PRIV_BIT; + val &= ~TICKCMP_IRQ_BIT; + val += adj; __hbird_write_compare(val); - return val; + val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT; + + return ((long)(val2 - val)) > 0L; } static struct sparc64_tick_ops hbtick_operations __read_mostly = { + .name = "hbtick", .init_tick = hbtick_init_tick, + .disable_irq = hbtick_disable_irq, .get_tick = hbtick_get_tick, - .get_compare = hbtick_get_compare, .add_tick = hbtick_add_tick, .add_compare = hbtick_add_compare, .softint_mask = 1UL << 0, }; -unsigned long timer_tick_offset __read_mostly; - static unsigned long timer_ticks_per_nsec_quotient __read_mostly; #define TICK_SIZE (tick_nsec / 1000) @@ -482,50 +456,6 @@ void notify_arch_cmos_timer(void) mod_timer(&sync_cmos_timer, jiffies + 1); } -void timer_interrupt(int irq, struct pt_regs *regs) -{ - struct pt_regs *old_regs = set_irq_regs(regs); - unsigned long ticks, compare, pstate; - unsigned long tick_mask = tick_ops->softint_mask; - - clear_softint(tick_mask); - - irq_enter(); - - kstat_this_cpu.irqs[0]++; - - do { - profile_tick(CPU_PROFILING); - update_process_times(user_mode(get_irq_regs())); - - if (smp_processor_id() == boot_cpu_id) { - write_seqlock(&xtime_lock); - do_timer(1); - write_sequnlock(&xtime_lock); - } - - /* Guarantee that the following sequences execute - * uninterrupted. - */ - __asm__ __volatile__("rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - - compare = tick_ops->add_compare(timer_tick_offset); - ticks = tick_ops->get_tick(); - - /* Restore PSTATE_IE. 
*/ - __asm__ __volatile__("wrpr %0, 0x0, %%pstate" - : /* no outputs */ - : "r" (pstate)); - } while (unlikely(time_after_eq(ticks, compare))); - - irq_exit(); - - set_irq_regs(old_regs); -} - /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ static void __init kick_start_clock(void) { @@ -923,7 +853,6 @@ static unsigned long sparc64_init_timers(void) prop = of_find_property(dp, "stick-frequency", NULL); } clock = *(unsigned int *) prop->value; - timer_tick_offset = clock / HZ; #ifdef CONFIG_SMP smp_tick_init(); @@ -932,26 +861,6 @@ static unsigned long sparc64_init_timers(void) return clock; } -static void sparc64_start_timers(void) -{ - unsigned long pstate; - - /* Guarantee that the following sequences execute - * uninterrupted. - */ - __asm__ __volatile__("rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - - tick_ops->init_tick(timer_tick_offset); - - /* Restore PSTATE_IE. */ - __asm__ __volatile__("wrpr %0, 0x0, %%pstate" - : /* no outputs */ - : "r" (pstate)); -} - struct freq_table { unsigned long clock_tick_ref; unsigned int ref_freq; @@ -998,29 +907,148 @@ static struct notifier_block sparc64_cpufreq_notifier_block = { #endif /* CONFIG_CPU_FREQ */ -static struct time_interpolator sparc64_cpu_interpolator = { - .source = TIME_SOURCE_CPU, - .shift = 16, - .mask = 0xffffffffffffffffLL +static int sparc64_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + return tick_ops->add_compare(delta); +} + +static void sparc64_timer_setup(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + switch (mode) { + case CLOCK_EVT_MODE_ONESHOT: + break; + + case CLOCK_EVT_MODE_SHUTDOWN: + tick_ops->disable_irq(); + break; + + case CLOCK_EVT_MODE_PERIODIC: + case CLOCK_EVT_MODE_UNUSED: + WARN_ON(1); + break; + }; +} + +static struct clock_event_device sparc64_clockevent = { + .features = CLOCK_EVT_FEAT_ONESHOT, + .set_mode = sparc64_timer_setup, + .set_next_event = sparc64_next_event, + .rating = 100, + .shift = 30, + .irq = -1, }; +static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); -/* The quotient formula is taken from the IA64 port. */ -#define SPARC64_NSEC_PER_CYC_SHIFT 10UL -void __init time_init(void) +void timer_interrupt(int irq, struct pt_regs *regs) { - unsigned long clock = sparc64_init_timers(); + struct pt_regs *old_regs = set_irq_regs(regs); + unsigned long tick_mask = tick_ops->softint_mask; + int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); + + clear_softint(tick_mask); + + irq_enter(); + + kstat_this_cpu.irqs[0]++; + + if (unlikely(!evt->event_handler)) { + printk(KERN_WARNING + "Spurious SPARC64 timer interrupt on cpu %d\n", cpu); + } else + evt->event_handler(evt); + + irq_exit(); + + set_irq_regs(old_regs); +} - sparc64_cpu_interpolator.frequency = clock; - register_time_interpolator(&sparc64_cpu_interpolator); +void __devinit setup_sparc64_timer(void) +{ + struct clock_event_device *sevt; + unsigned long pstate; - /* Now that the interpolator is registered, it is - * safe to start the timer ticking. + /* Guarantee that the following sequences execute + * uninterrupted. */ - sparc64_start_timers(); + __asm__ __volatile__("rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (pstate) + : "i" (PSTATE_IE)); + + tick_ops->init_tick(); + + /* Restore PSTATE_IE. 
*/ + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" + : /* no outputs */ + : "r" (pstate)); + + sevt = &__get_cpu_var(sparc64_events); + + memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); + sevt->cpumask = cpumask_of_cpu(smp_processor_id()); + + clockevents_register_device(sevt); +} + +#define SPARC64_NSEC_PER_CYC_SHIFT 32UL + +static struct clocksource clocksource_tick = { + .rating = 100, + .mask = CLOCKSOURCE_MASK(64), + .shift = 16, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static void __init setup_clockevent_multiplier(unsigned long hz) +{ + unsigned long mult, shift = 32; + + while (1) { + mult = div_sc(hz, NSEC_PER_SEC, shift); + if (mult && (mult >> 32UL) == 0UL) + break; + + shift--; + } + + sparc64_clockevent.shift = shift; + sparc64_clockevent.mult = mult; +} + +void __init time_init(void) +{ + unsigned long clock = sparc64_init_timers(); timer_ticks_per_nsec_quotient = - (((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) + - (clock / 2)) / clock); + clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT); + + clocksource_tick.name = tick_ops->name; + clocksource_tick.mult = + clocksource_hz2mult(clock, + clocksource_tick.shift); + clocksource_tick.read = tick_ops->get_tick; + + printk("clocksource: mult[%x] shift[%d]\n", + clocksource_tick.mult, clocksource_tick.shift); + + clocksource_register(&clocksource_tick); + + sparc64_clockevent.name = tick_ops->name; + + setup_clockevent_multiplier(clock); + + sparc64_clockevent.max_delta_ns = + clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent); + sparc64_clockevent.min_delta_ns = + clockevent_delta2ns(0xF, &sparc64_clockevent); + + printk("clockevent: mult[%lx] shift[%d]\n", + sparc64_clockevent.mult, sparc64_clockevent.shift); + + setup_sparc64_timer(); #ifdef CONFIG_CPU_FREQ cpufreq_register_notifier(&sparc64_cpufreq_notifier_block, diff --git a/include/asm-sparc64/timer.h b/include/asm-sparc64/timer.h index d435594df786..ccbd69448866 100644 --- a/include/asm-sparc64/timer.h +++ b/include/asm-sparc64/timer.h @@ -11,22 +11,19 @@ struct sparc64_tick_ops { - void (*init_tick)(unsigned long); unsigned long (*get_tick)(void); - unsigned long (*get_compare)(void); - unsigned long (*add_tick)(unsigned long, unsigned long); - unsigned long (*add_compare)(unsigned long); + int (*add_compare)(unsigned long); unsigned long softint_mask; + void (*disable_irq)(void); + + void (*init_tick)(void); + unsigned long (*add_tick)(unsigned long); + + char *name; }; extern struct sparc64_tick_ops *tick_ops; -#ifdef CONFIG_SMP -extern unsigned long timer_tick_offset; -struct pt_regs; -extern void timer_tick_interrupt(struct pt_regs *); -#endif - extern unsigned long sparc64_get_clock_tick(unsigned int cpu); #endif /* _SPARC64_TIMER_H */ -- cgit v1.2.3-59-g8ed1b From 66f3cb7ccfe6d735bd1fa435aebc9b985ac74e07 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 29 Mar 2007 00:50:29 -0700 Subject: [SPARC64] constify of_get_property return: include Signed-off-by: Stephen Rothwell Signed-off-by: David S. 
Miller --- include/asm-sparc64/floppy.h | 4 ++-- include/asm-sparc64/parport.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h index 331013a0053e..4aa0925e1b1b 100644 --- a/include/asm-sparc64/floppy.h +++ b/include/asm-sparc64/floppy.h @@ -549,7 +549,7 @@ static int __init ebus_fdthree_p(struct linux_ebus_device *edev) if (!strcmp(edev->prom_node->name, "fdthree")) return 1; if (!strcmp(edev->prom_node->name, "floppy")) { - char *compat; + const char *compat; compat = of_get_property(edev->prom_node, "compatible", NULL); @@ -661,7 +661,7 @@ static unsigned long __init sun_floppy_init(void) struct linux_ebus_device *edev = NULL; unsigned long config = 0; void __iomem *auxio_reg; - char *state_prop; + const char *state_prop; for_each_ebus(ebus) { for_each_ebusdev(edev, ebus) { diff --git a/include/asm-sparc64/parport.h b/include/asm-sparc64/parport.h index 284dfd01a33d..6340a5253a34 100644 --- a/include/asm-sparc64/parport.h +++ b/include/asm-sparc64/parport.h @@ -103,7 +103,7 @@ static int ebus_ecpp_p(struct linux_ebus_device *edev) if (!strcmp(edev->prom_node->name, "ecpp")) return 1; if (!strcmp(edev->prom_node->name, "parallel")) { - char *compat; + const char *compat; compat = of_get_property(edev->prom_node, "compatible", NULL); -- cgit v1.2.3-59-g8ed1b From 64b94701c0714f814e640ff351d5f784fdc0381e Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 29 Mar 2007 00:53:28 -0700 Subject: [SPARC/64]: constify of_get_property return Finally, we actually change the functions themselves. Signed-off-by: Stephen Rothwell Signed-off-by: David S. Miller --- arch/sparc/kernel/prom.c | 2 +- arch/sparc64/kernel/prom.c | 2 +- include/asm-sparc/prom.h | 2 +- include/asm-sparc64/prom.h | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c index 8359d00a2489..d2a8297d56fa 100644 --- a/arch/sparc/kernel/prom.c +++ b/arch/sparc/kernel/prom.c @@ -170,7 +170,7 @@ EXPORT_SYMBOL(of_find_property); * Find a property with a given name for a given node * and return the value. */ -void *of_get_property(struct device_node *np, const char *name, int *lenp) +const void *of_get_property(struct device_node *np, const char *name, int *lenp) { struct property *pp = of_find_property(np,name,lenp); return pp ? pp->value : NULL; diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c index 25b70368973c..493db961a1f6 100644 --- a/arch/sparc64/kernel/prom.c +++ b/arch/sparc64/kernel/prom.c @@ -174,7 +174,7 @@ EXPORT_SYMBOL(of_find_property); * Find a property with a given name for a given node * and return the value. */ -void *of_get_property(struct device_node *np, const char *name, int *lenp) +const void *of_get_property(struct device_node *np, const char *name, int *lenp) { struct property *pp = of_find_property(np,name,lenp); return pp ? 
pp->value : NULL; diff --git a/include/asm-sparc/prom.h b/include/asm-sparc/prom.h index 274868d8598d..30d53ed4f5cd 100644 --- a/include/asm-sparc/prom.h +++ b/include/asm-sparc/prom.h @@ -89,7 +89,7 @@ extern struct property *of_find_property(struct device_node *np, const char *name, int *lenp); extern int of_device_is_compatible(struct device_node *device, const char *); -extern void *of_get_property(struct device_node *node, const char *name, +extern const void *of_get_property(struct device_node *node, const char *name, int *lenp); #define get_property(node,name,lenp) of_get_property(node,name,lenp) extern int of_set_property(struct device_node *node, const char *name, void *val, int len); diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h index 0eca2d98627f..50b03387c97b 100644 --- a/include/asm-sparc64/prom.h +++ b/include/asm-sparc64/prom.h @@ -97,8 +97,8 @@ extern struct property *of_find_property(struct device_node *np, const char *name, int *lenp); extern int of_device_is_compatible(struct device_node *device, const char *); -extern void *of_get_property(struct device_node *node, const char *name, - int *lenp); +extern const void *of_get_property(struct device_node *node, const char *name, + int *lenp); #define get_property(node,name,lenp) of_get_property(node,name,lenp) extern int of_set_property(struct device_node *node, const char *name, void *val, int len); extern int of_getintprop_default(struct device_node *np, -- cgit v1.2.3-59-g8ed1b From 357418e7cac16fed4ca558c6037d189d2109c9c2 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 29 Mar 2007 00:54:04 -0700 Subject: [SPARC]: constify some paramaters of OF routines This starts bringing the PowerPC and Sparc implemetations back closer together. Signed-off-by: Stephen Rothwell Signed-off-by: David S. Miller --- arch/sparc/kernel/prom.c | 9 ++++++--- include/asm-sparc/prom.h | 10 ++++++---- 2 files changed, 12 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c index d2a8297d56fa..9ddf6035915c 100644 --- a/arch/sparc/kernel/prom.c +++ b/arch/sparc/kernel/prom.c @@ -32,7 +32,8 @@ static struct device_node *allnodes; */ static DEFINE_RWLOCK(devtree_lock); -int of_device_is_compatible(struct device_node *device, const char *compat) +int of_device_is_compatible(const struct device_node *device, + const char *compat) { const char* cp; int cplen, l; @@ -150,7 +151,8 @@ struct device_node *of_find_compatible_node(struct device_node *from, } EXPORT_SYMBOL(of_find_compatible_node); -struct property *of_find_property(struct device_node *np, const char *name, +struct property *of_find_property(const struct device_node *np, + const char *name, int *lenp) { struct property *pp; @@ -170,7 +172,8 @@ EXPORT_SYMBOL(of_find_property); * Find a property with a given name for a given node * and return the value. */ -const void *of_get_property(struct device_node *np, const char *name, int *lenp) +const void *of_get_property(const struct device_node *np, const char *name, + int *lenp) { struct property *pp = of_find_property(np,name,lenp); return pp ? 
pp->value : NULL; diff --git a/include/asm-sparc/prom.h b/include/asm-sparc/prom.h index 30d53ed4f5cd..39ad2224a117 100644 --- a/include/asm-sparc/prom.h +++ b/include/asm-sparc/prom.h @@ -85,12 +85,14 @@ extern struct device_node *of_find_node_by_phandle(phandle handle); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); -extern struct property *of_find_property(struct device_node *np, +extern struct property *of_find_property(const struct device_node *np, const char *name, int *lenp); -extern int of_device_is_compatible(struct device_node *device, const char *); -extern const void *of_get_property(struct device_node *node, const char *name, - int *lenp); +extern int of_device_is_compatible(const struct device_node *device, + const char *); +extern const void *of_get_property(const struct device_node *node, + const char *name, + int *lenp); #define get_property(node,name,lenp) of_get_property(node,name,lenp) extern int of_set_property(struct device_node *node, const char *name, void *val, int len); extern int of_getintprop_default(struct device_node *np, -- cgit v1.2.3-59-g8ed1b From ded220bd8f0823771fc0a9bdf7f5bcbe543197b6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 29 Mar 2007 01:18:42 -0700 Subject: [STRING]: Move strcasecmp/strncasecmp to lib/string.c We have several platforms using local copies of identical code. Signed-off-by: David S. Miller --- arch/alpha/lib/Makefile | 1 - arch/alpha/lib/strcasecmp.c | 26 -------------------------- arch/powerpc/kernel/ppc_ksyms.c | 2 -- arch/powerpc/lib/Makefile | 5 ++--- arch/powerpc/lib/strcase.c | 25 ------------------------- arch/ppc/kernel/ppc_ksyms.c | 2 -- arch/ppc/lib/Makefile | 2 +- arch/ppc/lib/strcase.c | 24 ------------------------ arch/sh/lib/Makefile | 2 +- arch/sh/lib/strcasecmp.c | 26 -------------------------- arch/xtensa/lib/Makefile | 2 +- arch/xtensa/lib/strcasecmp.c | 32 -------------------------------- include/asm-alpha/string.h | 2 -- include/asm-powerpc/string.h | 2 -- include/asm-sh/string.h | 3 --- include/linux/string.h | 6 ++++++ lib/string.c | 28 ++++++++++++++++++++++++++++ 17 files changed, 39 insertions(+), 151 deletions(-) delete mode 100644 arch/alpha/lib/strcasecmp.c delete mode 100644 arch/powerpc/lib/strcase.c delete mode 100644 arch/ppc/lib/strcase.c delete mode 100644 arch/sh/lib/strcasecmp.c delete mode 100644 arch/xtensa/lib/strcasecmp.c (limited to 'include') diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile index 21cf624d7329..ea098f3b629f 100644 --- a/arch/alpha/lib/Makefile +++ b/arch/alpha/lib/Makefile @@ -36,7 +36,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ $(ev6-y)csum_ipv6_magic.o \ $(ev6-y)clear_page.o \ $(ev6-y)copy_page.o \ - strcasecmp.o \ fpreg.o \ callback_srm.o srm_puts.o srm_printk.o diff --git a/arch/alpha/lib/strcasecmp.c b/arch/alpha/lib/strcasecmp.c deleted file mode 100644 index 4e57a216feaf..000000000000 --- a/arch/alpha/lib/strcasecmp.c +++ /dev/null @@ -1,26 +0,0 @@ -/* - * linux/arch/alpha/lib/strcasecmp.c - */ - -#include - - -/* We handle nothing here except the C locale. Since this is used in - only one place, on strings known to contain only 7 bit ASCII, this - is ok. 
*/ - -int strcasecmp(const char *a, const char *b) -{ - int ca, cb; - - do { - ca = *a++ & 0xff; - cb = *b++ & 0xff; - if (ca >= 'A' && ca <= 'Z') - ca += 'a' - 'A'; - if (cb >= 'A' && cb <= 'Z') - cb += 'a' - 'A'; - } while (ca == cb && ca != '\0'); - - return ca - cb; -} diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index ecee596d28f6..2f8e9c02c92a 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -84,8 +84,6 @@ EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strcasecmp); -EXPORT_SYMBOL(strncasecmp); EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 4b1ba49fbd9e..450258de7ca1 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -7,13 +7,12 @@ EXTRA_CFLAGS += -mno-minimal-toc endif ifeq ($(CONFIG_PPC_MERGE),y) -obj-y := string.o strcase.o +obj-y := string.o obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o endif obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \ - memcpy_64.o usercopy_64.o mem_64.o string.o \ - strcase.o + memcpy_64.o usercopy_64.o mem_64.o string.o obj-$(CONFIG_QUICC_ENGINE) += rheap.o obj-$(CONFIG_XMON) += sstep.o obj-$(CONFIG_KPROBES) += sstep.o diff --git a/arch/powerpc/lib/strcase.c b/arch/powerpc/lib/strcase.c deleted file mode 100644 index f8ec1eba3fdd..000000000000 --- a/arch/powerpc/lib/strcase.c +++ /dev/null @@ -1,25 +0,0 @@ -#include -#include -#include - -int strcasecmp(const char *s1, const char *s2) -{ - int c1, c2; - - do { - c1 = tolower(*s1++); - c2 = tolower(*s2++); - } while (c1 == c2 && c1 != 0); - return c1 - c2; -} - -int strncasecmp(const char *s1, const char *s2, size_t n) -{ - int c1, c2; - - do { - c1 = tolower(*s1++); - c2 = tolower(*s2++); - } while ((--n > 0) && c1 == c2 && c1 != 0); - return c1 - c2; -} diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c index 1318b6f4c3df..4ad499605d05 100644 --- a/arch/ppc/kernel/ppc_ksyms.c +++ b/arch/ppc/kernel/ppc_ksyms.c @@ -93,8 +93,6 @@ EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strcasecmp); -EXPORT_SYMBOL(strncasecmp); EXPORT_SYMBOL(__div64_32); EXPORT_SYMBOL(csum_partial); diff --git a/arch/ppc/lib/Makefile b/arch/ppc/lib/Makefile index 50358e4ea159..422bef9bae7b 100644 --- a/arch/ppc/lib/Makefile +++ b/arch/ppc/lib/Makefile @@ -2,7 +2,7 @@ # Makefile for ppc-specific library files.. 
# -obj-y := checksum.o string.o strcase.o div64.o +obj-y := checksum.o string.o div64.o obj-$(CONFIG_8xx) += rheap.o obj-$(CONFIG_CPM2) += rheap.o diff --git a/arch/ppc/lib/strcase.c b/arch/ppc/lib/strcase.c deleted file mode 100644 index 3b0094cc2b52..000000000000 --- a/arch/ppc/lib/strcase.c +++ /dev/null @@ -1,24 +0,0 @@ -#include -#include - -int strcasecmp(const char *s1, const char *s2) -{ - int c1, c2; - - do { - c1 = tolower(*s1++); - c2 = tolower(*s2++); - } while (c1 == c2 && c1 != 0); - return c1 - c2; -} - -int strncasecmp(const char *s1, const char *s2, size_t n) -{ - int c1, c2; - - do { - c1 = tolower(*s1++); - c2 = tolower(*s2++); - } while ((--n > 0) && c1 == c2 && c1 != 0); - return c1 - c2; -} diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index b5681e3f9684..0b9cca5c7cb4 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -3,7 +3,7 @@ # lib-y = delay.o memset.o memmove.o memchr.o \ - checksum.o strcasecmp.o strlen.o div64.o udivdi3.o \ + checksum.o strlen.o div64.o udivdi3.o \ div64-generic.o memcpy-y := memcpy.o diff --git a/arch/sh/lib/strcasecmp.c b/arch/sh/lib/strcasecmp.c deleted file mode 100644 index 4e57a216feaf..000000000000 --- a/arch/sh/lib/strcasecmp.c +++ /dev/null @@ -1,26 +0,0 @@ -/* - * linux/arch/alpha/lib/strcasecmp.c - */ - -#include - - -/* We handle nothing here except the C locale. Since this is used in - only one place, on strings known to contain only 7 bit ASCII, this - is ok. */ - -int strcasecmp(const char *a, const char *b) -{ - int ca, cb; - - do { - ca = *a++ & 0xff; - cb = *b++ & 0xff; - if (ca >= 'A' && ca <= 'Z') - ca += 'a' - 'A'; - if (cb >= 'A' && cb <= 'Z') - cb += 'a' - 'A'; - } while (ca == cb && ca != '\0'); - - return ca - cb; -} diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile index ed935b58e8a4..6c4fdd86acd8 100644 --- a/arch/xtensa/lib/Makefile +++ b/arch/xtensa/lib/Makefile @@ -2,6 +2,6 @@ # Makefile for Xtensa-specific library files. # -lib-y += memcopy.o memset.o checksum.o strcasecmp.o \ +lib-y += memcopy.o memset.o checksum.o \ usercopy.o strncpy_user.o strnlen_user.o lib-$(CONFIG_PCI) += pci-auto.o diff --git a/arch/xtensa/lib/strcasecmp.c b/arch/xtensa/lib/strcasecmp.c deleted file mode 100644 index 165b2d6effa5..000000000000 --- a/arch/xtensa/lib/strcasecmp.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * linux/arch/xtensa/lib/strcasecmp.c - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file "COPYING" in the main directory of - * this archive for more details. - * - * Copyright (C) 2002 Tensilica Inc. - */ - -#include - - -/* We handle nothing here except the C locale. Since this is used in - only one place, on strings known to contain only 7 bit ASCII, this - is ok. */ - -int strcasecmp(const char *a, const char *b) -{ - int ca, cb; - - do { - ca = *a++ & 0xff; - cb = *b++ & 0xff; - if (ca >= 'A' && ca <= 'Z') - ca += 'a' - 'A'; - if (cb >= 'A' && cb <= 'Z') - cb += 'a' - 'A'; - } while (ca == cb && ca != '\0'); - - return ca - cb; -} diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h index 9e44fea669bf..b02b8a282940 100644 --- a/include/asm-alpha/string.h +++ b/include/asm-alpha/string.h @@ -61,8 +61,6 @@ extern void * __memsetw(void *dest, unsigned short, size_t count); ? 
__constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \ : __memsetw((s),(c),(n))) -extern int strcasecmp(const char *, const char *); - #endif /* __KERNEL__ */ #endif /* __ALPHA_STRING_H__ */ diff --git a/include/asm-powerpc/string.h b/include/asm-powerpc/string.h index faa407f33c6b..aa40f92c298d 100644 --- a/include/asm-powerpc/string.h +++ b/include/asm-powerpc/string.h @@ -14,8 +14,6 @@ #define __HAVE_ARCH_MEMCMP #define __HAVE_ARCH_MEMCHR -extern int strcasecmp(const char *, const char *); -extern int strncasecmp(const char *, const char *, __kernel_size_t); extern char * strcpy(char *,const char *); extern char * strncpy(char *,const char *, __kernel_size_t); extern __kernel_size_t strlen(const char *); diff --git a/include/asm-sh/string.h b/include/asm-sh/string.h index 95bc7db006b0..55f8db6bc1d7 100644 --- a/include/asm-sh/string.h +++ b/include/asm-sh/string.h @@ -126,9 +126,6 @@ extern void *memchr(const void *__s, int __c, size_t __n); #define __HAVE_ARCH_STRLEN extern size_t strlen(const char *); -/* arch/sh/lib/strcasecmp.c */ -extern int strcasecmp(const char *, const char *); - #endif /* __KERNEL__ */ #endif /* __ASM_SH_STRING_H */ diff --git a/include/linux/string.h b/include/linux/string.h index 4f69ef9e6eb5..7f2eb6a477f9 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -47,6 +47,12 @@ extern int strncmp(const char *,const char *,__kernel_size_t); #ifndef __HAVE_ARCH_STRNICMP extern int strnicmp(const char *, const char *, __kernel_size_t); #endif +#ifndef __HAVE_ARCH_STRCASECMP +extern int strcasecmp(const char *s1, const char *s2); +#endif +#ifndef __HAVE_ARCH_STRNCASECMP +extern int strncasecmp(const char *s1, const char *s2, size_t n); +#endif #ifndef __HAVE_ARCH_STRCHR extern char * strchr(const char *,int); #endif diff --git a/lib/string.c b/lib/string.c index bab440fb0dfc..5efafed3d6b6 100644 --- a/lib/string.c +++ b/lib/string.c @@ -60,6 +60,34 @@ int strnicmp(const char *s1, const char *s2, size_t len) EXPORT_SYMBOL(strnicmp); #endif +#ifndef __HAVE_ARCH_STRCASECMP +int strcasecmp(const char *s1, const char *s2) +{ + int c1, c2; + + do { + c1 = tolower(*s1++); + c2 = tolower(*s2++); + } while (c1 == c2 && c1 != 0); + return c1 - c2; +} +EXPORT_SYMBOL(strcasecmp); +#endif + +#ifndef __HAVE_ARCH_STRNCASECMP +int strncasecmp(const char *s1, const char *s2, size_t n) +{ + int c1, c2; + + do { + c1 = tolower(*s1++); + c2 = tolower(*s2++); + } while ((--n > 0) && c1 == c2 && c1 != 0); + return c1 - c2; +} +EXPORT_SYMBOL(strncasecmp); +#endif + #ifndef __HAVE_ARCH_STRCPY /** * strcpy - Copy a %NUL terminated string -- cgit v1.2.3-59-g8ed1b From 1327e9b62fc88e64ffbbd42d61fccd34e521bb86 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 28 Feb 2007 17:55:46 -0800 Subject: [SPARC64] ebus: Convert to use pci_device_to_OF_node(). Also, we don't need to store or use the PBM so kill that from the linux_ebus. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/ebus.c | 12 +++--------- include/asm-sparc64/ebus.h | 2 -- 2 files changed, 3 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c index 1962bfa4ef3d..0ace17bafba4 100644 --- a/arch/sparc64/kernel/ebus.c +++ b/arch/sparc64/kernel/ebus.c @@ -438,11 +438,9 @@ static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p) void __init ebus_init(void) { - struct pci_pbm_info *pbm; struct linux_ebus_device *dev; struct linux_ebus *ebus; struct pci_dev *pdev; - struct pcidev_cookie *cookie; struct device_node *dp; int is_rio; int num_ebus = 0; @@ -453,8 +451,7 @@ void __init ebus_init(void) return; } - cookie = pdev->sysdata; - dp = cookie->prom_node; + dp = pci_device_to_OF_node(pdev); ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus)); ebus->next = NULL; @@ -480,8 +477,7 @@ void __init ebus_init(void) break; } ebus->is_rio = is_rio; - cookie = pdev->sysdata; - dp = cookie->prom_node; + dp = pci_device_to_OF_node(pdev); continue; } printk("ebus%d:", num_ebus); @@ -489,7 +485,6 @@ void __init ebus_init(void) ebus->index = num_ebus; ebus->prom_node = dp; ebus->self = pdev; - ebus->parent = pbm = cookie->pbm; ebus->ofdev.node = dp; ebus->ofdev.dev.parent = &pdev->dev; @@ -531,8 +526,7 @@ void __init ebus_init(void) if (!pdev) break; - cookie = pdev->sysdata; - dp = cookie->prom_node; + dp = pci_device_to_OF_node(pdev); ebus->next = ebus_alloc(sizeof(struct linux_ebus)); ebus = ebus->next; diff --git a/include/asm-sparc64/ebus.h b/include/asm-sparc64/ebus.h index a4afe9d5703a..9c1c6db2a790 100644 --- a/include/asm-sparc64/ebus.h +++ b/include/asm-sparc64/ebus.h @@ -8,7 +8,6 @@ #ifndef __SPARC64_EBUS_H #define __SPARC64_EBUS_H -#include #include #include #include @@ -41,7 +40,6 @@ struct linux_ebus { struct of_device ofdev; struct linux_ebus *next; struct linux_ebus_device *devices; - struct pci_pbm_info *parent; struct pci_dev *self; int index; int is_rio; -- cgit v1.2.3-59-g8ed1b From deb66c4521e119442aa266553e8cbfc86eb71232 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 28 Feb 2007 18:01:38 -0800 Subject: [SPARC64] isa: Convert to use pci_device_to_OF_node(). Also, do not try to compute resources by hand, instead use the pre-computed ones in the of_device. Signed-off-by: David S. Miller --- arch/sparc64/kernel/isa.c | 36 ++++-------------------------------- include/asm-sparc64/isa.h | 2 -- 2 files changed, 4 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c index 791eeb7e3873..6a6882e57ff2 100644 --- a/arch/sparc64/kernel/isa.c +++ b/arch/sparc64/kernel/isa.c @@ -24,27 +24,9 @@ static void __init report_dev(struct sparc_isa_device *isa_dev, int child) static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev) { - const struct linux_prom_registers *pregs; - unsigned long base, len; - int prop_len; - - pregs = of_get_property(isa_dev->prom_node, "reg", &prop_len); - if (!pregs) - return; - - /* Only the first one is interesting. 
*/ - len = pregs[0].reg_size; - base = (((unsigned long)pregs[0].which_io << 32) | - (unsigned long)pregs[0].phys_addr); - base += isa_dev->bus->parent->io_space.start; - - isa_dev->resource.start = base; - isa_dev->resource.end = (base + len - 1UL); - isa_dev->resource.flags = IORESOURCE_IO; - isa_dev->resource.name = isa_dev->prom_node->name; + struct of_device *op = of_find_device_by_node(isa_dev->prom_node); - request_resource(&isa_dev->bus->parent->io_space, - &isa_dev->resource); + memcpy(&isa_dev->resource, &op->resource[0], sizeof(struct resource)); } static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev) @@ -158,19 +140,10 @@ void __init isa_init(void) pdev = NULL; while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) { - struct pcidev_cookie *pdev_cookie; - struct pci_pbm_info *pbm; struct sparc_isa_bridge *isa_br; struct device_node *dp; - pdev_cookie = pdev->sysdata; - if (!pdev_cookie) { - printk("ISA: Warning, ISA bridge ignored due to " - "lack of OBP data.\n"); - continue; - } - pbm = pdev_cookie->pbm; - dp = pdev_cookie->prom_node; + dp = pci_device_to_OF_node(pdev); isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL); if (!isa_br) { @@ -195,10 +168,9 @@ void __init isa_init(void) isa_br->next = isa_chain; isa_chain = isa_br; - isa_br->parent = pbm; isa_br->self = pdev; isa_br->index = index++; - isa_br->prom_node = pdev_cookie->prom_node; + isa_br->prom_node = dp; printk("isa%d:", isa_br->index); diff --git a/include/asm-sparc64/isa.h b/include/asm-sparc64/isa.h index d9728b9031fc..ecd9290f78d4 100644 --- a/include/asm-sparc64/isa.h +++ b/include/asm-sparc64/isa.h @@ -7,7 +7,6 @@ #ifndef __SPARC64_ISA_H #define __SPARC64_ISA_H -#include #include #include #include @@ -29,7 +28,6 @@ struct sparc_isa_bridge { struct of_device ofdev; struct sparc_isa_bridge *next; struct sparc_isa_device *devices; - struct pci_pbm_info *parent; struct pci_dev *self; int index; struct device_node *prom_node; -- cgit v1.2.3-59-g8ed1b From a2fb23af1c31ad6e0c281e56d385f803229d57fa Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 28 Feb 2007 23:35:04 -0800 Subject: [SPARC64]: Probe PCI bus using OF device tree. Almost entirely taken from the 64-bit PowerPC PCI code. This allowed to eliminate a ton of cruft from the sparc64 PCI layer. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci.c | 375 ++++++++++++++++++++- arch/sparc64/kernel/pci_common.c | 707 --------------------------------------- arch/sparc64/kernel/pci_impl.h | 15 +- arch/sparc64/kernel/pci_iommu.c | 47 +-- arch/sparc64/kernel/pci_psycho.c | 26 +- arch/sparc64/kernel/pci_sabre.c | 53 +-- arch/sparc64/kernel/pci_schizo.c | 26 +- arch/sparc64/kernel/pci_sun4v.c | 199 ++--------- include/asm-sparc64/device.h | 18 +- include/asm-sparc64/pbm.h | 23 -- 10 files changed, 426 insertions(+), 1063 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index 12109886bb1e..246b8009a2b4 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c @@ -1,9 +1,11 @@ -/* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $ - * pci.c: UltraSparc PCI controller support. +/* pci.c: UltraSparc PCI controller support. * * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) + * + * OF tree based PCI bus probing taken from the PowerPC port + * with minor modifications, see there for credits. 
*/ #include @@ -300,6 +302,329 @@ static void __init pci_controller_probe(void) pci_controller_scan(pci_controller_init); } +static unsigned long pci_parse_of_flags(u32 addr0) +{ + unsigned long flags = 0; + + if (addr0 & 0x02000000) { + flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; + flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; + flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; + if (addr0 & 0x40000000) + flags |= IORESOURCE_PREFETCH + | PCI_BASE_ADDRESS_MEM_PREFETCH; + } else if (addr0 & 0x01000000) + flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; + return flags; +} + +/* The of_device layer has translated all of the assigned-address properties + * into physical address resources, we only have to figure out the register + * mapping. + */ +static void pci_parse_of_addrs(struct of_device *op, + struct device_node *node, + struct pci_dev *dev) +{ + struct resource *op_res; + const u32 *addrs; + int proplen; + + addrs = of_get_property(node, "assigned-addresses", &proplen); + if (!addrs) + return; + printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs); + op_res = &op->resource[0]; + for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { + struct resource *res; + unsigned long flags; + int i; + + flags = pci_parse_of_flags(addrs[0]); + if (!flags) + continue; + i = addrs[0] & 0xff; + printk(" start: %lx, end: %lx, i: %x\n", + op_res->start, op_res->end, i); + + if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { + res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; + } else if (i == dev->rom_base_reg) { + res = &dev->resource[PCI_ROM_RESOURCE]; + flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; + } else { + printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); + continue; + } + res->start = op_res->start; + res->end = op_res->end; + res->flags = flags; + res->name = pci_name(dev); + } +} + +struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_bus *bus, int devfn) +{ + struct dev_archdata *sd; + struct pci_dev *dev; + const char *type; + + dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); + if (!dev) + return NULL; + + sd = &dev->dev.archdata; + sd->iommu = pbm->iommu; + sd->stc = &pbm->stc; + sd->host_controller = pbm; + sd->prom_node = node; + sd->op = of_find_device_by_node(node); + sd->msi_num = 0xffffffff; + + type = of_get_property(node, "device_type", NULL); + if (type == NULL) + type = ""; + + printk(" create device, devfn: %x, type: %s\n", devfn, type); + + dev->bus = bus; + dev->sysdata = node; + dev->dev.parent = bus->bridge; + dev->dev.bus = &pci_bus_type; + dev->devfn = devfn; + dev->multifunction = 0; /* maybe a lie? 
*/ + + dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); + dev->device = of_getintprop_default(node, "device-id", 0xffff); + dev->subsystem_vendor = + of_getintprop_default(node, "subsystem-vendor-id", 0); + dev->subsystem_device = + of_getintprop_default(node, "subsystem-id", 0); + + dev->cfg_size = pci_cfg_space_size(dev); + + sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), + dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); + dev->class = of_getintprop_default(node, "class-code", 0); + + printk(" class: 0x%x\n", dev->class); + + dev->current_state = 4; /* unknown power state */ + dev->error_state = pci_channel_io_normal; + + if (!strcmp(type, "pci") || !strcmp(type, "pciex")) { + /* a PCI-PCI bridge */ + dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; + dev->rom_base_reg = PCI_ROM_ADDRESS1; + } else if (!strcmp(type, "cardbus")) { + dev->hdr_type = PCI_HEADER_TYPE_CARDBUS; + } else { + dev->hdr_type = PCI_HEADER_TYPE_NORMAL; + dev->rom_base_reg = PCI_ROM_ADDRESS; + + dev->irq = sd->op->irqs[0]; + if (dev->irq == 0xffffffff) + dev->irq = PCI_IRQ_NONE; + } + + pci_parse_of_addrs(sd->op, node, dev); + + printk(" adding to system ...\n"); + + pci_device_add(dev, bus); + + return dev; +} + +static void __init pci_of_scan_bus(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_bus *bus); + +#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) + +void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_dev *dev) +{ + struct pci_bus *bus; + const u32 *busrange, *ranges; + int len, i; + struct resource *res; + unsigned int flags; + u64 size; + + printk("of_scan_pci_bridge(%s)\n", node->full_name); + + /* parse bus-range property */ + busrange = of_get_property(node, "bus-range", &len); + if (busrange == NULL || len != 8) { + printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", + node->full_name); + return; + } + ranges = of_get_property(node, "ranges", &len); + if (ranges == NULL) { + printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", + node->full_name); + return; + } + + bus = pci_add_new_bus(dev->bus, dev, busrange[0]); + if (!bus) { + printk(KERN_ERR "Failed to create pci bus for %s\n", + node->full_name); + return; + } + + bus->primary = dev->bus->number; + bus->subordinate = busrange[1]; + bus->bridge_ctl = 0; + + /* parse ranges property */ + /* PCI #address-cells == 3 and #size-cells == 2 always */ + res = &dev->resource[PCI_BRIDGE_RESOURCES]; + for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) { + res->flags = 0; + bus->resource[i] = res; + ++res; + } + i = 1; + for (; len >= 32; len -= 32, ranges += 8) { + struct resource *root; + + flags = pci_parse_of_flags(ranges[0]); + size = GET_64BIT(ranges, 6); + if (flags == 0 || size == 0) + continue; + if (flags & IORESOURCE_IO) { + res = bus->resource[0]; + if (res->flags) { + printk(KERN_ERR "PCI: ignoring extra I/O range" + " for bridge %s\n", node->full_name); + continue; + } + root = &pbm->io_space; + } else { + if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { + printk(KERN_ERR "PCI: too many memory ranges" + " for bridge %s\n", node->full_name); + continue; + } + res = bus->resource[i]; + ++i; + root = &pbm->mem_space; + } + + res->start = GET_64BIT(ranges, 1); + res->end = res->start + size - 1; + res->flags = flags; + + /* Another way to implement this would be to add an of_device + * layer routine that can calculate a resource for a given + * range property value in a PCI device. 
+ */ + pbm->parent->resource_adjust(dev, res, root); + } + sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), + bus->number); + printk(" bus name: %s\n", bus->name); + + pci_of_scan_bus(pbm, node, bus); +} + +static void __init pci_of_scan_bus(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_bus *bus) +{ + struct device_node *child; + const u32 *reg; + int reglen, devfn; + struct pci_dev *dev; + + printk("PCI: scan_bus[%s] bus no %d\n", + node->full_name, bus->number); + + child = NULL; + while ((child = of_get_next_child(node, child)) != NULL) { + printk(" * %s\n", child->full_name); + reg = of_get_property(child, "reg", ®len); + if (reg == NULL || reglen < 20) + continue; + devfn = (reg[0] >> 8) & 0xff; + + /* create a new pci_dev for this device */ + dev = of_create_pci_dev(pbm, child, bus, devfn); + if (!dev) + continue; + printk("PCI: dev header type: %x\n", dev->hdr_type); + + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || + dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + of_scan_pci_bridge(pbm, child, dev); + } +} + +static ssize_t +show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf) +{ + struct pci_dev *pdev; + struct device_node *dp; + + pdev = to_pci_dev(dev); + dp = pdev->dev.archdata.prom_node; + + return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name); +} + +static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); + +static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus) +{ + struct pci_dev *dev; + int err; + + list_for_each_entry(dev, &bus->devices, bus_list) { + /* we don't really care if we can create this file or + * not, but we need to assign the result of the call + * or the world will fall under alien invasion and + * everybody will be frozen on a spaceship ready to be + * eaten on alpha centauri by some green and jelly + * humanoid. + */ + err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr); + } +} + +struct pci_bus * __init pci_scan_one_pbm(struct pci_pbm_info *pbm) +{ + struct pci_controller_info *p = pbm->parent; + struct device_node *node = pbm->prom_node; + struct pci_bus *bus; + + printk("PCI: Scanning PBM %s\n", node->full_name); + + /* XXX parent device? XXX */ + bus = pci_create_bus(NULL, pbm->pci_first_busno, p->pci_ops, pbm); + if (!bus) { + printk(KERN_ERR "Failed to create bus for %s\n", + node->full_name); + return NULL; + } + bus->secondary = pbm->pci_first_busno; + bus->subordinate = pbm->pci_last_busno; + + bus->resource[0] = &pbm->io_space; + bus->resource[1] = &pbm->mem_space; + + pci_of_scan_bus(pbm, node, bus); + pci_bus_add_devices(bus); + pci_bus_register_of_sysfs(bus); + + return bus; +} + static void __init pci_scan_each_controller_bus(void) { struct pci_controller_info *p; @@ -360,8 +685,33 @@ void pcibios_align_resource(void *data, struct resource *res, { } -int pcibios_enable_device(struct pci_dev *pdev, int mask) +int pcibios_enable_device(struct pci_dev *dev, int mask) { + u16 cmd, oldcmd; + int i; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + oldcmd = cmd; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *res = &dev->resource[i]; + + /* Only set up the requested stuff */ + if (!(mask & (1<flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + if (res->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + + if (cmd != oldcmd) { + printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", + pci_name(dev), cmd); + /* Enable the appropriate bits in the PCI command register. 
*/ + pci_write_config_word(dev, PCI_COMMAND, cmd); + } return 0; } @@ -422,17 +772,10 @@ char * __devinit pcibios_setup(char *str) static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { - struct pcidev_cookie *pcp = pdev->sysdata; - struct pci_pbm_info *pbm; + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; struct pci_controller_info *p; unsigned long space_size, user_offset, user_size; - if (!pcp) - return -ENXIO; - pbm = pcp->pbm; - if (!pbm) - return -ENXIO; - p = pbm->parent; if (p->pbms_same_domain) { unsigned long lowest, highest; @@ -651,8 +994,7 @@ EXPORT_SYMBOL(pci_domain_nr); #ifdef CONFIG_PCI_MSI int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) { - struct pcidev_cookie *pcp = pdev->sysdata; - struct pci_pbm_info *pbm = pcp->pbm; + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; struct pci_controller_info *p = pbm->parent; int virt_irq, err; @@ -670,8 +1012,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq) { struct msi_desc *entry = get_irq_msi(virt_irq); struct pci_dev *pdev = entry->dev; - struct pcidev_cookie *pcp = pdev->sysdata; - struct pci_pbm_info *pbm = pcp->pbm; + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; struct pci_controller_info *p = pbm->parent; if (!pbm->msi_num || !p->setup_msi_irq) @@ -683,9 +1024,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq) struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) { - struct pcidev_cookie *pc = pdev->sysdata; - - return pc->op->node; + return pdev->dev.archdata.prom_node; } EXPORT_SYMBOL(pci_device_to_OF_node); diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index 5a92cb90ebe0..0d3c95df0d95 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c @@ -16,713 +16,6 @@ #include "pci_impl.h" -/* Fix self device of BUS and hook it into BUS->self. - * The pci_scan_bus does not do this for the host bridge. - */ -void __init pci_fixup_host_bridge_self(struct pci_bus *pbus) -{ - struct pci_dev *pdev; - - list_for_each_entry(pdev, &pbus->devices, bus_list) { - if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) { - pbus->self = pdev; - return; - } - } - - prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n"); - prom_halt(); -} - -/* Find the OBP PROM device tree node for a PCI device. */ -static struct device_node * __init -find_device_prom_node(struct pci_pbm_info *pbm, struct pci_dev *pdev, - struct device_node *bus_node, - struct linux_prom_pci_registers **pregs, - int *nregs) -{ - struct device_node *dp; - - *nregs = 0; - - /* - * Return the PBM's PROM node in case we are it's PCI device, - * as the PBM's reg property is different to standard PCI reg - * properties. We would delete this device entry otherwise, - * which confuses XFree86's device probing... 
- */ - if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) && - (pdev->vendor == PCI_VENDOR_ID_SUN) && - (pdev->device == PCI_DEVICE_ID_SUN_PBM || - pdev->device == PCI_DEVICE_ID_SUN_SCHIZO || - pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO || - pdev->device == PCI_DEVICE_ID_SUN_SABRE || - pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) - return bus_node; - - dp = bus_node->child; - while (dp) { - struct linux_prom_pci_registers *regs; - struct property *prop; - int len; - - prop = of_find_property(dp, "reg", &len); - if (!prop) - goto do_next_sibling; - - regs = prop->value; - if (((regs[0].phys_hi >> 8) & 0xff) == pdev->devfn) { - *pregs = regs; - *nregs = len / sizeof(struct linux_prom_pci_registers); - return dp; - } - - do_next_sibling: - dp = dp->sibling; - } - - return NULL; -} - -/* Older versions of OBP on PCI systems encode 64-bit MEM - * space assignments incorrectly, this fixes them up. We also - * take the opportunity here to hide other kinds of bogus - * assignments. - */ -static void __init fixup_obp_assignments(struct pci_dev *pdev, - struct pcidev_cookie *pcp) -{ - int i; - - if (pdev->vendor == PCI_VENDOR_ID_AL && - (pdev->device == PCI_DEVICE_ID_AL_M7101 || - pdev->device == PCI_DEVICE_ID_AL_M1533)) { - int i; - - /* Zap all of the normal resources, they are - * meaningless and generate bogus resource collision - * messages. This is OpenBoot's ill-fated attempt to - * represent the implicit resources that these devices - * have. - */ - pcp->num_prom_assignments = 0; - for (i = 0; i < 6; i++) { - pdev->resource[i].start = - pdev->resource[i].end = - pdev->resource[i].flags = 0; - } - pdev->resource[PCI_ROM_RESOURCE].start = - pdev->resource[PCI_ROM_RESOURCE].end = - pdev->resource[PCI_ROM_RESOURCE].flags = 0; - return; - } - - for (i = 0; i < pcp->num_prom_assignments; i++) { - struct linux_prom_pci_registers *ap; - int space; - - ap = &pcp->prom_assignments[i]; - space = ap->phys_hi >> 24; - if ((space & 0x3) == 2 && - (space & 0x4) != 0) { - ap->phys_hi &= ~(0x7 << 24); - ap->phys_hi |= 0x3 << 24; - } - } -} - -static ssize_t -show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf) -{ - struct pci_dev *pdev; - struct pcidev_cookie *sysdata; - - pdev = to_pci_dev(dev); - sysdata = pdev->sysdata; - - return snprintf (buf, PAGE_SIZE, "%s\n", sysdata->prom_node->full_name); -} - -static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); - -/* Fill in the PCI device cookie sysdata for the given - * PCI device. This cookie is the means by which one - * can get to OBP and PCI controller specific information - * for a PCI device. - */ -static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm, - struct pci_dev *pdev, - struct device_node *bus_node) -{ - struct linux_prom_pci_registers *pregs = NULL; - struct pcidev_cookie *pcp; - struct device_node *dp; - struct property *prop; - int nregs, len, err; - - dp = find_device_prom_node(pbm, pdev, bus_node, - &pregs, &nregs); - if (!dp) { - /* If it is not in the OBP device tree then - * there must be a damn good reason for it. - * - * So what we do is delete the device from the - * PCI device tree completely. This scenario - * is seen, for example, on CP1500 for the - * second EBUS/HappyMeal pair if the external - * connector for it is not present. 
- */ - pci_remove_bus_device(pdev); - return; - } - - pcp = kzalloc(sizeof(*pcp), GFP_ATOMIC); - if (pcp == NULL) { - prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n"); - prom_halt(); - } - pcp->pbm = pbm; - pcp->prom_node = dp; - pcp->op = of_find_device_by_node(dp); - memcpy(pcp->prom_regs, pregs, - nregs * sizeof(struct linux_prom_pci_registers)); - pcp->num_prom_regs = nregs; - - /* We can't have the pcidev_cookie assignments be just - * direct pointers into the property value, since they - * are potentially modified by the probing process. - */ - prop = of_find_property(dp, "assigned-addresses", &len); - if (!prop) { - pcp->num_prom_assignments = 0; - } else { - memcpy(pcp->prom_assignments, prop->value, len); - pcp->num_prom_assignments = - (len / sizeof(pcp->prom_assignments[0])); - } - - if (strcmp(dp->name, "ebus") == 0) { - struct linux_prom_ebus_ranges *erng; - int iter; - - /* EBUS is special... */ - prop = of_find_property(dp, "ranges", &len); - if (!prop) { - prom_printf("EBUS: Fatal error, no range property\n"); - prom_halt(); - } - erng = prop->value; - len = (len / sizeof(erng[0])); - for (iter = 0; iter < len; iter++) { - struct linux_prom_ebus_ranges *ep = &erng[iter]; - struct linux_prom_pci_registers *ap; - - ap = &pcp->prom_assignments[iter]; - - ap->phys_hi = ep->parent_phys_hi; - ap->phys_mid = ep->parent_phys_mid; - ap->phys_lo = ep->parent_phys_lo; - ap->size_hi = 0; - ap->size_lo = ep->size; - } - pcp->num_prom_assignments = len; - } - - fixup_obp_assignments(pdev, pcp); - - pdev->sysdata = pcp; - - /* we don't really care if we can create this file or not, - * but we need to assign the result of the call or the world will fall - * under alien invasion and everybody will be frozen on a spaceship - * ready to be eaten on alpha centauri by some green and jelly humanoid. - */ - err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_obppath.attr); -} - -void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus, - struct pci_pbm_info *pbm, - struct device_node *dp) -{ - struct pci_dev *pdev, *pdev_next; - struct pci_bus *this_pbus, *pbus_next; - - /* This must be _safe because the cookie fillin - routine can delete devices from the tree. */ - list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list) - pdev_cookie_fillin(pbm, pdev, dp); - - list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) { - struct pcidev_cookie *pcp = this_pbus->self->sysdata; - - pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node); - } -} - -static void __init bad_assignment(struct pci_dev *pdev, - struct linux_prom_pci_registers *ap, - struct resource *res, - int do_prom_halt) -{ - prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n", - pdev->bus->number, pdev->devfn); - if (ap) - prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n", - ap->phys_hi, ap->phys_mid, ap->phys_lo, - ap->size_hi, ap->size_lo); - if (res) - prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n", - res->start, res->end, res->flags); - if (do_prom_halt) - prom_halt(); -} - -static struct resource * -__init get_root_resource(struct linux_prom_pci_registers *ap, - struct pci_pbm_info *pbm) -{ - int space = (ap->phys_hi >> 24) & 3; - - switch (space) { - case 0: - /* Configuration space, silently ignore it. */ - return NULL; - - case 1: - /* 16-bit IO space */ - return &pbm->io_space; - - case 2: - /* 32-bit MEM space */ - return &pbm->mem_space; - - case 3: - /* 64-bit MEM space, these are allocated out of - * the 32-bit mem_space range for the PBM, ie. 
- * we just zero out the upper 32-bits. - */ - return &pbm->mem_space; - - default: - printk("PCI: What is resource space %x?\n", space); - return NULL; - }; -} - -static struct resource * -__init get_device_resource(struct linux_prom_pci_registers *ap, - struct pci_dev *pdev) -{ - struct resource *res; - int breg = (ap->phys_hi & 0xff); - - switch (breg) { - case PCI_ROM_ADDRESS: - /* Unfortunately I have seen several cases where - * buggy FCODE uses a space value of '1' (I/O space) - * in the register property for the ROM address - * so disable this sanity check for now. - */ -#if 0 - { - int space = (ap->phys_hi >> 24) & 3; - - /* It had better be MEM space. */ - if (space != 2) - bad_assignment(pdev, ap, NULL, 0); - } -#endif - res = &pdev->resource[PCI_ROM_RESOURCE]; - break; - - case PCI_BASE_ADDRESS_0: - case PCI_BASE_ADDRESS_1: - case PCI_BASE_ADDRESS_2: - case PCI_BASE_ADDRESS_3: - case PCI_BASE_ADDRESS_4: - case PCI_BASE_ADDRESS_5: - res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4]; - break; - - default: - bad_assignment(pdev, ap, NULL, 0); - res = NULL; - break; - }; - - return res; -} - -static void __init pdev_record_assignments(struct pci_pbm_info *pbm, - struct pci_dev *pdev) -{ - struct pcidev_cookie *pcp = pdev->sysdata; - int i; - - for (i = 0; i < pcp->num_prom_assignments; i++) { - struct linux_prom_pci_registers *ap; - struct resource *root, *res; - - /* The format of this property is specified in - * the PCI Bus Binding to IEEE1275-1994. - */ - ap = &pcp->prom_assignments[i]; - root = get_root_resource(ap, pbm); - res = get_device_resource(ap, pdev); - if (root == NULL || res == NULL || - res->flags == 0) - continue; - - /* Ok we know which resource this PROM assignment is - * for, sanity check it. - */ - if ((res->start & 0xffffffffUL) != ap->phys_lo) - bad_assignment(pdev, ap, res, 1); - - /* If it is a 64-bit MEM space assignment, verify that - * the resource is too and that the upper 32-bits match. - */ - if (((ap->phys_hi >> 24) & 3) == 3) { - if (((res->flags & IORESOURCE_MEM) == 0) || - ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) - != PCI_BASE_ADDRESS_MEM_TYPE_64)) - bad_assignment(pdev, ap, res, 1); - if ((res->start >> 32) != ap->phys_mid) - bad_assignment(pdev, ap, res, 1); - - /* PBM cannot generate cpu initiated PIOs - * to the full 64-bit space. Therefore the - * upper 32-bits better be zero. If it is - * not, just skip it and we will assign it - * properly ourselves. - */ - if ((res->start >> 32) != 0UL) { - printk(KERN_ERR "PCI: OBP assigns out of range MEM address " - "%016lx for region %ld on device %s\n", - res->start, (res - &pdev->resource[0]), pci_name(pdev)); - continue; - } - } - - /* Adjust the resource into the physical address space - * of this PBM. - */ - pbm->parent->resource_adjust(pdev, res, root); - - if (request_resource(root, res) < 0) { - int rnum; - - /* OK, there is some conflict. But this is fine - * since we'll reassign it in the fixup pass. - * - * Do not print the warning for ROM resources - * as such a conflict is quite common and - * harmless as the ROM bar is disabled. 
- */ - rnum = (res - &pdev->resource[0]); - if (rnum != PCI_ROM_RESOURCE) - printk(KERN_ERR "PCI: Resource collision, " - "region %d " - "[%016lx:%016lx] of device %s\n", - rnum, - res->start, res->end, - pci_name(pdev)); - } - } -} - -void __init pci_record_assignments(struct pci_pbm_info *pbm, - struct pci_bus *pbus) -{ - struct pci_dev *dev; - struct pci_bus *bus; - - list_for_each_entry(dev, &pbus->devices, bus_list) - pdev_record_assignments(pbm, dev); - - list_for_each_entry(bus, &pbus->children, node) - pci_record_assignments(pbm, bus); -} - -/* Return non-zero if PDEV has implicit I/O resources even - * though it may not have an I/O base address register - * active. - */ -static int __init has_implicit_io(struct pci_dev *pdev) -{ - int class = pdev->class >> 8; - - if (class == PCI_CLASS_NOT_DEFINED || - class == PCI_CLASS_NOT_DEFINED_VGA || - class == PCI_CLASS_STORAGE_IDE || - (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) - return 1; - - return 0; -} - -static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm, - struct pci_dev *pdev) -{ - u32 reg; - u16 cmd; - int i, io_seen, mem_seen; - - io_seen = mem_seen = 0; - for (i = 0; i < PCI_NUM_RESOURCES; i++) { - struct resource *root, *res; - unsigned long size, min, max, align; - - res = &pdev->resource[i]; - - if (res->flags & IORESOURCE_IO) - io_seen++; - else if (res->flags & IORESOURCE_MEM) - mem_seen++; - - /* If it is already assigned or the resource does - * not exist, there is nothing to do. - */ - if (res->parent != NULL || res->flags == 0UL) - continue; - - /* Determine the root we allocate from. */ - if (res->flags & IORESOURCE_IO) { - root = &pbm->io_space; - min = root->start + 0x400UL; - max = root->end; - } else { - root = &pbm->mem_space; - min = root->start; - max = min + 0x80000000UL; - } - - size = res->end - res->start; - align = size + 1; - if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) { - /* uh oh */ - prom_printf("PCI: Failed to allocate resource %d for %s\n", - i, pci_name(pdev)); - prom_halt(); - } - - /* Update PCI config space. */ - pbm->parent->base_address_update(pdev, i); - } - - /* Special case, disable the ROM. Several devices - * act funny (ie. do not respond to memory space writes) - * when it is left enabled. A good example are Qlogic,ISP - * adapters. - */ - pci_read_config_dword(pdev, PCI_ROM_ADDRESS, ®); - reg &= ~PCI_ROM_ADDRESS_ENABLE; - pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg); - - /* If we saw I/O or MEM resources, enable appropriate - * bits in PCI command register. - */ - if (io_seen || mem_seen) { - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - if (io_seen || has_implicit_io(pdev)) - cmd |= PCI_COMMAND_IO; - if (mem_seen) - cmd |= PCI_COMMAND_MEMORY; - pci_write_config_word(pdev, PCI_COMMAND, cmd); - } - - /* If this is a PCI bridge or an IDE controller, - * enable bus mastering. In the former case also - * set the cache line size correctly. 
- */ - if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) || - (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) && - ((pdev->class & 0x80) != 0))) { - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - cmd |= PCI_COMMAND_MASTER; - pci_write_config_word(pdev, PCI_COMMAND, cmd); - - if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) - pci_write_config_byte(pdev, - PCI_CACHE_LINE_SIZE, - (64 / sizeof(u32))); - } -} - -void __init pci_assign_unassigned(struct pci_pbm_info *pbm, - struct pci_bus *pbus) -{ - struct pci_dev *dev; - struct pci_bus *bus; - - list_for_each_entry(dev, &pbus->devices, bus_list) - pdev_assign_unassigned(pbm, dev); - - list_for_each_entry(bus, &pbus->children, node) - pci_assign_unassigned(pbm, bus); -} - -static void __init pdev_fixup_irq(struct pci_dev *pdev) -{ - struct pcidev_cookie *pcp = pdev->sysdata; - struct of_device *op = pcp->op; - - if (op->irqs[0] == 0xffffffff) { - pdev->irq = PCI_IRQ_NONE; - return; - } - - pdev->irq = op->irqs[0]; - - pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, - pdev->irq & PCI_IRQ_INO); -} - -void __init pci_fixup_irq(struct pci_pbm_info *pbm, - struct pci_bus *pbus) -{ - struct pci_dev *dev; - struct pci_bus *bus; - - list_for_each_entry(dev, &pbus->devices, bus_list) - pdev_fixup_irq(dev); - - list_for_each_entry(bus, &pbus->children, node) - pci_fixup_irq(pbm, bus); -} - -static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz) -{ - u16 cmd; - u8 hdr_type, min_gnt, ltimer; - - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - cmd |= PCI_COMMAND_MASTER; - pci_write_config_word(pdev, PCI_COMMAND, cmd); - - /* Read it back, if the mastering bit did not - * get set, the device does not support bus - * mastering so we have nothing to do here. - */ - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - if ((cmd & PCI_COMMAND_MASTER) == 0) - return; - - /* Set correct cache line size, 64-byte on all - * Sparc64 PCI systems. Note that the value is - * measured in 32-bit words. - */ - pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, - 64 / sizeof(u32)); - - pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type); - hdr_type &= ~0x80; - if (hdr_type != PCI_HEADER_TYPE_NORMAL) - return; - - /* If the latency timer is already programmed with a non-zero - * value, assume whoever set it (OBP or whoever) knows what - * they are doing. - */ - pci_read_config_byte(pdev, PCI_LATENCY_TIMER, <imer); - if (ltimer != 0) - return; - - /* XXX Since I'm tipping off the min grant value to - * XXX choose a suitable latency timer value, I also - * XXX considered making use of the max latency value - * XXX as well. Unfortunately I've seen too many bogusly - * XXX low settings for it to the point where it lacks - * XXX any usefulness. In one case, an ethernet card - * XXX claimed a min grant of 10 and a max latency of 5. - * XXX Now, if I had two such cards on the same bus I - * XXX could not set the desired burst period (calculated - * XXX from min grant) without violating the max latency - * XXX bound. Duh... - * XXX - * XXX I blame dumb PC bios implementors for stuff like - * XXX this, most of them don't even try to do something - * XXX sensible with latency timer values and just set some - * XXX default value (usually 32) into every device. - */ - - pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt); - - if (min_gnt == 0) { - /* If no min_gnt setting then use a default - * value. 
- */ - if (is_66mhz) - ltimer = 16; - else - ltimer = 32; - } else { - int shift_factor; - - if (is_66mhz) - shift_factor = 2; - else - shift_factor = 3; - - /* Use a default value when the min_gnt value - * is erroneously high. - */ - if (((unsigned int) min_gnt << shift_factor) > 512 || - ((min_gnt << shift_factor) & 0xff) == 0) { - ltimer = 8 << shift_factor; - } else { - ltimer = min_gnt << shift_factor; - } - } - - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer); -} - -void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm, - struct pci_bus *pbus) -{ - struct pci_dev *pdev; - int all_are_66mhz; - u16 status; - - if (pbm->is_66mhz_capable == 0) { - all_are_66mhz = 0; - goto out; - } - - all_are_66mhz = 1; - list_for_each_entry(pdev, &pbus->devices, bus_list) { - pci_read_config_word(pdev, PCI_STATUS, &status); - if (!(status & PCI_STATUS_66MHZ)) { - all_are_66mhz = 0; - break; - } - } -out: - pbm->all_devs_66mhz = all_are_66mhz; - - printk("PCI%d(PBM%c): Bus running at %dMHz\n", - pbm->parent->index, - (pbm == &pbm->parent->pbm_A) ? 'A' : 'B', - (all_are_66mhz ? 66 : 33)); -} - -void pci_setup_busmastering(struct pci_pbm_info *pbm, - struct pci_bus *pbus) -{ - struct pci_dev *dev; - struct pci_bus *bus; - int is_66mhz; - - is_66mhz = pbm->is_66mhz_capable && pbm->all_devs_66mhz; - - list_for_each_entry(dev, &pbus->devices, bus_list) - pdev_setup_busmastering(dev, is_66mhz); - - list_for_each_entry(bus, &pbus->children, node) - pci_setup_busmastering(pbm, bus); -} - void pci_register_legacy_regions(struct resource *io_res, struct resource *mem_res) { diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h index 971e2bea30b4..ea8a6bd146ae 100644 --- a/arch/sparc64/kernel/pci_impl.h +++ b/arch/sparc64/kernel/pci_impl.h @@ -17,20 +17,7 @@ extern struct pci_controller_info *pci_controller_root; extern int pci_num_controllers; /* PCI bus scanning and fixup support. 
*/ -extern void pci_fixup_host_bridge_self(struct pci_bus *pbus); -extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus, - struct pci_pbm_info *pbm, - struct device_node *prom_node); -extern void pci_record_assignments(struct pci_pbm_info *pbm, - struct pci_bus *pbus); -extern void pci_assign_unassigned(struct pci_pbm_info *pbm, - struct pci_bus *pbus); -extern void pci_fixup_irq(struct pci_pbm_info *pbm, - struct pci_bus *pbus); -extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm, - struct pci_bus *pbus); -extern void pci_setup_busmastering(struct pci_pbm_info *pbm, - struct pci_bus *pbus); +extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm); extern void pci_register_legacy_regions(struct resource *io_res, struct resource *mem_res); diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index 7aca0f33f885..554daabb381e 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c @@ -220,7 +220,6 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx) */ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; iopte_t *iopte; unsigned long flags, order, first_page; @@ -237,8 +236,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr return NULL; memset((char *)first_page, 0, PAGE_SIZE << order); - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; + iommu = pdev->dev.archdata.iommu; spin_lock_irqsave(&iommu->lock, flags); iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT); @@ -268,14 +266,12 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr /* Free and unmap a consistent DMA translation. */ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; iopte_t *iopte; unsigned long flags, order, npages; npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; + iommu = pdev->dev.archdata.iommu; iopte = iommu->page_table + ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); @@ -295,7 +291,6 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, */ static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; struct pci_strbuf *strbuf; iopte_t *base; @@ -304,9 +299,8 @@ static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, u32 bus_addr, ret; unsigned long iopte_protection; - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; - strbuf = &pcp->pbm->stc; + iommu = pdev->dev.archdata.iommu; + strbuf = pdev->dev.archdata.stc; if (unlikely(direction == PCI_DMA_NONE)) goto bad_no_ctx; @@ -416,7 +410,6 @@ do_flush_sync: /* Unmap a single streaming mode DMA translation. 
*/ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; struct pci_strbuf *strbuf; iopte_t *base; @@ -428,9 +421,8 @@ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_ return; } - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; - strbuf = &pcp->pbm->stc; + iommu = pdev->dev.archdata.iommu; + strbuf = pdev->dev.archdata.stc; npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; @@ -549,7 +541,6 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, */ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; struct pci_strbuf *strbuf; unsigned long flags, ctx, npages, iopte_protection; @@ -570,9 +561,8 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n return 1; } - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; - strbuf = &pcp->pbm->stc; + iommu = pdev->dev.archdata.iommu; + strbuf = pdev->dev.archdata.stc; if (unlikely(direction == PCI_DMA_NONE)) goto bad_no_ctx; @@ -636,7 +626,6 @@ bad_no_ctx: /* Unmap a set of streaming mode DMA translations. */ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; struct pci_strbuf *strbuf; iopte_t *base; @@ -648,9 +637,8 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in WARN_ON(1); } - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; - strbuf = &pcp->pbm->stc; + iommu = pdev->dev.archdata.iommu; + strbuf = pdev->dev.archdata.stc; bus_addr = sglist->dma_address & IO_PAGE_MASK; @@ -696,14 +684,12 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in */ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; struct pci_strbuf *strbuf; unsigned long flags, ctx, npages; - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; - strbuf = &pcp->pbm->stc; + iommu = pdev->dev.archdata.iommu; + strbuf = pdev->dev.archdata.stc; if (!strbuf->strbuf_enabled) return; @@ -736,15 +722,13 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_ */ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; struct pci_strbuf *strbuf; unsigned long flags, ctx, npages, i; u32 bus_addr; - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; - strbuf = &pcp->pbm->stc; + iommu = pdev->dev.archdata.iommu; + strbuf = pdev->dev.archdata.stc; if (!strbuf->strbuf_enabled) return; @@ -809,13 +793,12 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) { - struct pcidev_cookie *pcp = pdev->sysdata; u64 dma_addr_mask; if (pdev == NULL) { dma_addr_mask = 0xffffffff; } else { - struct pci_iommu *iommu = pcp->pbm->iommu; + struct pci_iommu *iommu = pdev->dev.archdata.iommu; dma_addr_mask = iommu->dma_addr_mask; diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index fda5db223d96..12ea30d30b2f 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -905,8 +905,7 @@ static void psycho_resource_adjust(struct pci_dev *pdev, static 
void psycho_base_address_update(struct pci_dev *pdev, int resource) { - struct pcidev_cookie *pcp = pdev->sysdata; - struct pci_pbm_info *pbm = pcp->pbm; + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; struct resource *res, *root; u32 reg; int where, size, is_64bit; @@ -968,28 +967,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) static void pbm_scan_bus(struct pci_controller_info *p, struct pci_pbm_info *pbm) { - struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); - - if (!cookie) { - prom_printf("PSYCHO: Critical allocation failure.\n"); - prom_halt(); - } - - /* All we care about is the PBM. */ - cookie->pbm = pbm; - - pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, - p->pci_ops, - pbm); - pci_fixup_host_bridge_self(pbm->pci_bus); - pbm->pci_bus->self->sysdata = cookie; - - pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); - pci_record_assignments(pbm, pbm->pci_bus); - pci_assign_unassigned(pbm, pbm->pci_bus); - pci_fixup_irq(pbm, pbm->pci_bus); - pci_determine_66mhz_disposition(pbm, pbm->pci_bus); - pci_setup_busmastering(pbm, pbm->pci_bus); + pbm->pci_bus = pci_scan_one_pbm(pbm); } static void psycho_scan_bus(struct pci_controller_info *p) diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index 94bb681f2323..bbf624517508 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -710,8 +710,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p) p->index); ret = IRQ_HANDLED; } - pci_read_config_word(sabre_root_bus->self, - PCI_STATUS, &stat); + pci_bus_read_config_word(sabre_root_bus, 0, + PCI_STATUS, &stat); if (stat & (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT | @@ -719,8 +719,8 @@ static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p) PCI_STATUS_SIG_SYSTEM_ERROR)) { printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n", p->index, stat); - pci_write_config_word(sabre_root_bus->self, - PCI_STATUS, 0xffff); + pci_bus_write_config_word(sabre_root_bus, 0, + PCI_STATUS, 0xffff); ret = IRQ_HANDLED; } return ret; @@ -887,8 +887,7 @@ static void sabre_resource_adjust(struct pci_dev *pdev, static void sabre_base_address_update(struct pci_dev *pdev, int resource) { - struct pcidev_cookie *pcp = pdev->sysdata; - struct pci_pbm_info *pbm = pcp->pbm; + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; struct resource *res; unsigned long base; u32 reg; @@ -978,27 +977,11 @@ static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus) } } -static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) -{ - struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); - - if (!cookie) { - prom_printf("SABRE: Critical allocation failure.\n"); - prom_halt(); - } - - /* All we care about is the PBM. 
*/ - cookie->pbm = pbm; - - return cookie; -} - static void sabre_scan_bus(struct pci_controller_info *p) { static int once; struct pci_bus *sabre_bus, *pbus; struct pci_pbm_info *pbm; - struct pcidev_cookie *cookie; int sabres_scanned; /* The APB bridge speaks to the Sabre host PCI bridge @@ -1020,13 +1003,9 @@ static void sabre_scan_bus(struct pci_controller_info *p) } once++; - cookie = alloc_bridge_cookie(&p->pbm_A); - - sabre_bus = pci_scan_bus(p->pci_first_busno, - p->pci_ops, - &p->pbm_A); - pci_fixup_host_bridge_self(sabre_bus); - sabre_bus->self->sysdata = cookie; + sabre_bus = pci_scan_one_pbm(&p->pbm_A); + if (!sabre_bus) + return; sabre_root_bus = sabre_bus; @@ -1043,19 +1022,9 @@ static void sabre_scan_bus(struct pci_controller_info *p) } else continue; - cookie = alloc_bridge_cookie(pbm); - pbus->self->sysdata = cookie; - sabres_scanned++; - pbus->sysdata = pbm; pbm->pci_bus = pbus; - pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node); - pci_record_assignments(pbm, pbus); - pci_assign_unassigned(pbm, pbus); - pci_fixup_irq(pbm, pbus); - pci_determine_66mhz_disposition(pbm, pbus); - pci_setup_busmastering(pbm, pbus); } if (!sabres_scanned) { @@ -1063,12 +1032,6 @@ static void sabre_scan_bus(struct pci_controller_info *p) pbm = &p->pbm_A; sabre_bus->sysdata = pbm; pbm->pci_bus = sabre_bus; - pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node); - pci_record_assignments(pbm, sabre_bus); - pci_assign_unassigned(pbm, sabre_bus); - pci_fixup_irq(pbm, sabre_bus); - pci_determine_66mhz_disposition(pbm, sabre_bus); - pci_setup_busmastering(pbm, sabre_bus); } sabre_register_error_handlers(p); diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 66911b126aed..48dea52ac522 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -1232,28 +1232,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) static void pbm_scan_bus(struct pci_controller_info *p, struct pci_pbm_info *pbm) { - struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); - - if (!cookie) { - prom_printf("%s: Critical allocation failure.\n", pbm->name); - prom_halt(); - } - - /* All we care about is the PBM. */ - cookie->pbm = pbm; - - pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, - p->pci_ops, - pbm); - pci_fixup_host_bridge_self(pbm->pci_bus); - pbm->pci_bus->self->sysdata = cookie; - - pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); - pci_record_assignments(pbm, pbm->pci_bus); - pci_assign_unassigned(pbm, pbm->pci_bus); - pci_fixup_irq(pbm, pbm->pci_bus); - pci_determine_66mhz_disposition(pbm, pbm->pci_bus); - pci_setup_busmastering(pbm, pbm->pci_bus); + pbm->pci_bus = pci_scan_one_pbm(pbm); } static void __schizo_scan_bus(struct pci_controller_info *p, @@ -1297,8 +1276,7 @@ static void tomatillo_scan_bus(struct pci_controller_info *p) static void schizo_base_address_update(struct pci_dev *pdev, int resource) { - struct pcidev_cookie *pcp = pdev->sysdata; - struct pci_pbm_info *pbm = pcp->pbm; + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; struct resource *res, *root; u32 reg; int where, size, is_64bit; diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index bd74c155519e..eec7def379dc 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -53,8 +53,8 @@ static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long pro /* Interrupts must be disabled. 
*/ static long pci_iommu_batch_flush(struct pci_iommu_batch *p) { - struct pcidev_cookie *pcp = p->pdev->sysdata; - unsigned long devhandle = pcp->pbm->devhandle; + struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller; + unsigned long devhandle = pbm->devhandle; unsigned long prot = p->prot; unsigned long entry = p->entry; u64 *pglist = p->pglist; @@ -159,7 +159,6 @@ static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, un static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; unsigned long flags, order, first_page, npages, n; void *ret; @@ -178,8 +177,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr memset((char *)first_page, 0, PAGE_SIZE << order); - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; + iommu = pdev->dev.archdata.iommu; spin_lock_irqsave(&iommu->lock, flags); entry = pci_arena_alloc(&iommu->arena, npages); @@ -226,15 +224,15 @@ arena_alloc_fail: static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) { - struct pcidev_cookie *pcp; + struct pci_pbm_info *pbm; struct pci_iommu *iommu; unsigned long flags, order, npages, entry; u32 devhandle; npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; - devhandle = pcp->pbm->devhandle; + iommu = pdev->dev.archdata.iommu; + pbm = pdev->dev.archdata.host_controller; + devhandle = pbm->devhandle; entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); spin_lock_irqsave(&iommu->lock, flags); @@ -259,7 +257,6 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; unsigned long flags, npages, oaddr; unsigned long i, base_paddr; @@ -267,8 +264,7 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, unsigned long prot; long entry; - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; + iommu = pdev->dev.archdata.iommu; if (unlikely(direction == PCI_DMA_NONE)) goto bad; @@ -324,7 +320,7 @@ iommu_map_fail: static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { - struct pcidev_cookie *pcp; + struct pci_pbm_info *pbm; struct pci_iommu *iommu; unsigned long flags, npages; long entry; @@ -336,9 +332,9 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_ return; } - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; - devhandle = pcp->pbm->devhandle; + iommu = pdev->dev.archdata.iommu; + pbm = pdev->dev.archdata.host_controller; + devhandle = pbm->devhandle; npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; @@ -460,7 +456,6 @@ iommu_map_failed: static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - struct pcidev_cookie *pcp; struct pci_iommu *iommu; unsigned long flags, npages, prot; u32 dma_base; @@ -480,8 +475,7 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n return 1; } - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; + iommu = pdev->dev.archdata.iommu; if (unlikely(direction == PCI_DMA_NONE)) goto bad; @@ -537,7 +531,7 @@ iommu_map_failed: static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - struct pcidev_cookie *pcp; 
+ struct pci_pbm_info *pbm; struct pci_iommu *iommu; unsigned long flags, i, npages; long entry; @@ -548,9 +542,9 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in WARN_ON(1); } - pcp = pdev->sysdata; - iommu = pcp->pbm->iommu; - devhandle = pcp->pbm->devhandle; + iommu = pdev->dev.archdata.iommu; + pbm = pdev->dev.archdata.host_controller; + devhandle = pbm->devhandle; bus_addr = sglist->dma_address & IO_PAGE_MASK; @@ -600,132 +594,12 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = { .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, }; -/* SUN4V PCI configuration space accessors. */ - -struct pdev_entry { - struct pdev_entry *next; - u32 devhandle; - unsigned int bus; - unsigned int device; - unsigned int func; -}; - -#define PDEV_HTAB_SIZE 16 -#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1) -static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE]; - -static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func) -{ - unsigned int val; - - val = (devhandle ^ (devhandle >> 4)); - val ^= bus; - val ^= device; - val ^= func; - - return val & PDEV_HTAB_MASK; -} - -static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func) -{ - struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL); - struct pdev_entry **slot; - - if (!p) - return -ENOMEM; - - slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)]; - p->next = *slot; - *slot = p; - - p->devhandle = devhandle; - p->bus = bus; - p->device = device; - p->func = func; - - return 0; -} - -/* Recursively descend into the OBP device tree, rooted at toplevel_node, - * looking for a PCI device matching bus and devfn. - */ -static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn) -{ - toplevel_node = toplevel_node->child; - - while (toplevel_node != NULL) { - struct linux_prom_pci_registers *regs; - struct property *prop; - int ret; - - ret = obp_find(toplevel_node, bus, devfn); - if (ret != 0) - return ret; - - prop = of_find_property(toplevel_node, "reg", NULL); - if (!prop) - goto next_sibling; - - regs = prop->value; - if (((regs->phys_hi >> 16) & 0xff) == bus && - ((regs->phys_hi >> 8) & 0xff) == devfn) - break; - - next_sibling: - toplevel_node = toplevel_node->sibling; - } - - return toplevel_node != NULL; -} - -static int pdev_htab_populate(struct pci_pbm_info *pbm) -{ - u32 devhandle = pbm->devhandle; - unsigned int bus; - - for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) { - unsigned int devfn; - - for (devfn = 0; devfn < 256; devfn++) { - unsigned int device = PCI_SLOT(devfn); - unsigned int func = PCI_FUNC(devfn); - - if (obp_find(pbm->prom_node, bus, devfn)) { - int err = pdev_htab_add(devhandle, bus, - device, func); - if (err) - return err; - } - } - } - - return 0; -} - -static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func) -{ - struct pdev_entry *p; - - p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)]; - while (p) { - if (p->devhandle == devhandle && - p->bus == bus && - p->device == device && - p->func == func) - break; - - p = p->next; - } - - return p; -} - static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) { if (bus < pbm->pci_first_busno || bus > pbm->pci_last_busno) return 1; - return pdev_find(pbm->devhandle, bus, device, func) == NULL; + return 0; } static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int 
devfn, @@ -800,27 +674,7 @@ static struct pci_ops pci_sun4v_ops = { static void pbm_scan_bus(struct pci_controller_info *p, struct pci_pbm_info *pbm) { - struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); - - if (!cookie) { - prom_printf("%s: Critical allocation failure.\n", pbm->name); - prom_halt(); - } - - /* All we care about is the PBM. */ - cookie->pbm = pbm; - - pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm); -#if 0 - pci_fixup_host_bridge_self(pbm->pci_bus); - pbm->pci_bus->self->sysdata = cookie; -#endif - pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); - pci_record_assignments(pbm, pbm->pci_bus); - pci_assign_unassigned(pbm, pbm->pci_bus); - pci_fixup_irq(pbm, pbm->pci_bus); - pci_determine_66mhz_disposition(pbm, pbm->pci_bus); - pci_setup_busmastering(pbm, pbm->pci_bus); + pbm->pci_bus = pci_scan_one_pbm(pbm); } static void pci_sun4v_scan_bus(struct pci_controller_info *p) @@ -846,8 +700,7 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p) static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) { - struct pcidev_cookie *pcp = pdev->sysdata; - struct pci_pbm_info *pbm = pcp->pbm; + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; struct resource *res, *root; u32 reg; int where, size, is_64bit; @@ -1410,8 +1263,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p, struct pci_dev *pdev, struct msi_desc *entry) { - struct pcidev_cookie *pcp = pdev->sysdata; - struct pci_pbm_info *pbm = pcp->pbm; + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; unsigned long devino, msiqid; struct msi_msg msg; int msi_num, err; @@ -1455,7 +1307,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p, if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID)) goto out_err; - pcp->msi_num = msi_num; + pdev->dev.archdata.msi_num = msi_num; if (entry->msi_attrib.is_64) { msg.address_hi = pbm->msi64_start >> 32; @@ -1484,12 +1336,11 @@ out_err: static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq, struct pci_dev *pdev) { - struct pcidev_cookie *pcp = pdev->sysdata; - struct pci_pbm_info *pbm = pcp->pbm; + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; unsigned long msiqid, err; unsigned int msi_num; - msi_num = pcp->msi_num; + msi_num = pdev->dev.archdata.msi_num; err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid); if (err) { printk(KERN_ERR "%s: getmsiq gives error %lu\n", @@ -1559,8 +1410,6 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node pci_sun4v_get_bus_range(pbm); pci_sun4v_iommu_init(pbm); pci_sun4v_msi_init(pbm); - - pdev_htab_populate(pbm); } void sun4v_pci_init(struct device_node *dp, char *model_name) diff --git a/include/asm-sparc64/device.h b/include/asm-sparc64/device.h index d8f9872b0e2d..d5a4559b9555 100644 --- a/include/asm-sparc64/device.h +++ b/include/asm-sparc64/device.h @@ -3,5 +3,21 @@ * * This file is released under the GPLv2 */ -#include +#ifndef _ASM_SPARC64_DEVICE_H +#define _ASM_SPARC64_DEVICE_H +struct device_node; +struct of_device; + +struct dev_archdata { + void *iommu; + void *stc; + void *host_controller; + + struct device_node *prom_node; + struct of_device *op; + + unsigned int msi_num; +}; + +#endif /* _ASM_SPARC64_DEVICE_H */ diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 7a246d8a1828..88974d685a3d 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -244,27 +244,4 @@ struct pci_controller_info { unsigned 
int pci_last_busno; }; -/* PCI devices which are not bridges have this placed in their pci_dev - * sysdata member. This makes OBP aware PCI device drivers easier to - * code. - */ -struct pcidev_cookie { - struct pci_pbm_info *pbm; - struct device_node *prom_node; - struct of_device *op; - struct linux_prom_pci_registers prom_regs[PROMREG_MAX]; - int num_prom_regs; - struct linux_prom_pci_registers prom_assignments[PROMREG_MAX]; - int num_prom_assignments; -#ifdef CONFIG_PCI_MSI - unsigned int msi_num; -#endif -}; - -/* Currently these are the same across all PCI controllers - * we support. Someday they may not be... - */ -#define PCI_IRQ_IGN 0x000007c0 /* Interrupt Group Number */ -#define PCI_IRQ_INO 0x0000003f /* Interrupt Number */ - #endif /* !(__SPARC64_PBM_H) */ -- cgit v1.2.3-59-g8ed1b From 1e8a8cc52daa95e702303ca3ce67955a4c051d7d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 28 Feb 2007 23:38:38 -0800 Subject: [SPARC64]: Internalize pci_memspace_mask. The only user was bus_dvma_to_mem() which is no longer used by any driver, so kill that, and the export of pci_memspace_mask. The only user now is the PCI mmap support code. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci.c | 2 ++ arch/sparc64/kernel/pci_impl.h | 1 + arch/sparc64/kernel/sparc64_ksyms.c | 1 - include/asm-sparc64/io.h | 8 -------- 4 files changed, 3 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index 246b8009a2b4..ec0d12a48c2b 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c @@ -27,6 +27,8 @@ #include #include +#include "pci_impl.h" + unsigned long pci_memspace_mask = 0xffffffffUL; #ifndef CONFIG_PCI diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h index ea8a6bd146ae..c4ba702e1fd9 100644 --- a/arch/sparc64/kernel/pci_impl.h +++ b/arch/sparc64/kernel/pci_impl.h @@ -13,6 +13,7 @@ #include extern struct pci_controller_info *pci_controller_root; +extern unsigned long pci_memspace_mask; extern int pci_num_controllers; diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index beffc82a1e85..d00f51a5683f 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -212,7 +212,6 @@ EXPORT_SYMBOL(insl); #ifdef CONFIG_PCI EXPORT_SYMBOL(ebus_chain); EXPORT_SYMBOL(isa_chain); -EXPORT_SYMBOL(pci_memspace_mask); EXPORT_SYMBOL(pci_alloc_consistent); EXPORT_SYMBOL(pci_free_consistent); EXPORT_SYMBOL(pci_map_single); diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h index 30b912d8e8bc..ad595b679842 100644 --- a/include/asm-sparc64/io.h +++ b/include/asm-sparc64/io.h @@ -24,14 +24,6 @@ extern unsigned long kern_base, kern_size; #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) #define BIO_VMERGE_BOUNDARY 8192 -/* Different PCI controllers we support have their PCI MEM space - * mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area, - * so need to chop off the top 33 or 32 bits. - */ -extern unsigned long pci_memspace_mask; - -#define bus_dvma_to_mem(__vaddr) ((__vaddr) & pci_memspace_mask) - static __inline__ u8 _inb(unsigned long addr) { u8 ret; -- cgit v1.2.3-59-g8ed1b From 229177c7f38d6a2b1285b42da4b19d76346b4bac Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 8 Mar 2007 22:11:00 -0800 Subject: [SPARC64]: Kill PBM intmap software state. Set but never used. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci_psycho.c | 12 ------------ arch/sparc64/kernel/pci_schizo.c | 8 -------- arch/sparc64/kernel/pci_sun4v.c | 8 -------- include/asm-sparc64/pbm.h | 3 --- 4 files changed, 31 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index c08681b7a44e..7b7010a1eb2f 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -1175,18 +1175,6 @@ static void psycho_pbm_init(struct pci_controller_info *p, pbm->num_pbm_ranges = 0; } - prop = of_find_property(dp, "interrupt-map", &len); - if (prop) { - pbm->pbm_intmap = prop->value; - pbm->num_pbm_intmap = - (len / sizeof(struct linux_prom_pci_intmap)); - - prop = of_find_property(dp, "interrupt-map-mask", NULL); - pbm->pbm_intmask = prop->value; - } else { - pbm->num_pbm_intmap = 0; - } - prop = of_find_property(dp, "bus-range", NULL); busrange = prop->value; pbm->pci_first_busno = busrange[0]; diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 79ad2688317b..81daf903bdf4 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -1607,14 +1607,6 @@ static void schizo_pbm_init(struct pci_controller_info *p, pci_determine_mem_io_space(pbm); - pbm->pbm_intmap = of_get_property(dp, "interrupt-map", &len); - if (pbm->pbm_intmap) { - pbm->num_pbm_intmap = - (len / sizeof(struct linux_prom_pci_intmap)); - pbm->pbm_intmask = - of_get_property(dp, "interrupt-map-mask", NULL); - } - ino_bitmap = of_get_property(dp, "ino-bitmap", NULL); pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) | ((u64)ino_bitmap[0] << 0UL)); diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index e1af009617cf..7bee6b19272e 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -1332,14 +1332,6 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node pci_determine_mem_io_space(pbm); - prop = of_find_property(dp, "interrupt-map", &len); - pbm->pbm_intmap = prop->value; - pbm->num_pbm_intmap = - (len / sizeof(struct linux_prom_pci_intmap)); - - prop = of_find_property(dp, "interrupt-map-mask", NULL); - pbm->pbm_intmask = prop->value; - pci_sun4v_get_bus_range(pbm); pci_sun4v_iommu_init(pbm); pci_sun4v_msi_init(pbm); diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 88974d685a3d..f31de45f4873 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -162,9 +162,6 @@ struct pci_pbm_info { struct device_node *prom_node; struct linux_prom_pci_ranges *pbm_ranges; int num_pbm_ranges; - struct linux_prom_pci_intmap *pbm_intmap; - int num_pbm_intmap; - struct linux_prom_pci_intmask *pbm_intmask; u64 ino_bitmap; /* PBM I/O and Memory space resources. */ -- cgit v1.2.3-59-g8ed1b From 3487a1f9e719d36c9b2d4d492994b2dd815a58b7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 8 Mar 2007 22:28:17 -0800 Subject: [SPARC64]: Kill PBM ranges software state. It is only used in one spot and we can just fetch the OF property right there. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci_common.c | 19 +++++++++++++++---- arch/sparc64/kernel/pci_psycho.c | 10 ---------- arch/sparc64/kernel/pci_schizo.c | 5 ----- arch/sparc64/kernel/pci_sun4v.c | 13 ------------- include/asm-sparc64/pbm.h | 2 -- 5 files changed, 15 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index 4945d700a769..6b5c8e7a3eb0 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c @@ -73,17 +73,28 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm) void pci_determine_mem_io_space(struct pci_pbm_info *pbm) { + struct linux_prom_pci_ranges *pbm_ranges; int i, saw_mem, saw_io; + int num_pbm_ranges; saw_mem = saw_io = 0; - for (i = 0; i < pbm->num_pbm_ranges; i++) { - struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i]; + pbm_ranges = of_get_property(pbm->prom_node, "ranges", &i); + num_pbm_ranges = i / sizeof(*pbm_ranges); + + for (i = 0; i < num_pbm_ranges; i++) { + struct linux_prom_pci_ranges *pr = &pbm_ranges[i]; unsigned long a; + u32 parent_phys_hi, parent_phys_lo; int type; + parent_phys_hi = pr->parent_phys_hi; + parent_phys_lo = pr->parent_phys_lo; + if (tlb_type == hypervisor) + parent_phys_hi &= 0x0fffffff; + type = (pr->child_phys_hi >> 24) & 0x3; - a = (((unsigned long)pr->parent_phys_hi << 32UL) | - ((unsigned long)pr->parent_phys_lo << 0UL)); + a = (((unsigned long)parent_phys_hi << 32UL) | + ((unsigned long)parent_phys_lo << 0UL)); switch (type) { case 0: diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index 7b7010a1eb2f..1717df549488 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -1136,7 +1136,6 @@ static void psycho_pbm_init(struct pci_controller_info *p, unsigned int *busrange; struct property *prop; struct pci_pbm_info *pbm; - int len; if (is_pbm_a) { pbm = &p->pbm_A; @@ -1166,15 +1165,6 @@ static void psycho_pbm_init(struct pci_controller_info *p, pbm->name, pbm->chip_version, pbm->chip_revision); - prop = of_find_property(dp, "ranges", &len); - if (prop) { - pbm->pbm_ranges = prop->value; - pbm->num_pbm_ranges = - (len / sizeof(struct linux_prom_pci_ranges)); - } else { - pbm->num_pbm_ranges = 0; - } - prop = of_find_property(dp, "bus-range", NULL); busrange = prop->value; pbm->pci_first_busno = busrange[0]; diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 81daf903bdf4..dec8dc9499e0 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -1542,7 +1542,6 @@ static void schizo_pbm_init(struct pci_controller_info *p, const char *chipset_name; u32 *ino_bitmap; int is_pbm_a; - int len; switch (chip_type) { case PBM_CHIP_TYPE_TOMATILLO: @@ -1601,10 +1600,6 @@ static void schizo_pbm_init(struct pci_controller_info *p, schizo_pbm_hw_init(pbm); - pbm->pbm_ranges = of_get_property(dp, "ranges", &len); - pbm->num_pbm_ranges = - (len / sizeof(struct linux_prom_pci_ranges)); - pci_determine_mem_io_space(pbm); ino_bitmap = of_get_property(dp, "ino-bitmap", NULL); diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 7bee6b19272e..9b57ba1cb947 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -1301,8 +1301,6 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle) { struct pci_pbm_info *pbm; - struct property *prop; - int len, 
i; if (devhandle & 0x40) pbm = &p->pbm_B; @@ -1319,17 +1317,6 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node printk("%s: SUN4V PCI Bus Module\n", pbm->name); - prop = of_find_property(dp, "ranges", &len); - pbm->pbm_ranges = prop->value; - pbm->num_pbm_ranges = - (len / sizeof(struct linux_prom_pci_ranges)); - - /* Mask out the top 8 bits of the ranges, leaving the real - * physical address. - */ - for (i = 0; i < pbm->num_pbm_ranges; i++) - pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff; - pci_determine_mem_io_space(pbm); pci_sun4v_get_bus_range(pbm); diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index f31de45f4873..629290987429 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -160,8 +160,6 @@ struct pci_pbm_info { /* OBP specific information. */ struct device_node *prom_node; - struct linux_prom_pci_ranges *pbm_ranges; - int num_pbm_ranges; u64 ino_bitmap; /* PBM I/O and Memory space resources. */ -- cgit v1.2.3-59-g8ed1b From 0bae5f81b6f8130f5197e59b0e2ad6820c766b2b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 8 Mar 2007 22:42:19 -0800 Subject: [SPARC64]: Kill pci_controller->resource_adjust() All the implementations can be identical and generic, so no need for controller specific methods. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci.c | 17 ++++++++++++----- arch/sparc64/kernel/pci_psycho.c | 9 --------- arch/sparc64/kernel/pci_sabre.c | 17 ----------------- arch/sparc64/kernel/pci_schizo.c | 9 --------- arch/sparc64/kernel/pci_sun4v.c | 9 --------- include/asm-sparc64/pbm.h | 1 - 6 files changed, 12 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index b63341c2a334..6b94d97e56ad 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c @@ -469,6 +469,13 @@ static void __init apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) *last_p = last; } +static void __init pci_resource_adjust(struct resource *res, + struct resource *root) +{ + res->start += root->start; + res->end += root->start; +} + /* Cook up fake bus resources for SUNW,simba PCI bridges which lack * a proper 'ranges' property. */ @@ -486,7 +493,7 @@ static void __init apb_fake_ranges(struct pci_dev *dev, res->start = (first << 21); res->end = (last << 21) + ((1 << 21) - 1); res->flags = IORESOURCE_IO; - pbm->parent->resource_adjust(dev, res, &pbm->io_space); + pci_resource_adjust(res, &pbm->io_space); pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map); apb_calc_first_last(map, &first, &last); @@ -494,7 +501,7 @@ static void __init apb_fake_ranges(struct pci_dev *dev, res->start = (first << 21); res->end = (last << 21) + ((1 << 21) - 1); res->flags = IORESOURCE_MEM; - pbm->parent->resource_adjust(dev, res, &pbm->mem_space); + pci_resource_adjust(res, &pbm->mem_space); } static void __init pci_of_scan_bus(struct pci_pbm_info *pbm, @@ -594,7 +601,7 @@ void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, * layer routine that can calculate a resource for a given * range property value in a PCI device. 
*/ - pbm->parent->resource_adjust(dev, res, root); + pci_resource_adjust(res, root); } simba_cont: sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), @@ -803,7 +810,7 @@ void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region else root = &pbm->mem_space; - pbm->parent->resource_adjust(pdev, &zero_res, root); + pci_resource_adjust(&zero_res, root); region->start = res->start - zero_res.start; region->end = res->end - zero_res.start; @@ -824,7 +831,7 @@ void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res, else root = &pbm->mem_space; - pbm->parent->resource_adjust(pdev, res, root); + pci_resource_adjust(res, root); } EXPORT_SYMBOL(pcibios_bus_to_resource); diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index 1717df549488..c3f212725b04 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -894,14 +894,6 @@ static void psycho_register_error_handlers(struct pci_controller_info *p) } /* PSYCHO boot time probing and initialization. */ -static void psycho_resource_adjust(struct pci_dev *pdev, - struct resource *res, - struct resource *root) -{ - res->start += root->start; - res->end += root->start; -} - static void psycho_base_address_update(struct pci_dev *pdev, int resource) { struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; @@ -1218,7 +1210,6 @@ void psycho_init(struct device_node *dp, char *model_name) p->pbms_same_domain = 0; p->scan_bus = psycho_scan_bus; p->base_address_update = psycho_base_address_update; - p->resource_adjust = psycho_resource_adjust; p->pci_ops = &psycho_ops; prop = of_find_property(dp, "reg", NULL); diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index 2dad171b54e2..64cdce81d86a 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -862,22 +862,6 @@ static void sabre_register_error_handlers(struct pci_controller_info *p) sabre_write(base + SABRE_PCICTRL, tmp); } -static void sabre_resource_adjust(struct pci_dev *pdev, - struct resource *res, - struct resource *root) -{ - struct pci_pbm_info *pbm = pdev->bus->sysdata; - unsigned long base; - - if (res->flags & IORESOURCE_IO) - base = pbm->controller_regs + SABRE_IOSPACE; - else - base = pbm->controller_regs + SABRE_MEMSPACE; - - res->start += base; - res->end += base; -} - static void sabre_base_address_update(struct pci_dev *pdev, int resource) { struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; @@ -1116,7 +1100,6 @@ void sabre_init(struct device_node *dp, char *model_name) p->pbms_same_domain = 1; p->scan_bus = sabre_scan_bus; p->base_address_update = sabre_base_address_update; - p->resource_adjust = sabre_resource_adjust; p->pci_ops = &sabre_ops; /* diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index dec8dc9499e0..ba9206eb9516 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -1295,14 +1295,6 @@ static void schizo_base_address_update(struct pci_dev *pdev, int resource) pci_write_config_dword(pdev, where + 4, 0); } -static void schizo_resource_adjust(struct pci_dev *pdev, - struct resource *res, - struct resource *root) -{ - res->start += root->start; - res->end += root->start; -} - #define SCHIZO_STRBUF_CONTROL (0x02800UL) #define SCHIZO_STRBUF_FLUSH (0x02808UL) #define SCHIZO_STRBUF_FSYNC (0x02810UL) @@ -1670,7 +1662,6 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ p->index = pci_num_controllers++; 
p->scan_bus = schizo_scan_bus; p->base_address_update = schizo_base_address_update; - p->resource_adjust = schizo_resource_adjust; p->pci_ops = &schizo_ops; /* Like PSYCHO we have a 2GB aligned area for memory space. */ diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 9b57ba1cb947..ce0417e776a1 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -743,14 +743,6 @@ static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) pci_write_config_dword(pdev, where + 4, 0); } -static void pci_sun4v_resource_adjust(struct pci_dev *pdev, - struct resource *res, - struct resource *root) -{ - res->start += root->start; - res->end += root->start; -} - static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, struct pci_iommu *iommu) { @@ -1387,7 +1379,6 @@ void sun4v_pci_init(struct device_node *dp, char *model_name) p->scan_bus = pci_sun4v_scan_bus; p->base_address_update = pci_sun4v_base_address_update; - p->resource_adjust = pci_sun4v_resource_adjust; #ifdef CONFIG_PCI_MSI p->setup_msi_irq = pci_sun4v_setup_msi_irq; p->teardown_msi_irq = pci_sun4v_teardown_msi_irq; diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 629290987429..1bd5b37c6d43 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -225,7 +225,6 @@ struct pci_controller_info { /* Operations which are controller specific. */ void (*scan_bus)(struct pci_controller_info *); void (*base_address_update)(struct pci_dev *, int); - void (*resource_adjust)(struct pci_dev *, struct resource *, struct resource *); #ifdef CONFIG_PCI_MSI int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev, -- cgit v1.2.3-59-g8ed1b From 8d3aee937596d2ca6676c2c27789751445bf0bc9 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 8 Mar 2007 22:46:02 -0800 Subject: [SPARC64]: Kill pci_controller->base_address_update(). Implemented but never actually used. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_psycho.c | 45 --------------------------------------- arch/sparc64/kernel/pci_sabre.c | 46 ---------------------------------------- arch/sparc64/kernel/pci_schizo.c | 45 --------------------------------------- arch/sparc64/kernel/pci_sun4v.c | 46 ---------------------------------------- include/asm-sparc64/pbm.h | 1 - 5 files changed, 183 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index c3f212725b04..64bd3579f1ca 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -894,50 +894,6 @@ static void psycho_register_error_handlers(struct pci_controller_info *p) } /* PSYCHO boot time probing and initialization. 
*/ -static void psycho_base_address_update(struct pci_dev *pdev, int resource) -{ - struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; - struct resource *res, *root; - u32 reg; - int where, size, is_64bit; - - res = &pdev->resource[resource]; - if (resource < 6) { - where = PCI_BASE_ADDRESS_0 + (resource * 4); - } else if (resource == PCI_ROM_RESOURCE) { - where = pdev->rom_base_reg; - } else { - /* Somebody might have asked allocation of a non-standard resource */ - return; - } - - is_64bit = 0; - if (res->flags & IORESOURCE_IO) - root = &pbm->io_space; - else { - root = &pbm->mem_space; - if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) - == PCI_BASE_ADDRESS_MEM_TYPE_64) - is_64bit = 1; - } - - size = res->end - res->start; - pci_read_config_dword(pdev, where, ®); - reg = ((reg & size) | - (((u32)(res->start - root->start)) & ~size)); - if (resource == PCI_ROM_RESOURCE) { - reg |= PCI_ROM_ADDRESS_ENABLE; - res->flags |= IORESOURCE_ROM_ENABLE; - } - pci_write_config_dword(pdev, where, reg); - - /* This knows that the upper 32-bits of the address - * must be zero. Our PCI common layer enforces this. - */ - if (is_64bit) - pci_write_config_dword(pdev, where + 4, 0); -} - static void pbm_config_busmastering(struct pci_pbm_info *pbm) { u8 *addr; @@ -1209,7 +1165,6 @@ void psycho_init(struct device_node *dp, char *model_name) p->index = pci_num_controllers++; p->pbms_same_domain = 0; p->scan_bus = psycho_scan_bus; - p->base_address_update = psycho_base_address_update; p->pci_ops = &psycho_ops; prop = of_find_property(dp, "reg", NULL); diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index 64cdce81d86a..f3ec7bdacdc0 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -862,51 +862,6 @@ static void sabre_register_error_handlers(struct pci_controller_info *p) sabre_write(base + SABRE_PCICTRL, tmp); } -static void sabre_base_address_update(struct pci_dev *pdev, int resource) -{ - struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; - struct resource *res; - unsigned long base; - u32 reg; - int where, size, is_64bit; - - res = &pdev->resource[resource]; - if (resource < 6) { - where = PCI_BASE_ADDRESS_0 + (resource * 4); - } else if (resource == PCI_ROM_RESOURCE) { - where = pdev->rom_base_reg; - } else { - /* Somebody might have asked allocation of a non-standard resource */ - return; - } - - is_64bit = 0; - if (res->flags & IORESOURCE_IO) - base = pbm->controller_regs + SABRE_IOSPACE; - else { - base = pbm->controller_regs + SABRE_MEMSPACE; - if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) - == PCI_BASE_ADDRESS_MEM_TYPE_64) - is_64bit = 1; - } - - size = res->end - res->start; - pci_read_config_dword(pdev, where, ®); - reg = ((reg & size) | - (((u32)(res->start - base)) & ~size)); - if (resource == PCI_ROM_RESOURCE) { - reg |= PCI_ROM_ADDRESS_ENABLE; - res->flags |= IORESOURCE_ROM_ENABLE; - } - pci_write_config_dword(pdev, where, reg); - - /* This knows that the upper 32-bits of the address - * must be zero. Our PCI common layer enforces this. 
- */ - if (is_64bit) - pci_write_config_dword(pdev, where + 4, 0); -} - static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus) { struct pci_dev *pdev; @@ -1099,7 +1054,6 @@ void sabre_init(struct device_node *dp, char *model_name) p->index = pci_num_controllers++; p->pbms_same_domain = 1; p->scan_bus = sabre_scan_bus; - p->base_address_update = sabre_base_address_update; p->pci_ops = &sabre_ops; /* diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index ba9206eb9516..99912db4e7e8 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -1251,50 +1251,6 @@ static void schizo_scan_bus(struct pci_controller_info *p) schizo_register_error_handlers(p); } -static void schizo_base_address_update(struct pci_dev *pdev, int resource) -{ - struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; - struct resource *res, *root; - u32 reg; - int where, size, is_64bit; - - res = &pdev->resource[resource]; - if (resource < 6) { - where = PCI_BASE_ADDRESS_0 + (resource * 4); - } else if (resource == PCI_ROM_RESOURCE) { - where = pdev->rom_base_reg; - } else { - /* Somebody might have asked allocation of a non-standard resource */ - return; - } - - is_64bit = 0; - if (res->flags & IORESOURCE_IO) - root = &pbm->io_space; - else { - root = &pbm->mem_space; - if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) - == PCI_BASE_ADDRESS_MEM_TYPE_64) - is_64bit = 1; - } - - size = res->end - res->start; - pci_read_config_dword(pdev, where, ®); - reg = ((reg & size) | - (((u32)(res->start - root->start)) & ~size)); - if (resource == PCI_ROM_RESOURCE) { - reg |= PCI_ROM_ADDRESS_ENABLE; - res->flags |= IORESOURCE_ROM_ENABLE; - } - pci_write_config_dword(pdev, where, reg); - - /* This knows that the upper 32-bits of the address - * must be zero. Our PCI common layer enforces this. - */ - if (is_64bit) - pci_write_config_dword(pdev, where + 4, 0); -} - #define SCHIZO_STRBUF_CONTROL (0x02800UL) #define SCHIZO_STRBUF_FLUSH (0x02808UL) #define SCHIZO_STRBUF_FSYNC (0x02810UL) @@ -1661,7 +1617,6 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ p->index = pci_num_controllers++; p->scan_bus = schizo_scan_bus; - p->base_address_update = schizo_base_address_update; p->pci_ops = &schizo_ops; /* Like PSYCHO we have a 2GB aligned area for memory space. */ diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index ce0417e776a1..f9cd9f620d4e 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -698,51 +698,6 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p) /* XXX register error interrupt handlers XXX */ } -static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) -{ - struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; - struct resource *res, *root; - u32 reg; - int where, size, is_64bit; - - res = &pdev->resource[resource]; - if (resource < 6) { - where = PCI_BASE_ADDRESS_0 + (resource * 4); - } else if (resource == PCI_ROM_RESOURCE) { - where = pdev->rom_base_reg; - } else { - /* Somebody might have asked allocation of a non-standard resource */ - return; - } - - /* XXX 64-bit MEM handling is not %100 correct... 
XXX */ - is_64bit = 0; - if (res->flags & IORESOURCE_IO) - root = &pbm->io_space; - else { - root = &pbm->mem_space; - if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) - == PCI_BASE_ADDRESS_MEM_TYPE_64) - is_64bit = 1; - } - - size = res->end - res->start; - pci_read_config_dword(pdev, where, ®); - reg = ((reg & size) | - (((u32)(res->start - root->start)) & ~size)); - if (resource == PCI_ROM_RESOURCE) { - reg |= PCI_ROM_ADDRESS_ENABLE; - res->flags |= IORESOURCE_ROM_ENABLE; - } - pci_write_config_dword(pdev, where, reg); - - /* This knows that the upper 32-bits of the address - * must be zero. Our PCI common layer enforces this. - */ - if (is_64bit) - pci_write_config_dword(pdev, where + 4, 0); -} - static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, struct pci_iommu *iommu) { @@ -1378,7 +1333,6 @@ void sun4v_pci_init(struct device_node *dp, char *model_name) p->pbms_same_domain = 0; p->scan_bus = pci_sun4v_scan_bus; - p->base_address_update = pci_sun4v_base_address_update; #ifdef CONFIG_PCI_MSI p->setup_msi_irq = pci_sun4v_setup_msi_irq; p->teardown_msi_irq = pci_sun4v_teardown_msi_irq; diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 1bd5b37c6d43..07f58adb591a 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -224,7 +224,6 @@ struct pci_controller_info { /* Operations which are controller specific. */ void (*scan_bus)(struct pci_controller_info *); - void (*base_address_update)(struct pci_dev *, int); #ifdef CONFIG_PCI_MSI int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev, -- cgit v1.2.3-59-g8ed1b From 3875c5c02d7112aa85f815d65d8add2e39ae9e34 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 8 Mar 2007 22:52:11 -0800 Subject: [SPARC64]: Kill pci_controller->pbms_same_domain We don't do the "Simba APB is a PBM" bogosity for Sabre controllers any longer, so this pbms_same_domain thing is no longer necessary. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci.c | 75 +++++++--------------------------------- arch/sparc64/kernel/pci_psycho.c | 1 - arch/sparc64/kernel/pci_sabre.c | 1 - arch/sparc64/kernel/pci_sun4v.c | 1 - include/asm-sparc64/pbm.h | 3 -- 5 files changed, 12 insertions(+), 69 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index 6b94d97e56ad..a7809a00324c 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c @@ -857,43 +857,12 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc unsigned long space_size, user_offset, user_size; p = pbm->parent; - if (p->pbms_same_domain) { - unsigned long lowest, highest; - - lowest = ~0UL; highest = 0UL; - if (mmap_state == pci_mmap_io) { - if (p->pbm_A.io_space.flags) { - lowest = p->pbm_A.io_space.start; - highest = p->pbm_A.io_space.end + 1; - } - if (p->pbm_B.io_space.flags) { - if (lowest > p->pbm_B.io_space.start) - lowest = p->pbm_B.io_space.start; - if (highest < p->pbm_B.io_space.end + 1) - highest = p->pbm_B.io_space.end + 1; - } - space_size = highest - lowest; - } else { - if (p->pbm_A.mem_space.flags) { - lowest = p->pbm_A.mem_space.start; - highest = p->pbm_A.mem_space.end + 1; - } - if (p->pbm_B.mem_space.flags) { - if (lowest > p->pbm_B.mem_space.start) - lowest = p->pbm_B.mem_space.start; - if (highest < p->pbm_B.mem_space.end + 1) - highest = p->pbm_B.mem_space.end + 1; - } - space_size = highest - lowest; - } + if (mmap_state == pci_mmap_io) { + space_size = (pbm->io_space.end - + pbm->io_space.start) + 1; } else { - if (mmap_state == pci_mmap_io) { - space_size = (pbm->io_space.end - - pbm->io_space.start) + 1; - } else { - space_size = (pbm->mem_space.end - - pbm->mem_space.start) + 1; - } + space_size = (pbm->mem_space.end - + pbm->mem_space.start) + 1; } /* Make sure the request is in range. */ @@ -904,31 +873,12 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc (user_offset + user_size) > space_size) return -EINVAL; - if (p->pbms_same_domain) { - unsigned long lowest = ~0UL; - - if (mmap_state == pci_mmap_io) { - if (p->pbm_A.io_space.flags) - lowest = p->pbm_A.io_space.start; - if (p->pbm_B.io_space.flags && - lowest > p->pbm_B.io_space.start) - lowest = p->pbm_B.io_space.start; - } else { - if (p->pbm_A.mem_space.flags) - lowest = p->pbm_A.mem_space.start; - if (p->pbm_B.mem_space.flags && - lowest > p->pbm_B.mem_space.start) - lowest = p->pbm_B.mem_space.start; - } - vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT; + if (mmap_state == pci_mmap_io) { + vma->vm_pgoff = (pbm->io_space.start + + user_offset) >> PAGE_SHIFT; } else { - if (mmap_state == pci_mmap_io) { - vma->vm_pgoff = (pbm->io_space.start + - user_offset) >> PAGE_SHIFT; - } else { - vma->vm_pgoff = (pbm->mem_space.start + - user_offset) >> PAGE_SHIFT; - } + vma->vm_pgoff = (pbm->mem_space.start + + user_offset) >> PAGE_SHIFT; } return 0; @@ -1062,9 +1012,8 @@ int pci_domain_nr(struct pci_bus *pbus) struct pci_controller_info *p = pbm->parent; ret = p->index; - if (p->pbms_same_domain == 0) - ret = ((ret << 1) + - ((pbm == &pbm->parent->pbm_B) ? 1 : 0)); + ret = ((ret << 1) + + ((pbm == &pbm->parent->pbm_B) ? 
1 : 0)); } return ret; diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index 64bd3579f1ca..0ab2aa0261ce 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -1163,7 +1163,6 @@ void psycho_init(struct device_node *dp, char *model_name) p->pbm_A.portid = upa_portid; p->pbm_B.portid = upa_portid; p->index = pci_num_controllers++; - p->pbms_same_domain = 0; p->scan_bus = psycho_scan_bus; p->pci_ops = &psycho_ops; diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index f3ec7bdacdc0..cef81c88548a 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -1052,7 +1052,6 @@ void sabre_init(struct device_node *dp, char *model_name) p->pbm_A.portid = upa_portid; p->index = pci_num_controllers++; - p->pbms_same_domain = 1; p->scan_bus = sabre_scan_bus; p->pci_ops = &sabre_ops; diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index f9cd9f620d4e..b63ef26abf02 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -1330,7 +1330,6 @@ void sun4v_pci_init(struct device_node *dp, char *model_name) pci_controller_root = p; p->index = pci_num_controllers++; - p->pbms_same_domain = 0; p->scan_bus = pci_sun4v_scan_bus; #ifdef CONFIG_PCI_MSI diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 07f58adb591a..43b07b9b42c5 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -215,9 +215,6 @@ struct pci_controller_info { */ int index; - /* Do the PBMs both exist in the same PCI domain? */ - int pbms_same_domain; - /* The PCI bus modules controlled by us. */ struct pci_pbm_info pbm_A; struct pci_pbm_info pbm_B; -- cgit v1.2.3-59-g8ed1b From 0bba2dd823fd995ed805ae5cbd5a1c1381257a12 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 8 Mar 2007 23:06:39 -0800 Subject: [SPARC64]: Kill pbm->pci_first_slot. Set but never used. Signed-off-by: David S. Miller --- arch/sparc64/kernel/of_device.c | 6 ------ arch/sparc64/kernel/pci_psycho.c | 7 ++----- arch/sparc64/kernel/pci_sabre.c | 1 - arch/sparc64/kernel/pci_schizo.c | 1 - arch/sparc64/kernel/pci_sun4v.c | 1 - include/asm-sparc64/pbm.h | 3 --- 6 files changed, 2 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c index 8964eacb0048..f18eec6ad691 100644 --- a/arch/sparc64/kernel/of_device.c +++ b/arch/sparc64/kernel/of_device.c @@ -759,12 +759,6 @@ static unsigned int __init pci_irq_swizzle(struct device_node *dp, * D: 2-bit slot number, derived from PCI device number as * (dev - 1) for bus A, or (dev - 2) for bus B * L: 2-bit line number - * - * Actually, more "portable" way to calculate the funky - * slot number is to subtract pbm->pci_first_slot from the - * device number, and that's exactly what the pre-OF - * sparc64 code did, but we're building this stuff generically - * using the OBP tree, not in the PCI controller layer. 
*/ if (bus & 0x80) { /* PBM-A */ diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index 0ab2aa0261ce..2e5d40a26dbd 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -1085,13 +1085,10 @@ static void psycho_pbm_init(struct pci_controller_info *p, struct property *prop; struct pci_pbm_info *pbm; - if (is_pbm_a) { + if (is_pbm_a) pbm = &p->pbm_A; - pbm->pci_first_slot = 1; - } else { + else pbm = &p->pbm_B; - pbm->pci_first_slot = 2; - } pbm->chip_type = PBM_CHIP_TYPE_PSYCHO; pbm->chip_version = 0; diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index cef81c88548a..1bd7fc7c05e6 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -995,7 +995,6 @@ static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp pbm->chip_type = PBM_CHIP_TYPE_SABRE; pbm->parent = p; pbm->prom_node = dp; - pbm->pci_first_slot = 1; pbm->pci_first_busno = p->pci_first_busno; pbm->pci_last_busno = p->pci_last_busno; diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 99912db4e7e8..f1f105af8d3c 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -1528,7 +1528,6 @@ static void schizo_pbm_init(struct pci_controller_info *p, pbm->portid = portid; pbm->parent = p; pbm->prom_node = dp; - pbm->pci_first_slot = 1; pbm->chip_type = chip_type; pbm->chip_version = of_getintprop_default(dp, "version#", 0); diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index b63ef26abf02..c6feae6e9690 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -1256,7 +1256,6 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node pbm->parent = p; pbm->prom_node = dp; - pbm->pci_first_slot = 1; pbm->devhandle = devhandle; diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 43b07b9b42c5..3a811c2cd3c2 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -197,9 +197,6 @@ struct pci_pbm_info { /* IOMMU state, potentially shared by both PBM segments. */ struct pci_iommu *iommu; - /* PCI slot mapping. */ - unsigned int pci_first_slot; - /* Now things for the actual PCI bus probes. */ unsigned int pci_first_busno; unsigned int pci_last_busno; -- cgit v1.2.3-59-g8ed1b From c6e87566ea080bbbe926c0e429fed48e6f680d93 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 9 Mar 2007 16:58:43 -0800 Subject: [SPARC64]: Const'ify pci_iommu_ops. Based upon a similar patch for x86_64 written by Stephen Hemminger. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci.c | 4 ++-- arch/sparc64/kernel/pci_iommu.c | 2 +- arch/sparc64/kernel/pci_sun4v.c | 2 +- include/asm-sparc64/pci.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index a7809a00324c..914a216da339 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c @@ -282,10 +282,10 @@ int __init pcic_present(void) return pci_controller_scan(pci_is_controller); } -struct pci_iommu_ops *pci_iommu_ops; +const struct pci_iommu_ops *pci_iommu_ops; EXPORT_SYMBOL(pci_iommu_ops); -extern struct pci_iommu_ops pci_sun4u_iommu_ops, +extern const struct pci_iommu_ops pci_sun4u_iommu_ops, pci_sun4v_iommu_ops; /* Find each controller in the system, attach and initialize diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index 554daabb381e..aefdcc51045b 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c @@ -759,7 +759,7 @@ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist spin_unlock_irqrestore(&iommu->lock, flags); } -struct pci_iommu_ops pci_sun4u_iommu_ops = { +const struct pci_iommu_ops pci_sun4u_iommu_ops = { .alloc_consistent = pci_4u_alloc_consistent, .free_consistent = pci_4u_free_consistent, .map_single = pci_4u_map_single, diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index c6feae6e9690..a8c12c1e8039 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -583,7 +583,7 @@ static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist /* Nothing to do... */ } -struct pci_iommu_ops pci_sun4v_iommu_ops = { +const struct pci_iommu_ops pci_sun4v_iommu_ops = { .alloc_consistent = pci_4v_alloc_consistent, .free_consistent = pci_4v_free_consistent, .map_single = pci_4v_map_single, diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h index b14a725b430d..47cea16e1bad 100644 --- a/include/asm-sparc64/pci.h +++ b/include/asm-sparc64/pci.h @@ -54,7 +54,7 @@ struct pci_iommu_ops { void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int); }; -extern struct pci_iommu_ops *pci_iommu_ops; +extern const struct pci_iommu_ops *pci_iommu_ops; /* Allocate and map kernel buffer using consistent mode DMA for a device. * hwdev should be valid struct pci_dev pointer for PCI devices. -- cgit v1.2.3-59-g8ed1b From 43bed127376ff2ef9c268cf6688a43d0fbed2ff4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 14 Mar 2007 18:33:49 -0700 Subject: [SPARC64]: Use DECLARE_BITMAP in struct pci_iommu. Signed-off-by: David S. Miller --- include/asm-sparc64/pbm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 3a811c2cd3c2..9afcc06a575d 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -78,7 +78,7 @@ struct pci_iommu { /* CTX allocation. */ unsigned long ctx_lowest_free; - unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)]; + DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS); /* Here a PCI controller driver describes the areas of * PCI memory space where DMA to/from physical memory -- cgit v1.2.3-59-g8ed1b From d78d0891d3dd976a2fb707c6c691d9cd5ed60727 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 14 Mar 2007 22:47:01 -0700 Subject: [SPARC64]: Use SPARSEMEM_STATIC Decrease the SECTION_SIZE_BITS --> MAX_PHYSADDR_BITS range a little bit. 
The cost of going to SPARSEMEM_STATIC becomes 8K of BSS space, and in return we save a pointer dereference on every page struct lookup. Even better, we hit the main kernel image for the base address, which is in a hugepage locked TLB entry. Signed-off-by: David S. Miller --- arch/sparc64/Kconfig | 1 + include/asm-sparc64/sparsemem.h | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 51c87fdd998c..590a41b864b9 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -220,6 +220,7 @@ config ARCH_SPARSEMEM_ENABLE config ARCH_SPARSEMEM_DEFAULT def_bool y + select SPARSEMEM_STATIC config LARGE_ALLOCS def_bool y diff --git a/include/asm-sparc64/sparsemem.h b/include/asm-sparc64/sparsemem.h index ed5c9d8541e2..411e0e255cf3 100644 --- a/include/asm-sparc64/sparsemem.h +++ b/include/asm-sparc64/sparsemem.h @@ -3,9 +3,9 @@ #ifdef __KERNEL__ -#define SECTION_SIZE_BITS 26 -#define MAX_PHYSADDR_BITS 42 -#define MAX_PHYSMEM_BITS 42 +#define SECTION_SIZE_BITS 31 +#define MAX_PHYSADDR_BITS 41 +#define MAX_PHYSMEM_BITS 41 #endif /* !(__KERNEL__) */ -- cgit v1.2.3-59-g8ed1b From 4e286d5be63c93b17f8a82d6f3618faa9c1b025c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 15 Mar 2007 00:21:45 -0700 Subject: [SPARC64]: MAX_PHYSADDR_BITS et al. really need to be 42 bits not 41. Signed-off-by: David S. Miller --- include/asm-sparc64/sparsemem.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/sparsemem.h b/include/asm-sparc64/sparsemem.h index 411e0e255cf3..77bcd2bfa53c 100644 --- a/include/asm-sparc64/sparsemem.h +++ b/include/asm-sparc64/sparsemem.h @@ -4,8 +4,8 @@ #ifdef __KERNEL__ #define SECTION_SIZE_BITS 31 -#define MAX_PHYSADDR_BITS 41 -#define MAX_PHYSMEM_BITS 41 +#define MAX_PHYSADDR_BITS 42 +#define MAX_PHYSMEM_BITS 42 #endif /* !(__KERNEL__) */ -- cgit v1.2.3-59-g8ed1b From 4be5c34dc47b5a9e6f91c8f5937a93c464870b8e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 15 Mar 2007 15:44:05 -0700 Subject: [SPARC64]: Privatize sun4u_get_pte() and fix name. __get_phys is only called from init.c as is prom_virt_to_phys(), __get_iospace() is not called at all, and sun4u_get_pte() is largely misnamed. Privatize the implementation and helper functions of sun4u_get_pte() to mm/init.c, and rename it to kvaddr_to_phys(). The only user of this thing is flush_icache_range(), and thus things can be considerably further simplified. For example, we should only see module or PAGE_OFFSET kernel addresses here, so we don't need the OBP firmware range handling at all. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 129 ++++++++++++++++++++---------------- include/asm-sparc64/pgtable.h | 14 ----- 2 files changed, 62 insertions(+), 81 deletions(-) (limited to 'include') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 56be943a6f0e..3cacea5da6ce 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -392,6 +392,67 @@ out: put_cpu(); } +struct linux_prom_translation { + unsigned long virt; + unsigned long size; + unsigned long data; +}; + +/* Exported for kernel TLB miss handling in ktlb.S */ +struct linux_prom_translation prom_trans[512] __read_mostly; +unsigned int prom_trans_ents __read_mostly; + +/* + * Translate PROM's mapping we capture at boot time into physical address. + * The second parameter is only set from prom_callback() invocations.
+ */ +static unsigned long prom_virt_to_phys(unsigned long promva) +{ + unsigned long mask; + int i; + + mask = _PAGE_PADDR_4U; + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + for (i = 0; i < prom_trans_ents; i++) { + struct linux_prom_translation *p = &prom_trans[i]; + + if (promva >= p->virt && + promva < (p->virt + p->size)) { + unsigned long base = p->data & mask; + + return base + (promva & (8192 - 1)); + } + } + return 0UL; +} + +static unsigned long kvaddr_to_phys(unsigned long addr) +{ + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + unsigned long mask = _PAGE_PADDR_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + if (addr >= PAGE_OFFSET) + return addr & mask; + + if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) + return prom_virt_to_phys(addr); + + pgdp = pgd_offset_k(addr); + pudp = pud_offset(pgdp, addr); + pmdp = pmd_offset(pudp, addr); + ptep = pte_offset_kernel(pmdp, addr); + + return pte_val(*ptep) & mask; +} + void __kprobes flush_icache_range(unsigned long start, unsigned long end) { /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ @@ -399,7 +460,7 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end) unsigned long kaddr; for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) - __flush_icache_page(__get_phys(kaddr)); + __flush_icache_page(kvaddr_to_phys(kaddr)); } } @@ -436,16 +497,6 @@ void mmu_info(struct seq_file *m) #endif /* CONFIG_DEBUG_DCFLUSH */ } -struct linux_prom_translation { - unsigned long virt; - unsigned long size; - unsigned long data; -}; - -/* Exported for kernel TLB miss handling in ktlb.S */ -struct linux_prom_translation prom_trans[512] __read_mostly; -unsigned int prom_trans_ents __read_mostly; - /* Exported for SMP bootup purposes. */ unsigned long kern_locked_tte_data; @@ -1875,62 +1926,6 @@ static unsigned long kern_large_tte(unsigned long paddr) return val | paddr; } -/* - * Translate PROM's mapping we capture at boot time into physical address. - * The second parameter is only set from prom_callback() invocations. - */ -unsigned long prom_virt_to_phys(unsigned long promva, int *error) -{ - unsigned long mask; - int i; - - mask = _PAGE_PADDR_4U; - if (tlb_type == hypervisor) - mask = _PAGE_PADDR_4V; - - for (i = 0; i < prom_trans_ents; i++) { - struct linux_prom_translation *p = &prom_trans[i]; - - if (promva >= p->virt && - promva < (p->virt + p->size)) { - unsigned long base = p->data & mask; - - if (error) - *error = 0; - return base + (promva & (8192 - 1)); - } - } - if (error) - *error = 1; - return 0UL; -} - -/* XXX We should kill off this ugly thing at so me point. XXX */ -unsigned long sun4u_get_pte(unsigned long addr) -{ - pgd_t *pgdp; - pud_t *pudp; - pmd_t *pmdp; - pte_t *ptep; - unsigned long mask = _PAGE_PADDR_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_PADDR_4V; - - if (addr >= PAGE_OFFSET) - return addr & mask; - - if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) - return prom_virt_to_phys(addr, NULL); - - pgdp = pgd_offset_k(addr); - pudp = pud_offset(pgdp, addr); - pmdp = pmd_offset(pudp, addr); - ptep = pte_offset_kernel(pmdp, addr); - - return pte_val(*ptep) & mask; -} - /* If not locked, zap it. 
*/ void __flush_tlb_all(void) { diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index b12be7a869f6..fd46dd615b6f 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -737,20 +737,6 @@ extern unsigned long pte_file(pte_t); extern pte_t pgoff_to_pte(unsigned long); #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) -extern unsigned long prom_virt_to_phys(unsigned long, int *); - -extern unsigned long sun4u_get_pte(unsigned long); - -static inline unsigned long __get_phys(unsigned long addr) -{ - return sun4u_get_pte(addr); -} - -static inline int __get_iospace(unsigned long addr) -{ - return ((sun4u_get_pte(addr) & 0xf0000000) >> 28); -} - extern unsigned long *sparc64_valid_addr_bitmap; /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ -- cgit v1.2.3-59-g8ed1b From b93f2620231d4641bdbaaa952d3e8890687124bb Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 15 Mar 2007 18:29:13 -0700 Subject: [SPARC64]: Add proper header file extern for cmdline_memory_size. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 2 -- include/asm-sparc64/pgtable.h | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 668b7bba90e8..df4e37e0b0fc 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -761,8 +761,6 @@ out: smp_new_mmu_context_version(); } -extern unsigned long cmdline_memory_size; - /* Find a free area for the bootmem map, avoiding the kernel image * and the initial ramdisk. */ diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index fd46dd615b6f..46705ef47d27 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -777,6 +777,8 @@ extern void pgtable_cache_init(void); extern void sun4v_register_fault_status(void); extern void sun4v_ktsb_register(void); +extern unsigned long cmdline_memory_size; + #endif /* !(__ASSEMBLY__) */ #endif /* !(_SPARC64_PGTABLE_H) */ -- cgit v1.2.3-59-g8ed1b From 3dfe10ee7caae9802d84a06fe7724274dea24020 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 29 Mar 2007 11:22:57 -0700 Subject: [SPARC64]: constify some parameters of OF routines This starts bringing the PowerPC and Sparc64 implementations back closer together. Signed-off-by: Stephen Rothwell Signed-off-by: David S.
Miller --- arch/sparc64/kernel/prom.c | 11 +++++++---- include/asm-sparc64/prom.h | 10 ++++++---- 2 files changed, 13 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c index 13734b50a6e8..5e1fcd05160d 100644 --- a/arch/sparc64/kernel/prom.c +++ b/arch/sparc64/kernel/prom.c @@ -36,12 +36,13 @@ static struct device_node *allnodes; */ static DEFINE_RWLOCK(devtree_lock); -int of_device_is_compatible(struct device_node *device, const char *compat) +int of_device_is_compatible(const struct device_node *device, + const char *compat) { const char* cp; int cplen, l; - cp = (char *) of_get_property(device, "compatible", &cplen); + cp = of_get_property(device, "compatible", &cplen); if (cp == NULL) return 0; while (cplen > 0) { @@ -154,7 +155,8 @@ struct device_node *of_find_compatible_node(struct device_node *from, } EXPORT_SYMBOL(of_find_compatible_node); -struct property *of_find_property(struct device_node *np, const char *name, +struct property *of_find_property(const struct device_node *np, + const char *name, int *lenp) { struct property *pp; @@ -174,7 +176,8 @@ EXPORT_SYMBOL(of_find_property); * Find a property with a given name for a given node * and return the value. */ -const void *of_get_property(struct device_node *np, const char *name, int *lenp) +const void *of_get_property(const struct device_node *np, const char *name, + int *lenp) { struct property *pp = of_find_property(np,name,lenp); return pp ? pp->value : NULL; diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h index 50b03387c97b..6394bd1cdcd4 100644 --- a/include/asm-sparc64/prom.h +++ b/include/asm-sparc64/prom.h @@ -93,12 +93,14 @@ extern struct device_node *of_find_node_by_phandle(phandle handle); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); -extern struct property *of_find_property(struct device_node *np, +extern struct property *of_find_property(const struct device_node *np, const char *name, int *lenp); -extern int of_device_is_compatible(struct device_node *device, const char *); -extern const void *of_get_property(struct device_node *node, const char *name, - int *lenp); +extern int of_device_is_compatible(const struct device_node *device, + const char *); +extern const void *of_get_property(const struct device_node *node, + const char *name, + int *lenp); #define get_property(node,name,lenp) of_get_property(node,name,lenp) extern int of_set_property(struct device_node *node, const char *name, void *val, int len); extern int of_getintprop_default(struct device_node *np, -- cgit v1.2.3-59-g8ed1b From 711b360d64418e88ed45f812e0ebd202073d888d Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 12 Apr 2007 14:38:34 -0700 Subject: [SPARC]: Make device_node name and type const Signed-off-by: Stephen Rothwell Signed-off-by: David S. 
Miller --- include/asm-sparc/prom.h | 4 ++-- include/asm-sparc64/prom.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-sparc/prom.h b/include/asm-sparc/prom.h index 39ad2224a117..9ea105ebe2ff 100644 --- a/include/asm-sparc/prom.h +++ b/include/asm-sparc/prom.h @@ -35,8 +35,8 @@ struct property { }; struct device_node { - char *name; - char *type; + const char *name; + const char *type; phandle node; char *path_component_name; char *full_name; diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h index 6394bd1cdcd4..ddad5f99ac7f 100644 --- a/include/asm-sparc64/prom.h +++ b/include/asm-sparc64/prom.h @@ -36,8 +36,8 @@ struct property { struct of_irq_controller; struct device_node { - char *name; - char *type; + const char *name; + const char *type; phandle node; char *path_component_name; char *full_name; -- cgit v1.2.3-59-g8ed1b From 9b3627f389c07c5be9c86ac4d472a0d4fd47feac Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 24 Apr 2007 23:51:18 -0700 Subject: [SPARC64]: Consolidate {sbus,pci}_iommu_arena. Move to asm-sparc64/iommu.h and rename to plain "iommu_arena". Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_iommu.c | 4 ++-- arch/sparc64/kernel/pci_sun4v.c | 6 +++--- arch/sparc64/kernel/sbus.c | 12 +++--------- include/asm-sparc64/iommu.h | 22 ++++++++++++++-------- include/asm-sparc64/pbm.h | 8 +------- 5 files changed, 23 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index aefdcc51045b..355ed0ba937a 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c @@ -77,7 +77,7 @@ static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte) /* Based largely upon the ppc64 iommu allocator. 
*/ static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages) { - struct pci_iommu_arena *arena = &iommu->arena; + struct iommu_arena *arena = &iommu->arena; unsigned long n, i, start, end, limit; int pass; @@ -116,7 +116,7 @@ again: return n; } -static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) +static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages) { unsigned long i; diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 52bd4563492d..0e99808f2121 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -110,7 +110,7 @@ static inline long pci_iommu_batch_end(void) return pci_iommu_batch_flush(p); } -static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages) +static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages) { unsigned long n, i, start, end, limit; int pass; @@ -149,7 +149,7 @@ again: return n; } -static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) +static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages) { unsigned long i; @@ -707,7 +707,7 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p) static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, struct pci_iommu *iommu) { - struct pci_iommu_arena *arena = &iommu->arena; + struct iommu_arena *arena = &iommu->arena; unsigned long i, cnt = 0; u32 devhandle; diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c index d403f3df2dc2..279758d56b29 100644 --- a/arch/sparc64/kernel/sbus.c +++ b/arch/sparc64/kernel/sbus.c @@ -26,16 +26,10 @@ #define MAP_BASE ((u32)0xc0000000) -struct sbus_iommu_arena { - unsigned long *map; - unsigned int hint; - unsigned int limit; -}; - struct sbus_iommu { spinlock_t lock; - struct sbus_iommu_arena arena; + struct iommu_arena arena; iopte_t *page_table; unsigned long strbuf_regs; @@ -123,7 +117,7 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long /* Based largely upon the ppc64 iommu allocator. */ static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages) { - struct sbus_iommu_arena *arena = &iommu->arena; + struct iommu_arena *arena = &iommu->arena; unsigned long n, i, start, end, limit; int pass; @@ -162,7 +156,7 @@ again: return n; } -static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base, unsigned long npages) +static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages) { unsigned long i; diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h index 0de7a3da79cd..d8d98f8f747f 100644 --- a/include/asm-sparc64/iommu.h +++ b/include/asm-sparc64/iommu.h @@ -7,15 +7,21 @@ #define _SPARC64_IOMMU_H /* The format of an iopte in the page tables. 
*/ -#define IOPTE_VALID 0x8000000000000000UL /* IOPTE is valid */ -#define IOPTE_64K 0x2000000000000000UL /* IOPTE is for 64k page */ -#define IOPTE_STBUF 0x1000000000000000UL /* DVMA can use streaming buffer */ -#define IOPTE_INTRA 0x0800000000000000UL /* SBUS slot-->slot direct transfer*/ -#define IOPTE_CONTEXT 0x07ff800000000000UL /* Context number */ -#define IOPTE_PAGE 0x00007fffffffe000UL /* Physical page number (PA[42:13])*/ -#define IOPTE_CACHE 0x0000000000000010UL /* Cached (in UPA E-cache) */ -#define IOPTE_WRITE 0x0000000000000002UL /* Writeable */ +#define IOPTE_VALID 0x8000000000000000UL +#define IOPTE_64K 0x2000000000000000UL +#define IOPTE_STBUF 0x1000000000000000UL +#define IOPTE_INTRA 0x0800000000000000UL +#define IOPTE_CONTEXT 0x07ff800000000000UL +#define IOPTE_PAGE 0x00007fffffffe000UL +#define IOPTE_CACHE 0x0000000000000010UL +#define IOPTE_WRITE 0x0000000000000002UL #define IOMMU_NUM_CTXS 4096 +struct iommu_arena { + unsigned long *map; + unsigned int hint; + unsigned int limit; +}; + #endif /* !(_SPARC_IOMMU_H) */ diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 9afcc06a575d..4a0ed2ea950c 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -35,19 +35,13 @@ struct pci_controller_info; /* This contains the software state necessary to drive a PCI * controller's IOMMU. */ -struct pci_iommu_arena { - unsigned long *map; - unsigned int hint; - unsigned int limit; -}; - struct pci_iommu { /* This protects the controller's IOMMU and all * streaming buffers underneath. */ spinlock_t lock; - struct pci_iommu_arena arena; + struct iommu_arena arena; /* IOMMU page table, a linear array of ioptes. */ iopte_t *page_table; /* The page table itself. */ -- cgit v1.2.3-59-g8ed1b From 66875088098f314af1a4d9e0cc47e617d643bffd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 25 Apr 2007 00:12:09 -0700 Subject: [SPARC64]: Add generic iommu and strbuf structs to iommu.h Signed-off-by: David S. Miller --- include/asm-sparc64/iommu.h | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h index d8d98f8f747f..e199594a1e9b 100644 --- a/include/asm-sparc64/iommu.h +++ b/include/asm-sparc64/iommu.h @@ -24,4 +24,33 @@ struct iommu_arena { unsigned int limit; }; +struct iommu { + spinlock_t lock; + struct iommu_arena arena; + iopte_t *page_table; + u32 page_table_map_base; + unsigned long iommu_control; + unsigned long iommu_tsbbase; + unsigned long iommu_flush; + unsigned long iommu_ctxflush; + unsigned long write_complete_reg; + unsigned long dummy_page; + unsigned long dummy_page_pa; + unsigned long ctx_lowest_free; + DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS); + u32 dma_addr_mask; +}; + +struct strbuf { + int strbuf_enabled; + unsigned long strbuf_control; + unsigned long strbuf_pflush; + unsigned long strbuf_fsync; + unsigned long strbuf_ctxflush; + unsigned long strbuf_ctxmatch_base; + unsigned long strbuf_flushflag_pa; + volatile unsigned long *strbuf_flushflag; + volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)]; +}; + #endif /* !(_SPARC_IOMMU_H) */ -- cgit v1.2.3-59-g8ed1b From 5b04aa3a64f854244bc40a6f528176ed50b5c4f6 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Thu, 1 Mar 2007 11:01:55 -0800 Subject: [PATCH] Turn do_sync_file_range() into do_sync_mapping_range() do_sync_file_range() accepts a file * from which it takes an address_space to sync. 
Abstract out the bulk of the function into do_sync_mapping_range() which takes the address_space directly. This way callers who want to sync an address_space directly can take advantage of the functionality provided. do_sync_file_range() is preserved as a small wrapper around do_sync_mapping_range(). Ocfs2 in particular would like to use this to initiate a sync of a specific inode range during truncate, where a file * may not be available. Signed-off-by: Mark Fasheh Cc: Christoph Hellwig Signed-off-by: Andrew Morton --- fs/sync.c | 8 +++----- include/linux/fs.h | 9 +++++++-- 2 files changed, 10 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/fs/sync.c b/fs/sync.c index d0feff61e6aa..5cb9e7e43383 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -239,13 +239,11 @@ out: /* * `endbyte' is inclusive */ -int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte, - unsigned int flags) +int do_sync_mapping_range(struct address_space *mapping, loff_t offset, + loff_t endbyte, unsigned int flags) { int ret; - struct address_space *mapping; - mapping = file->f_mapping; if (!mapping) { ret = -EINVAL; goto out; } @@ -275,4 +273,4 @@ int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte, out: return ret; } -EXPORT_SYMBOL_GPL(do_sync_file_range); +EXPORT_SYMBOL_GPL(do_sync_mapping_range); diff --git a/include/linux/fs.h b/include/linux/fs.h index 86ec3f4a7da6..095a9c9a64fb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -843,8 +843,13 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); extern int fcntl_getlease(struct file *filp); /* fs/sync.c */ -extern int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte, - unsigned int flags); +extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset, + loff_t endbyte, unsigned int flags); +static inline int do_sync_file_range(struct file *file, loff_t offset, + loff_t endbyte, unsigned int flags) +{ + return do_sync_mapping_range(file->f_mapping, offset, endbyte, flags); +} /* fs/locks.c */ extern void locks_init_lock(struct file_lock *); -- cgit v1.2.3-59-g8ed1b From 071b638689464c6b39407025eedd810d5b5e6f5d Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 26 Apr 2007 15:45:32 -0700 Subject: [WORKQUEUE]: cancel_delayed_work: use del_timer() instead of del_timer_sync() del_timer_sync() buys nothing for cancel_delayed_work(), but it is less efficient since it locks the timer unconditionally, and may wait for the completion of the delayed_work_timer_fn(). cancel_delayed_work() == 0 means: before this patch: work->func may still be running or queued after this patch: work->func may still be running or queued, or delayed_work_timer_fn->__queue_work() in progress. The latter doesn't differ from the caller's POV, delayed_work_timer_fn() is called with _PENDING bit set. cancel_delayed_work() == 1 with this patch adds a new possibility: delayed_work->work was cancelled, but delayed_work_timer_fn is still running (this is only possible for the re-arming works on single-threaded workqueue). In this case the timer was re-started by work->func(), nobody else can do this. This in turn means that delayed_work_timer_fn has already passed __queue_work() (and won't touch delayed_work) because nobody else can queue delayed_work->work. Signed-off-by: Oleg Nesterov Signed-Off-By: David Howells Signed-off-by: David S. Miller
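As a brief, hypothetical illustration of the return-value semantics described above (this sketch is not part of the patch; the workqueue, work item and teardown function names are invented for the example):

	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;	/* hypothetical */
	static struct delayed_work my_dwork;	/* hypothetical */

	static void my_teardown(void)
	{
		if (!cancel_delayed_work(&my_dwork)) {
			/* Returned 0: the timer already fired, so the work
			 * function may still be queued or running.  Drain
			 * the workqueue before freeing anything it uses. */
			flush_workqueue(my_wq);
		}
		/* Returned 1: the pending timer was removed before firing.
		 * A self-rearming work item would still need
		 * cancel_work_sync() (or a flush once it stops rearming),
		 * as the comment added to workqueue.h below also notes. */
	}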
--- include/linux/workqueue.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 2a7b38d87018..b8abfc74d038 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -191,14 +191,15 @@ int execute_in_process_context(work_func_t fn, struct execute_work *); /* * Kill off a pending schedule_delayed_work(). Note that the work callback - * function may still be running on return from cancel_delayed_work(). Run - * flush_scheduled_work() to wait on it. + * function may still be running on return from cancel_delayed_work(), unless + * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or + * cancel_work_sync() to wait on it. */ static inline int cancel_delayed_work(struct delayed_work *work) { int ret; - ret = del_timer_sync(&work->timer); + ret = del_timer(&work->timer); if (ret) work_release(&work->work); return ret; } -- cgit v1.2.3-59-g8ed1b From 7318226ea2931a627f3572e5f4804c91ca19ecbc Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 26 Apr 2007 15:46:23 -0700 Subject: [AF_RXRPC]: Key facility changes for AF_RXRPC Export the keyring key type definition and document its availability. Add alternative types into the key's type_data union to make it more useful. Not all users necessarily want to use it as a list_head (AF_RXRPC doesn't, for example), so make it clear that it can be used in other ways. Signed-off-by: David Howells Signed-off-by: David S. Miller --- Documentation/keys.txt | 12 ++++++++++++ include/linux/key.h | 2 ++ security/keys/keyring.c | 2 ++ 3 files changed, 16 insertions(+) (limited to 'include') diff --git a/Documentation/keys.txt b/Documentation/keys.txt index 60c665d9cfaa..81d9aa097298 100644 --- a/Documentation/keys.txt +++ b/Documentation/keys.txt @@ -859,6 +859,18 @@ payload contents" for more information. void unregister_key_type(struct key_type *type); +Under some circumstances, it may be desirable to deal with a +bundle of keys. The facility provides access to the keyring type for managing +such a bundle: + + struct key_type key_type_keyring; + +This can be used with a function such as request_key() to find a specific +keyring in a process's keyrings. A keyring thus found can then be searched +with keyring_search(). Note that it is not possible to use request_key() to +search a specific keyring, so using keyrings in this way is of limited utility.
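A rough kernel-side sketch of the pattern just described may help; it is illustrative only and not part of this patch, and the choice of key_type_user for the inner search, the keyring naming and the error handling are assumptions:

	#include <linux/err.h>
	#include <linux/key.h>
	#include <keys/user-type.h>

	static struct key *find_user_key_in_named_keyring(const char *keyring_name,
							   const char *description)
	{
		struct key *keyring;
		key_ref_t found;

		/* request_key() cannot be told to search one particular
		 * keyring, but it can locate the keyring object itself... */
		keyring = request_key(&key_type_keyring, keyring_name, NULL);
		if (IS_ERR(keyring))
			return ERR_PTR(PTR_ERR(keyring));

		/* ...and that keyring can then be searched directly.  The
		 * caller owns a reference on the key returned here and must
		 * eventually key_put() it. */
		found = keyring_search(make_key_ref(keyring, 1),
				       &key_type_user, description);
		key_put(keyring);

		if (IS_ERR(found))
			return ERR_PTR(PTR_ERR(found));
		return key_ref_to_ptr(found);
	}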
+ + =================================== NOTES ON ACCESSING PAYLOAD CONTENTS =================================== diff --git a/include/linux/key.h b/include/linux/key.h index 169f05e4863e..a9220e75782e 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -160,6 +160,8 @@ struct key { */ union { struct list_head link; + unsigned long x[2]; + void *p[2]; } type_data; /* key data diff --git a/security/keys/keyring.c b/security/keys/keyring.c index ad45ce73964b..88292e3dee96 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -66,6 +66,8 @@ struct key_type key_type_keyring = { .read = keyring_read, }; +EXPORT_SYMBOL(key_type_keyring); + /* * semaphore to serialise link/link calls to prevent two link calls in parallel * introducing a cycle -- cgit v1.2.3-59-g8ed1b From 17926a79320afa9b95df6b977b40cca6d8713cea Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 26 Apr 2007 15:48:28 -0700 Subject: [AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both Provide AF_RXRPC sockets that can be used to talk to AFS servers, or serve answers to AFS clients. KerberosIV security is fully supported. The patches and some example test programs can be found in: http://people.redhat.com/~dhowells/rxrpc/ This will eventually replace the old implementation of kernel-only RxRPC currently resident in net/rxrpc/. Signed-off-by: David Howells Signed-off-by: David S. Miller --- Documentation/networking/rxrpc.txt | 663 +++++++++++++++++++ include/keys/rxrpc-type.h | 22 + include/linux/net.h | 2 +- include/linux/rxrpc.h | 62 ++ include/linux/socket.h | 5 +- include/net/af_rxrpc.h | 17 + include/rxrpc/packet.h | 85 ++- net/Kconfig | 1 + net/Makefile | 1 + net/core/sock.c | 6 +- net/rxrpc/Kconfig | 37 ++ net/rxrpc/Makefile | 31 +- net/rxrpc/af_rxrpc.c | 754 ++++++++++++++++++++++ net/rxrpc/ar-accept.c | 399 ++++++++++++ net/rxrpc/ar-ack.c | 1250 ++++++++++++++++++++++++++++++++++++ net/rxrpc/ar-call.c | 787 +++++++++++++++++++++++ net/rxrpc/ar-connection.c | 895 ++++++++++++++++++++++++++ net/rxrpc/ar-connevent.c | 387 +++++++++++ net/rxrpc/ar-error.c | 253 ++++++++ net/rxrpc/ar-input.c | 791 +++++++++++++++++++++++ net/rxrpc/ar-internal.h | 842 ++++++++++++++++++++++++ net/rxrpc/ar-key.c | 334 ++++++++++ net/rxrpc/ar-local.c | 309 +++++++++ net/rxrpc/ar-output.c | 658 +++++++++++++++++++ net/rxrpc/ar-peer.c | 273 ++++++++ net/rxrpc/ar-proc.c | 247 +++++++ net/rxrpc/ar-recvmsg.c | 366 +++++++++++ net/rxrpc/ar-security.c | 258 ++++++++ net/rxrpc/ar-skbuff.c | 118 ++++ net/rxrpc/ar-transport.c | 276 ++++++++ net/rxrpc/rxkad.c | 1153 +++++++++++++++++++++++++++++++++ 31 files changed, 11275 insertions(+), 7 deletions(-) create mode 100644 Documentation/networking/rxrpc.txt create mode 100644 include/keys/rxrpc-type.h create mode 100644 include/linux/rxrpc.h create mode 100644 include/net/af_rxrpc.h create mode 100644 net/rxrpc/Kconfig create mode 100644 net/rxrpc/af_rxrpc.c create mode 100644 net/rxrpc/ar-accept.c create mode 100644 net/rxrpc/ar-ack.c create mode 100644 net/rxrpc/ar-call.c create mode 100644 net/rxrpc/ar-connection.c create mode 100644 net/rxrpc/ar-connevent.c create mode 100644 net/rxrpc/ar-error.c create mode 100644 net/rxrpc/ar-input.c create mode 100644 net/rxrpc/ar-internal.h create mode 100644 net/rxrpc/ar-key.c create mode 100644 net/rxrpc/ar-local.c create mode 100644 net/rxrpc/ar-output.c create mode 100644 net/rxrpc/ar-peer.c create mode 100644 net/rxrpc/ar-proc.c create mode 100644 net/rxrpc/ar-recvmsg.c create mode 100644 net/rxrpc/ar-security.c 
create mode 100644 net/rxrpc/ar-skbuff.c create mode 100644 net/rxrpc/ar-transport.c create mode 100644 net/rxrpc/rxkad.c (limited to 'include') diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt new file mode 100644 index 000000000000..fb809b738a0d --- /dev/null +++ b/Documentation/networking/rxrpc.txt @@ -0,0 +1,663 @@ + ====================== + RxRPC NETWORK PROTOCOL + ====================== + +The RxRPC protocol driver provides a reliable two-phase transport on top of UDP +that can be used to perform RxRPC remote operations. This is done over sockets +of AF_RXRPC family, using sendmsg() and recvmsg() with control data to send and +receive data, aborts and errors. + +Contents of this document: + + (*) Overview. + + (*) RxRPC protocol summary. + + (*) AF_RXRPC driver model. + + (*) Control messages. + + (*) Socket options. + + (*) Security. + + (*) Example client usage. + + (*) Example server usage. + + +======== +OVERVIEW +======== + +RxRPC is a two-layer protocol. There is a session layer which provides +reliable virtual connections using UDP over IPv4 (or IPv6) as the transport +layer, but implements a real network protocol; and there's the presentation +layer which renders structured data to binary blobs and back again using XDR +(as does SunRPC): + + +-------------+ + | Application | + +-------------+ + | XDR | Presentation + +-------------+ + | RxRPC | Session + +-------------+ + | UDP | Transport + +-------------+ + + +AF_RXRPC provides: + + (1) Part of an RxRPC facility for both kernel and userspace applications by + making the session part of it a Linux network protocol (AF_RXRPC). + + (2) A two-phase protocol. The client transmits a blob (the request) and then + receives a blob (the reply), and the server receives the request and then + transmits the reply. + + (3) Retention of the reusable bits of the transport system set up for one call + to speed up subsequent calls. + + (4) A secure protocol, using the Linux kernel's key retention facility to + manage security on the client end. The server end must of necessity be + more active in security negotiations. + +AF_RXRPC does not provide XDR marshalling/presentation facilities. That is +left to the application. AF_RXRPC only deals in blobs. Even the operation ID +is just the first four bytes of the request blob, and as such is beyond the +kernel's interest. + + +Sockets of AF_RXRPC family are: + + (1) created as type SOCK_DGRAM; + + (2) provided with a protocol of the type of underlying transport they're going + to use - currently only PF_INET is supported. + + +The Andrew File System (AFS) is an example of an application that uses this and +that has both kernel (filesystem) and userspace (utility) components. + + +====================== +RXRPC PROTOCOL SUMMARY +====================== + +An overview of the RxRPC protocol: + + (*) RxRPC sits on top of another networking protocol (UDP is the only option + currently), and uses this to provide network transport. UDP ports, for + example, provide transport endpoints. + + (*) RxRPC supports multiple virtual "connections" from any given transport + endpoint, thus allowing the endpoints to be shared, even to the same + remote endpoint. + + (*) Each connection goes to a particular "service". A connection may not go + to multiple services. A service may be considered the RxRPC equivalent of + a port number. AF_RXRPC permits multiple services to share an endpoint. 
+ + (*) Client-originating packets are marked, thus a transport endpoint can be + shared between client and server connections (connections have a + direction). + + (*) Up to a billion connections may be supported concurrently between one + local transport endpoint and one service on one remote endpoint. An RxRPC + connection is described by seven numbers: + + Local address } + Local port } Transport (UDP) address + Remote address } + Remote port } + Direction + Connection ID + Service ID + + (*) Each RxRPC operation is a "call". A connection may make up to four + billion calls, but only up to four calls may be in progress on a + connection at any one time. + + (*) Calls are two-phase and asymmetric: the client sends its request data, + which the service receives; then the service transmits the reply data + which the client receives. + + (*) The data blobs are of indefinite size; the end of a phase is marked with a + flag in the packet. The number of packets of data making up one blob may + not exceed 4 billion, however, as this would cause the sequence number to + wrap. + + (*) The first four bytes of the request data are the service operation ID. + + (*) Security is negotiated on a per-connection basis. The connection is + initiated by the first data packet on it arriving. If security is + requested, the server then issues a "challenge" and then the client + replies with a "response". If the response is successful, the security is + set for the lifetime of that connection, and all subsequent calls made + upon it use that same security. In the event that the server lets a + connection lapse before the client, the security will be renegotiated if + the client uses the connection again. + + (*) Calls use ACK packets to handle reliability. Data packets are also + explicitly sequenced per call. + + (*) There are two types of positive acknowledgement: hard-ACKs and soft-ACKs. + A hard-ACK indicates to the far side that all the data received to a point + has been received and processed; a soft-ACK indicates that the data has + been received but may yet be discarded and re-requested. The sender may + not discard any transmittable packets until they've been hard-ACK'd. + + (*) Reception of a reply data packet implicitly hard-ACK's all the data + packets that make up the request. + + (*) A call is complete when the request has been sent, the reply has been + received and the final hard-ACK on the last packet of the reply has + reached the server. + + (*) A call may be aborted by either end at any time up to its completion. + + +===================== +AF_RXRPC DRIVER MODEL +===================== + +About the AF_RXRPC driver: + + (*) The AF_RXRPC protocol transparently uses internal sockets of the transport + protocol to represent transport endpoints. + + (*) AF_RXRPC sockets map onto RxRPC connection bundles. Actual RxRPC + connections are handled transparently. One client socket may be used to + make multiple simultaneous calls to the same service. One server socket + may handle calls from many clients. + + (*) Additional parallel client connections will be initiated to support extra + concurrent calls, up to a tunable limit. + + (*) Each connection is retained for a certain amount of time [tunable] after + the last call currently using it has completed in case a new call is made + that could reuse it. + + (*) Each internal UDP socket is retained [tunable] for a certain amount of + time [tunable] after the last connection using it is discarded, in case a new + connection is made that could use it.
+ + (*) A client-side connection is only shared between calls if they have + the same key struct describing their security (and assuming the calls + would otherwise share the connection). Non-secured calls would also be + able to share connections with each other. + + (*) A server-side connection is shared if the client says it is. + + (*) ACK'ing is handled by the protocol driver automatically, including ping + replying. + + (*) SO_KEEPALIVE automatically pings the other side to keep the connection + alive [TODO]. + + (*) If an ICMP error is received, all calls affected by that error will be + aborted with an appropriate network error passed through recvmsg(). + + +Interaction with the user of the RxRPC socket: + + (*) A socket is made into a server socket by binding an address with a + non-zero service ID. + + (*) In the client, sending a request is achieved with one or more sendmsgs, + followed by the reply being received with one or more recvmsgs. + + (*) The first sendmsg for a request to be sent from a client contains a tag to + be used in all other sendmsgs or recvmsgs associated with that call. The + tag is carried in the control data. + + (*) connect() is used to supply a default destination address for a client + socket. This may be overridden by supplying an alternate address to the + first sendmsg() of a call (struct msghdr::msg_name). + + (*) If connect() is called on an unbound client, a random local port will be + bound before the operation takes place. + + (*) A server socket may also be used to make client calls. To do this, the + first sendmsg() of the call must specify the target address. The server's + transport endpoint is used to send the packets. + + (*) Once the application has received the last message associated with a call, + the tag is guaranteed not to be seen again, and so it can be used to pin + client resources. A new call can then be initiated with the same tag + without fear of interference. + + (*) In the server, a request is received with one or more recvmsgs, then the + reply is transmitted with one or more sendmsgs, and then the final ACK + is received with a last recvmsg. + + (*) When sending data for a call, sendmsg is given MSG_MORE if there's more + data to come on that call. + + (*) When receiving data for a call, recvmsg flags MSG_MORE if there's more + data to come for that call. + + (*) When receiving data or messages for a call, MSG_EOR is flagged by recvmsg + to indicate the terminal message for that call. + + (*) A call may be aborted by adding an abort control message to the control + data. Issuing an abort terminates the kernel's use of that call's tag. + Any messages waiting in the receive queue for that call will be discarded. + + (*) Aborts, busy notifications and challenge packets are delivered by recvmsg, + and control data messages will be set to indicate the context. Receiving + an abort or a busy message terminates the kernel's use of that call's tag. + + (*) The control data part of the msghdr struct is used for a number of things: + + (*) The tag of the intended or affected call. + + (*) Sending or receiving errors, aborts and busy notifications. + + (*) Notifications of incoming calls. + + (*) Sending debug requests and receiving debug replies [TODO]. + + (*) When the kernel has received and set up an incoming call, it sends a + message to the server application to let it know there's a new call awaiting + its acceptance [recvmsg reports a special control message].
The server + application then uses sendmsg to assign a tag to the new call. Once that + is done, the first part of the request data will be delivered by recvmsg. + + (*) The server application has to provide the server socket with a keyring of + secret keys corresponding to the security types it permits. When a secure + connection is being set up, the kernel looks up the appropriate secret key + in the keyring and then sends a challenge packet to the client and + receives a response packet. The kernel then checks the authorisation of + the packet and either aborts the connection or sets up the security. + + (*) The name of the key a client will use to secure its communications is + nominated by a socket option. + + +Notes on recvmsg: + + (1) If there's a sequence of data messages belonging to a particular call on + the receive queue, then recvmsg will keep working through them until: + + (a) it meets the end of that call's received data, + + (b) it meets a non-data message, + + (c) it meets a message belonging to a different call, or + + (d) it fills the user buffer. + + If recvmsg is called in blocking mode, it will keep sleeping, awaiting the + reception of further data, until one of the above four conditions is met. + + (2) MSG_PEEK operates similarly, but will return immediately if it has put any + data in the buffer rather than sleeping until it can fill the buffer. + + (3) If a data message is only partially consumed in filling a user buffer, + then the remainder of that message will be left on the front of the queue + for the next taker. MSG_TRUNC will never be flagged. + + (4) If there is more data to be had on a call (it hasn't copied the last byte + of the last data message in that phase yet), then MSG_MORE will be + flagged. + + +================ +CONTROL MESSAGES +================ + +AF_RXRPC makes use of control messages in sendmsg() and recvmsg() to multiplex +calls, to invoke certain actions and to report certain conditions. These are: + + MESSAGE ID SRT DATA MEANING + ======================= === =========== =============================== + RXRPC_USER_CALL_ID sr- User ID App's call specifier + RXRPC_ABORT srt Abort code Abort code to issue/received + RXRPC_ACK -rt n/a Final ACK received + RXRPC_NET_ERROR -rt error num Network error on call + RXRPC_BUSY -rt n/a Call rejected (server busy) + RXRPC_LOCAL_ERROR -rt error num Local error encountered + RXRPC_NEW_CALL -r- n/a New call received + RXRPC_ACCEPT s-- n/a Accept new call + + (SRT = usable in Sendmsg / delivered by Recvmsg / Terminal message) + + (*) RXRPC_USER_CALL_ID + + This is used to indicate the application's call ID. It's an unsigned long + that the app specifies in the client by attaching it to the first data + message or in the server by passing it in association with an RXRPC_ACCEPT + message. recvmsg() passes it in conjunction with all messages except + those of the RXRPC_NEW_CALL message. + + (*) RXRPC_ABORT + + This can be used by an application to abort a call by passing it to + sendmsg, or it can be delivered by recvmsg to indicate a remote abort was + received. Either way, it must be associated with an RXRPC_USER_CALL_ID to + specify the call affected. If an abort is being sent, then error EBADSLT + will be returned if there is no call with that user ID. + + (*) RXRPC_ACK + + This is delivered to a server application to indicate that the final ACK + of a call was received from the client. It will be associated with an + RXRPC_USER_CALL_ID to indicate the call that's now complete.
+ + (*) RXRPC_NET_ERROR + + This is delivered to an application to indicate that an ICMP error message + was encountered in the process of trying to talk to the peer. An + errno-class integer value will be included in the control message data + indicating the problem, and an RXRPC_USER_CALL_ID will indicate the call + affected. + + (*) RXRPC_BUSY + + This is delivered to a client application to indicate that a call was + rejected by the server due to the server being busy. It will be + associated with an RXRPC_USER_CALL_ID to indicate the rejected call. + + (*) RXRPC_LOCAL_ERROR + + This is delivered to an application to indicate that a local error was + encountered and that a call has been aborted because of it. An + errno-class integer value will be included in the control message data + indicating the problem, and an RXRPC_USER_CALL_ID will indicate the call + affected. + + (*) RXRPC_NEW_CALL + + This is delivered to indicate to a server application that a new call has + arrived and is awaiting acceptance. No user ID is associated with this, + as a user ID must subsequently be assigned by doing an RXRPC_ACCEPT. + + (*) RXRPC_ACCEPT + + This is used by a server application to attempt to accept a call and + assign it a user ID. It should be associated with an RXRPC_USER_CALL_ID + to indicate the user ID to be assigned. If there is no call to be + accepted (it may have timed out, been aborted, etc.), then sendmsg will + return error ENODATA. If the user ID is already in use by another call, + then error EBADSLT will be returned. + + +============== +SOCKET OPTIONS +============== + +AF_RXRPC sockets support a few socket options at the SOL_RXRPC level: + + (*) RXRPC_SECURITY_KEY + + This is used to specify the description of the key to be used. The key is + extracted from the calling process's keyrings with request_key() and + should be of "rxrpc" type. + + The optval pointer points to the description string, and optlen indicates + how long the string is, without the NUL terminator. + + (*) RXRPC_SECURITY_KEYRING + + Similar to above but specifies a keyring of server secret keys to use (key + type "keyring"). See the "Security" section. + + (*) RXRPC_EXCLUSIVE_CONNECTION + + This is used to request that new connections should be used for each call + made subsequently on this socket. optval should be NULL and optlen 0. + + (*) RXRPC_MIN_SECURITY_LEVEL + + This is used to specify the minimum security level required for calls on + this socket. optval must point to an int containing one of the following + values: + + (a) RXRPC_SECURITY_PLAIN + + Encrypted checksum only. + + (b) RXRPC_SECURITY_AUTH + + Encrypted checksum plus packet padded and first eight bytes of packet + encrypted - which includes the actual packet length. + + (c) RXRPC_SECURITY_ENCRYPTED + + Encrypted checksum plus entire packet padded and encrypted, including + actual packet length. + + +======== +SECURITY +======== + +Currently, only the kerberos 4 equivalent protocol has been implemented +(security index 2 - rxkad). This requires the rxkad module to be loaded and, +on the client, tickets of the appropriate type to be obtained from the AFS +kaserver or the kerberos server and installed as "rxrpc" type keys. This is +normally done using the klog program. 
An example simple klog program can be +found at: + + http://people.redhat.com/~dhowells/rxrpc/klog.c + +The payload provided to add_key() on the client should be of the following +form: + + struct rxrpc_key_sec2_v1 { + uint16_t security_index; /* 2 */ + uint16_t ticket_length; /* length of ticket[] */ + uint32_t expiry; /* time at which expires */ + uint8_t kvno; /* key version number */ + uint8_t __pad[3]; + uint8_t session_key[8]; /* DES session key */ + uint8_t ticket[0]; /* the encrypted ticket */ + }; + +Where the ticket blob is just appended to the above structure. + + +For the server, keys of type "rxrpc_s" must be made available to the server. +They have a description of "<serviceID>:<securityIndex>" (eg: "52:2" for an +rxkad key for the AFS VL service). When such a key is created, it should be +given the server's secret key as the instantiation data (see the example +below). + + add_key("rxrpc_s", "52:2", secret_key, 8, keyring); + +A keyring is passed to the server socket by naming it in a sockopt. The server +socket then looks the server secret keys up in this keyring when secure +incoming connections are made. This can be seen in an example program that can +be found at: + + http://people.redhat.com/~dhowells/rxrpc/listen.c + + +==================== +EXAMPLE CLIENT USAGE +==================== + +A client would issue an operation by: + + (1) An RxRPC socket is set up by: + + client = socket(AF_RXRPC, SOCK_DGRAM, PF_INET); + + Where the third parameter indicates the protocol family of the transport + socket used - usually IPv4 but it can also be IPv6 [TODO]. + + (2) A local address can optionally be bound: + + struct sockaddr_rxrpc srx = { + .srx_family = AF_RXRPC, + .srx_service = 0, /* we're a client */ + .transport_type = SOCK_DGRAM, /* type of transport socket */ + .transport.sin_family = AF_INET, + .transport.sin_port = htons(7000), /* AFS callback */ + .transport.sin_address = 0, /* all local interfaces */ + }; + bind(client, &srx, sizeof(srx)); + + This specifies the local UDP port to be used. If not given, a random + non-privileged port will be used. A UDP port may be shared between + several unrelated RxRPC sockets. Security is handled on a basis of + per-RxRPC virtual connection. + + (3) The security is set: + + const char *key = "AFS:cambridge.redhat.com"; + setsockopt(client, SOL_RXRPC, RXRPC_SECURITY_KEY, key, strlen(key)); + + This issues a request_key() to get the key representing the security + context. The minimum security level can be set: + + unsigned int sec = RXRPC_SECURITY_ENCRYPTED; + setsockopt(client, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL, + &sec, sizeof(sec)); + + (4) The server to be contacted can then be specified (alternatively this can + be done through sendmsg): + + struct sockaddr_rxrpc srx = { + .srx_family = AF_RXRPC, + .srx_service = VL_SERVICE_ID, + .transport_type = SOCK_DGRAM, /* type of transport socket */ + .transport.sin_family = AF_INET, + .transport.sin_port = htons(7005), /* AFS volume manager */ + .transport.sin_address = ..., + }; + connect(client, &srx, sizeof(srx)); + + (5) The request data should then be posted to the server socket using a series + of sendmsg() calls, each with the following control message attached: + + RXRPC_USER_CALL_ID - specifies the user ID for this call + + MSG_MORE should be set in msghdr::msg_flags on all but the last part of + the request. Multiple requests may be made simultaneously.
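+ As a rough illustrative sketch only, the control message of step (5) might
+ be attached to a sendmsg() call as below; the use of SOL_RXRPC as the cmsg
+ level and the "client", "buf" and "len" variables are assumptions here, and
+ <sys/socket.h>, <string.h> and the AF_RXRPC definitions are needed:
+
+	unsigned long user_call_id = 1;	/* tag chosen by the app */
+	char cbuf[CMSG_SPACE(sizeof(user_call_id))];
+	struct iovec iov = { .iov_base = buf, .iov_len = len };
+	struct msghdr msg;
+	struct cmsghdr *cm;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = cbuf;
+	msg.msg_controllen = sizeof(cbuf);
+
+	/* attach RXRPC_USER_CALL_ID carrying the unsigned long tag */
+	cm = CMSG_FIRSTHDR(&msg);
+	cm->cmsg_level = SOL_RXRPC;	/* assumed cmsg level */
+	cm->cmsg_type = RXRPC_USER_CALL_ID;
+	cm->cmsg_len = CMSG_LEN(sizeof(user_call_id));
+	memcpy(CMSG_DATA(cm), &user_call_id, sizeof(user_call_id));
+
+	/* MSG_MORE on all but the final part of the request */
+	sendmsg(client, &msg, MSG_MORE);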
+ + If a call is intended to go to a destination other than the default + specified through connect(), then msghdr::msg_name should be set on the + first request message of that call. + + (6) The reply data will then be posted to the server socket for recvmsg() to + pick up. MSG_MORE will be flagged by recvmsg() if there's more reply data + for a particular call to be read. MSG_EOR will be set on the terminal + read for a call. + + All data will be delivered with the following control message attached: + + RXRPC_USER_CALL_ID - specifies the user ID for this call + + If an abort or error occurred, this will be returned in the control data + buffer instead, and MSG_EOR will be flagged to indicate the end of that + call. + + +==================== +EXAMPLE SERVER USAGE +==================== + +A server would be set up to accept operations in the following manner: + + (1) An RxRPC socket is created by: + + server = socket(AF_RXRPC, SOCK_DGRAM, PF_INET); + + Where the third parameter indicates the address type of the transport + socket used - usually IPv4. + + (2) Security is set up if desired by giving the socket a keyring with server + secret keys in it: + + keyring = add_key("keyring", "AFSkeys", NULL, 0, + KEY_SPEC_PROCESS_KEYRING); + + const char secret_key[8] = { + 0xa7, 0x83, 0x8a, 0xcb, 0xc7, 0x83, 0xec, 0x94 }; + add_key("rxrpc_s", "52:2", secret_key, 8, keyring); + + setsockopt(server, SOL_RXRPC, RXRPC_SECURITY_KEYRING, "AFSkeys", 7); + + The keyring can be manipulated after it has been given to the socket. This + permits the server to add more keys, replace keys, etc. whilst it is live. + + (3) A local address must then be bound: + + struct sockaddr_rxrpc srx = { + .srx_family = AF_RXRPC, + .srx_service = VL_SERVICE_ID, /* RxRPC service ID */ + .transport_type = SOCK_DGRAM, /* type of transport socket */ + .transport.sin_family = AF_INET, + .transport.sin_port = htons(7000), /* AFS callback */ + .transport.sin_address = 0, /* all local interfaces */ + }; + bind(server, &srx, sizeof(srx)); + + (4) The server is then set to listen out for incoming calls: + + listen(server, 100); + + (5) The kernel notifies the server of pending incoming connections by sending + it a message for each. This is received with recvmsg() on the server + socket. It has no data, and has a single dataless control message + attached: + + RXRPC_NEW_CALL + + The address that can be passed back by recvmsg() at this point should be + ignored since the call for which the message was posted may have gone by + the time it is accepted - in which case the first call still on the queue + will be accepted. + + (6) The server then accepts the new call by issuing a sendmsg() with two + pieces of control data and no actual data: + + RXRPC_ACCEPT - indicate connection acceptance + RXRPC_USER_CALL_ID - specify user ID for this call + + (7) The first request data packet will then be posted to the server socket for + recvmsg() to pick up. At that point, the RxRPC address for the call can + be read from the address fields in the msghdr struct. + + Subsequent request data will be posted to the server socket for recvmsg() + to collect as it arrives. All but the last piece of the request data will + be delivered with MSG_MORE flagged.
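+ A similarly hedged sketch (again assuming SOL_RXRPC as the cmsg level, and
+ that "server" is the socket and "buf" a suitably sized char array) of how
+ one piece of request data and its user call ID might be collected:
+
+	char cbuf[CMSG_SPACE(sizeof(unsigned long))];
+	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
+	struct msghdr msg;
+	struct cmsghdr *cm;
+	unsigned long user_call_id = 0;
+	ssize_t n;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = cbuf;
+	msg.msg_controllen = sizeof(cbuf);
+
+	/* n is the amount of request data placed in buf by this message */
+	n = recvmsg(server, &msg, 0);
+
+	/* find the RXRPC_USER_CALL_ID cmsg to see which call this data is for */
+	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+		if (cm->cmsg_level == SOL_RXRPC &&
+		    cm->cmsg_type == RXRPC_USER_CALL_ID)
+			memcpy(&user_call_id, CMSG_DATA(cm),
+			       sizeof(user_call_id));
+	}
+
+	/* msg.msg_flags & MSG_MORE: more request data still to come;
+	 * msg.msg_flags & MSG_EOR: terminal message for this call */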
+ + All data will be delivered with the following control message attached: + + RXRPC_USER_CALL_ID - specifies the user ID for this call + + (8) The reply data should then be posted to the server socket using a series + of sendmsg() calls, each with the following control messages attached: + + RXRPC_USER_CALL_ID - specifies the user ID for this call + + MSG_MORE should be set in msghdr::msg_flags on all but the last message + for a particular call. + + (9) The final ACK from the client will be posted for retrieval by recvmsg() + when it is received. It will take the form of a dataless message with two + control messages attached: + + RXRPC_USER_CALL_ID - specifies the user ID for this call + RXRPC_ACK - indicates final ACK (no data) + + MSG_EOR will be flagged to indicate that this is the final message for + this call. + +(10) Up to the point the final packet of reply data is sent, the call can be + aborted by calling sendmsg() with a dataless message with the following + control messages attached: + + RXRPC_USER_CALL_ID - specifies the user ID for this call + RXRPC_ABORT - indicates abort code (4 byte data) + + Any packets waiting in the socket's receive queue will be discarded if + this is issued. + +Note that all the communications for a particular service take place through +the one server socket, using control messages on sendmsg() and recvmsg() to +determine the call affected. diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h new file mode 100644 index 000000000000..e2ee73aef0ee --- /dev/null +++ b/include/keys/rxrpc-type.h @@ -0,0 +1,22 @@ +/* RxRPC key type + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _KEYS_RXRPC_TYPE_H +#define _KEYS_RXRPC_TYPE_H + +#include + +/* + * key type for AF_RXRPC keys + */ +extern struct key_type key_type_rxrpc; + +#endif /* _KEYS_USER_TYPE_H */ diff --git a/include/linux/net.h b/include/linux/net.h index 4db21e63d8d2..efc45177b503 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -24,7 +24,7 @@ struct poll_table_struct; struct inode; -#define NPROTO 33 /* should be enough for now.. */ +#define NPROTO 34 /* should be enough for now.. */ #define SYS_SOCKET 1 /* sys_socket(2) */ #define SYS_BIND 2 /* sys_bind(2) */ diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h new file mode 100644 index 000000000000..f7b826b565c7 --- /dev/null +++ b/include/linux/rxrpc.h @@ -0,0 +1,62 @@ +/* AF_RXRPC parameters + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_RXRPC_H +#define _LINUX_RXRPC_H + +#include +#include + +/* + * RxRPC socket address + */ +struct sockaddr_rxrpc { + sa_family_t srx_family; /* address family */ + u16 srx_service; /* service desired */ + u16 transport_type; /* type of transport socket (SOCK_DGRAM) */ + u16 transport_len; /* length of transport address */ + union { + sa_family_t family; /* transport address family */ + struct sockaddr_in sin; /* IPv4 transport address */ + struct sockaddr_in6 sin6; /* IPv6 transport address */ + } transport; +}; + +/* + * RxRPC socket options + */ +#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */ +#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ +#define RXRPC_EXCLUSIVE_CONNECTION 3 /* [clnt] use exclusive RxRPC connection */ +#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ + +/* + * RxRPC control messages + * - terminal messages mean that a user call ID tag can be recycled + */ +#define RXRPC_USER_CALL_ID 1 /* user call ID specifier */ +#define RXRPC_ABORT 2 /* abort request / notification [terminal] */ +#define RXRPC_ACK 3 /* [Server] RPC op final ACK received [terminal] */ +#define RXRPC_NET_ERROR 5 /* network error received [terminal] */ +#define RXRPC_BUSY 6 /* server busy received [terminal] */ +#define RXRPC_LOCAL_ERROR 7 /* local error generated [terminal] */ +#define RXRPC_NEW_CALL 8 /* [Server] new incoming call notification */ +#define RXRPC_ACCEPT 9 /* [Server] accept request */ + +/* + * RxRPC security levels + */ +#define RXRPC_SECURITY_PLAIN 0 /* plain secure-checksummed packets only */ +#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */ +#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */ + + +#endif /* _LINUX_RXRPC_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index fcd35a210e7f..6e7c9483a6a6 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -188,7 +188,8 @@ struct ucred { #define AF_TIPC 30 /* TIPC sockets */ #define AF_BLUETOOTH 31 /* Bluetooth sockets */ #define AF_IUCV 32 /* IUCV sockets */ -#define AF_MAX 33 /* For now.. */ +#define AF_RXRPC 33 /* RxRPC sockets */ +#define AF_MAX 34 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -222,6 +223,7 @@ struct ucred { #define PF_TIPC AF_TIPC #define PF_BLUETOOTH AF_BLUETOOTH #define PF_IUCV AF_IUCV +#define PF_RXRPC AF_RXRPC #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ @@ -284,6 +286,7 @@ struct ucred { #define SOL_DCCP 269 #define SOL_NETLINK 270 #define SOL_TIPC 271 +#define SOL_RXRPC 272 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h new file mode 100644 index 000000000000..b01ca2589d69 --- /dev/null +++ b/include/net/af_rxrpc.h @@ -0,0 +1,17 @@ +/* RxRPC definitions + * + * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _NET_RXRPC_H +#define _NET_RXRPC_H + +#include + +#endif /* _NET_RXRPC_H */ diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index 1447f0aaa0eb..452a9bb02d48 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -33,7 +33,8 @@ struct rxrpc_header #define RXRPC_MAXCALLS 4 /* max active calls per conn */ #define RXRPC_CHANNELMASK (RXRPC_MAXCALLS-1) /* mask for channel ID */ #define RXRPC_CIDMASK (~RXRPC_CHANNELMASK) /* mask for connection ID */ -#define RXRPC_CIDSHIFT 2 /* shift for connection ID */ +#define RXRPC_CIDSHIFT ilog2(RXRPC_MAXCALLS) /* shift for connection ID */ +#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */ __be32 callNumber; /* call ID (0 for connection-level packets) */ #define RXRPC_PROCESS_MAXCALLS (1<<2) /* maximum number of active calls per conn (power of 2) */ @@ -62,7 +63,10 @@ struct rxrpc_header uint8_t userStatus; /* app-layer defined status */ uint8_t securityIndex; /* security protocol ID */ - __be16 _rsvd; /* reserved (used by kerberos security as cksum) */ + union { + __be16 _rsvd; /* reserved */ + __be16 cksum; /* kerberos security checksum */ + }; __be16 serviceId; /* service ID */ } __attribute__((packed)); @@ -124,4 +128,81 @@ struct rxrpc_ackpacket } __attribute__((packed)); +/* + * ACK packets can have a further piece of information tagged on the end + */ +struct rxrpc_ackinfo { + __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */ + __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */ + __be32 rwind; /* Rx window size (packets) [AFS 3.4] */ + __be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */ +}; + +/*****************************************************************************/ +/* + * Kerberos security type-2 challenge packet + */ +struct rxkad_challenge { + __be32 version; /* version of this challenge type */ + __be32 nonce; /* encrypted random number */ + __be32 min_level; /* minimum security level */ + __be32 __padding; /* padding to 8-byte boundary */ +} __attribute__((packed)); + +/*****************************************************************************/ +/* + * Kerberos security type-2 response packet + */ +struct rxkad_response { + __be32 version; /* version of this reponse type */ + __be32 __pad; + + /* encrypted bit of the response */ + struct { + __be32 epoch; /* current epoch */ + __be32 cid; /* parent connection ID */ + __be32 checksum; /* checksum */ + __be32 securityIndex; /* security type */ + __be32 call_id[4]; /* encrypted call IDs */ + __be32 inc_nonce; /* challenge nonce + 1 */ + __be32 level; /* desired level */ + } encrypted; + + __be32 kvno; /* Kerberos key version number */ + __be32 ticket_len; /* Kerberos ticket length */ +} __attribute__((packed)); + +/*****************************************************************************/ +/* + * RxRPC-level abort codes + */ +#define RX_CALL_DEAD -1 /* call/conn has been inactive and is shut down */ +#define RX_INVALID_OPERATION -2 /* invalid operation requested / attempted */ +#define RX_CALL_TIMEOUT -3 /* call timeout exceeded */ +#define RX_EOF -4 /* unexpected end of data on read op */ +#define RX_PROTOCOL_ERROR -5 /* low-level protocol error */ +#define RX_USER_ABORT -6 /* generic user abort */ +#define RX_ADDRINUSE -7 /* UDP port in use */ +#define RX_DEBUGI_BADTYPE -8 /* bad debugging packet type */ + +/* + * Rx kerberos security abort codes + * - unfortunately we have no generalised security abort codes to say things + * like "unsupported security", so we have to use 
these instead and hope the + * other side understands + */ +#define RXKADINCONSISTENCY 19270400 /* security module structure inconsistent */ +#define RXKADPACKETSHORT 19270401 /* packet too short for security challenge */ +#define RXKADLEVELFAIL 19270402 /* security level negotiation failed */ +#define RXKADTICKETLEN 19270403 /* ticket length too short or too long */ +#define RXKADOUTOFSEQUENCE 19270404 /* packet had bad sequence number */ +#define RXKADNOAUTH 19270405 /* caller not authorised */ +#define RXKADBADKEY 19270406 /* illegal key: bad parity or weak */ +#define RXKADBADTICKET 19270407 /* security object was passed a bad ticket */ +#define RXKADUNKNOWNKEY 19270408 /* ticket contained unknown key version number */ +#define RXKADEXPIRED 19270409 /* authentication expired */ +#define RXKADSEALEDINCON 19270410 /* sealed data inconsistent */ +#define RXKADDATALEN 19270411 /* user data too long */ +#define RXKADILLEGALLEVEL 19270412 /* caller not authorised to use encrypted conns */ + #endif /* _LINUX_RXRPC_PACKET_H */ diff --git a/net/Kconfig b/net/Kconfig index ae1817dc51b8..2fc8e77b1e62 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -212,6 +212,7 @@ endmenu source "net/ax25/Kconfig" source "net/irda/Kconfig" source "net/bluetooth/Kconfig" +source "net/rxrpc/Kconfig" config FIB_RULES bool diff --git a/net/Makefile b/net/Makefile index 29bbe19d87f1..6b74d4118c5b 100644 --- a/net/Makefile +++ b/net/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_IRDA) += irda/ obj-$(CONFIG_BT) += bluetooth/ obj-$(CONFIG_SUNRPC) += sunrpc/ obj-$(CONFIG_RXRPC) += rxrpc/ +obj-$(CONFIG_AF_RXRPC) += rxrpc/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_DECNET) += decnet/ obj-$(CONFIG_ECONET) += econet/ diff --git a/net/core/sock.c b/net/core/sock.c index 043bdc05d211..22183c2ef284 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -154,7 +154,8 @@ static const char *af_family_key_strings[AF_MAX+1] = { "sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" , "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , "sk_lock-27" , "sk_lock-28" , "sk_lock-29" , - "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX" + "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , + "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX" }; static const char *af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , @@ -167,7 +168,8 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = { "slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" , "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , "slock-27" , "slock-28" , "slock-29" , - "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_MAX" + "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , + "slock-AF_RXRPC" , "slock-AF_MAX" }; #endif diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig new file mode 100644 index 000000000000..d72380e304ae --- /dev/null +++ b/net/rxrpc/Kconfig @@ -0,0 +1,37 @@ +# +# RxRPC session sockets +# + +config AF_RXRPC + tristate "RxRPC session sockets" + depends on EXPERIMENTAL + help + Say Y or M here to include support for RxRPC session sockets (just + the transport part, not the presentation part: (un)marshalling is + left to the application). + + These are used for AFS kernel filesystem and userspace utilities. + + This module at the moment only supports client operations and is + currently incomplete. + + See Documentation/networking/rxrpc.txt. + + +config AF_RXRPC_DEBUG + bool "RxRPC dynamic debugging" + depends on AF_RXRPC + help + Say Y here to make runtime controllable debugging messages appear. 
+ + See Documentation/networking/rxrpc.txt. + + +config RXKAD + tristate "RxRPC Kerberos security" + depends on AF_RXRPC && KEYS + help + Provide kerberos 4 and AFS kaserver security handling for AF_RXRPC + through the use of the key retention service. + + See Documentation/networking/rxrpc.txt. diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index 6efcb6f162a0..07bf82ffec6a 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile @@ -4,6 +4,35 @@ #CFLAGS += -finstrument-functions +af-rxrpc-objs := \ + af_rxrpc.o \ + ar-accept.o \ + ar-ack.o \ + ar-call.o \ + ar-connection.o \ + ar-connevent.o \ + ar-error.o \ + ar-input.o \ + ar-key.o \ + ar-local.o \ + ar-output.o \ + ar-peer.o \ + ar-recvmsg.o \ + ar-security.o \ + ar-skbuff.o \ + ar-transport.o + +ifeq ($(CONFIG_PROC_FS),y) +af-rxrpc-objs += ar-proc.o +endif + +obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o + +obj-$(CONFIG_RXKAD) += rxkad.o + +# +# obsolete RxRPC interface, still used by fs/afs/ +# rxrpc-objs := \ call.o \ connection.o \ @@ -22,4 +51,4 @@ ifeq ($(CONFIG_SYSCTL),y) rxrpc-objs += sysctl.o endif -obj-$(CONFIG_RXRPC) := rxrpc.o +obj-$(CONFIG_RXRPC) += rxrpc.o diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c new file mode 100644 index 000000000000..bfa8822e2286 --- /dev/null +++ b/net/rxrpc/af_rxrpc.c @@ -0,0 +1,754 @@ +/* AF_RXRPC implementation + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +MODULE_DESCRIPTION("RxRPC network protocol"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETPROTO(PF_RXRPC); + +unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; +module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(rxrpc_debug, "RxRPC debugging mask"); + +static int sysctl_rxrpc_max_qlen __read_mostly = 10; + +static struct proto rxrpc_proto; +static const struct proto_ops rxrpc_rpc_ops; + +/* local epoch for detecting local-end reset */ +__be32 rxrpc_epoch; + +/* current debugging ID */ +atomic_t rxrpc_debug_id; + +/* count of skbs currently in use */ +atomic_t rxrpc_n_skbs; + +static void rxrpc_sock_destructor(struct sock *); + +/* + * see if an RxRPC socket is currently writable + */ +static inline int rxrpc_writable(struct sock *sk) +{ + return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf; +} + +/* + * wait for write bufferage to become available + */ +static void rxrpc_write_space(struct sock *sk) +{ + _enter("%p", sk); + read_lock(&sk->sk_callback_lock); + if (rxrpc_writable(sk)) { + if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) + wake_up_interruptible(sk->sk_sleep); + sk_wake_async(sk, 2, POLL_OUT); + } + read_unlock(&sk->sk_callback_lock); +} + +/* + * validate an RxRPC address + */ +static int rxrpc_validate_address(struct rxrpc_sock *rx, + struct sockaddr_rxrpc *srx, + int len) +{ + if (len < sizeof(struct sockaddr_rxrpc)) + return -EINVAL; + + if (srx->srx_family != AF_RXRPC) + return -EAFNOSUPPORT; + + if (srx->transport_type != SOCK_DGRAM) + return -ESOCKTNOSUPPORT; + + len -= offsetof(struct sockaddr_rxrpc, transport); + if (srx->transport_len < sizeof(sa_family_t) || + srx->transport_len > len) + return -EINVAL; + + if 
(srx->transport.family != rx->proto) + return -EAFNOSUPPORT; + + switch (srx->transport.family) { + case AF_INET: + _debug("INET: %x @ %u.%u.%u.%u", + ntohs(srx->transport.sin.sin_port), + NIPQUAD(srx->transport.sin.sin_addr)); + if (srx->transport_len > 8) + memset((void *)&srx->transport + 8, 0, + srx->transport_len - 8); + break; + + case AF_INET6: + default: + return -EAFNOSUPPORT; + } + + return 0; +} + +/* + * bind a local address to an RxRPC socket + */ +static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) +{ + struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr; + struct sock *sk = sock->sk; + struct rxrpc_local *local; + struct rxrpc_sock *rx = rxrpc_sk(sk), *prx; + __be16 service_id; + int ret; + + _enter("%p,%p,%d", rx, saddr, len); + + ret = rxrpc_validate_address(rx, srx, len); + if (ret < 0) + goto error; + + lock_sock(&rx->sk); + + if (rx->sk.sk_state != RXRPC_UNCONNECTED) { + ret = -EINVAL; + goto error_unlock; + } + + memcpy(&rx->srx, srx, sizeof(rx->srx)); + + /* find a local transport endpoint if we don't have one already */ + local = rxrpc_lookup_local(&rx->srx); + if (IS_ERR(local)) { + ret = PTR_ERR(local); + goto error_unlock; + } + + rx->local = local; + if (srx->srx_service) { + service_id = htons(srx->srx_service); + write_lock_bh(&local->services_lock); + list_for_each_entry(prx, &local->services, listen_link) { + if (prx->service_id == service_id) + goto service_in_use; + } + + rx->service_id = service_id; + list_add_tail(&rx->listen_link, &local->services); + write_unlock_bh(&local->services_lock); + + rx->sk.sk_state = RXRPC_SERVER_BOUND; + } else { + rx->sk.sk_state = RXRPC_CLIENT_BOUND; + } + + release_sock(&rx->sk); + _leave(" = 0"); + return 0; + +service_in_use: + ret = -EADDRINUSE; + write_unlock_bh(&local->services_lock); +error_unlock: + release_sock(&rx->sk); +error: + _leave(" = %d", ret); + return ret; +} + +/* + * set the number of pending calls permitted on a listening socket + */ +static int rxrpc_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + struct rxrpc_sock *rx = rxrpc_sk(sk); + int ret; + + _enter("%p,%d", rx, backlog); + + lock_sock(&rx->sk); + + switch (rx->sk.sk_state) { + case RXRPC_UNCONNECTED: + ret = -EADDRNOTAVAIL; + break; + case RXRPC_CLIENT_BOUND: + case RXRPC_CLIENT_CONNECTED: + default: + ret = -EBUSY; + break; + case RXRPC_SERVER_BOUND: + ASSERT(rx->local != NULL); + sk->sk_max_ack_backlog = backlog; + rx->sk.sk_state = RXRPC_SERVER_LISTENING; + ret = 0; + break; + } + + release_sock(&rx->sk); + _leave(" = %d", ret); + return ret; +} + +/* + * find a transport by address + */ +static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock, + struct sockaddr *addr, + int addr_len, int flags) +{ + struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; + struct rxrpc_transport *trans; + struct rxrpc_sock *rx = rxrpc_sk(sock->sk); + struct rxrpc_peer *peer; + + _enter("%p,%p,%d,%d", rx, addr, addr_len, flags); + + ASSERT(rx->local != NULL); + ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED); + + if (rx->srx.transport_type != srx->transport_type) + return ERR_PTR(-ESOCKTNOSUPPORT); + if (rx->srx.transport.family != srx->transport.family) + return ERR_PTR(-EAFNOSUPPORT); + + /* find a remote transport endpoint from the local one */ + peer = rxrpc_get_peer(srx, GFP_KERNEL); + if (IS_ERR(peer)) + return ERR_PTR(PTR_ERR(peer)); + + /* find a transport */ + trans = rxrpc_get_transport(rx->local, peer, GFP_KERNEL); + rxrpc_put_peer(peer); + _leave(" = %p", trans); + 
return trans; +} + +/* + * connect an RxRPC socket + * - this just targets it at a specific destination; no actual connection + * negotiation takes place + */ +static int rxrpc_connect(struct socket *sock, struct sockaddr *addr, + int addr_len, int flags) +{ + struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; + struct sock *sk = sock->sk; + struct rxrpc_transport *trans; + struct rxrpc_local *local; + struct rxrpc_sock *rx = rxrpc_sk(sk); + int ret; + + _enter("%p,%p,%d,%d", rx, addr, addr_len, flags); + + ret = rxrpc_validate_address(rx, srx, addr_len); + if (ret < 0) { + _leave(" = %d [bad addr]", ret); + return ret; + } + + lock_sock(&rx->sk); + + switch (rx->sk.sk_state) { + case RXRPC_UNCONNECTED: + /* find a local transport endpoint if we don't have one already */ + ASSERTCMP(rx->local, ==, NULL); + rx->srx.srx_family = AF_RXRPC; + rx->srx.srx_service = 0; + rx->srx.transport_type = srx->transport_type; + rx->srx.transport_len = sizeof(sa_family_t); + rx->srx.transport.family = srx->transport.family; + local = rxrpc_lookup_local(&rx->srx); + if (IS_ERR(local)) { + release_sock(&rx->sk); + return PTR_ERR(local); + } + rx->local = local; + rx->sk.sk_state = RXRPC_CLIENT_BOUND; + case RXRPC_CLIENT_BOUND: + break; + case RXRPC_CLIENT_CONNECTED: + release_sock(&rx->sk); + return -EISCONN; + default: + release_sock(&rx->sk); + return -EBUSY; /* server sockets can't connect as well */ + } + + trans = rxrpc_name_to_transport(sock, addr, addr_len, flags); + if (IS_ERR(trans)) { + release_sock(&rx->sk); + _leave(" = %ld", PTR_ERR(trans)); + return PTR_ERR(trans); + } + + rx->trans = trans; + rx->service_id = htons(srx->srx_service); + rx->sk.sk_state = RXRPC_CLIENT_CONNECTED; + + release_sock(&rx->sk); + return 0; +} + +/* + * send a message through an RxRPC socket + * - in a client this does a number of things: + * - finds/sets up a connection for the security specified (if any) + * - initiates a call (ID in control data) + * - ends the request phase of a call (if MSG_MORE is not set) + * - sends a call data packet + * - may send an abort (abort code in control data) + */ +static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *m, size_t len) +{ + struct rxrpc_transport *trans; + struct rxrpc_sock *rx = rxrpc_sk(sock->sk); + int ret; + + _enter(",{%d},,%zu", rx->sk.sk_state, len); + + if (m->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + if (m->msg_name) { + ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen); + if (ret < 0) { + _leave(" = %d [bad addr]", ret); + return ret; + } + } + + trans = NULL; + lock_sock(&rx->sk); + + if (m->msg_name) { + ret = -EISCONN; + trans = rxrpc_name_to_transport(sock, m->msg_name, + m->msg_namelen, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + trans = NULL; + goto out; + } + } else { + trans = rx->trans; + if (trans) + atomic_inc(&trans->usage); + } + + switch (rx->sk.sk_state) { + case RXRPC_SERVER_LISTENING: + if (!m->msg_name) { + ret = rxrpc_server_sendmsg(iocb, rx, m, len); + break; + } + case RXRPC_SERVER_BOUND: + case RXRPC_CLIENT_BOUND: + if (!m->msg_name) { + ret = -ENOTCONN; + break; + } + case RXRPC_CLIENT_CONNECTED: + ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len); + break; + default: + ret = -ENOTCONN; + break; + } + +out: + release_sock(&rx->sk); + if (trans) + rxrpc_put_transport(trans); + _leave(" = %d", ret); + return ret; +} + +/* + * set RxRPC socket options + */ +static int rxrpc_setsockopt(struct socket *sock, int level, int optname, + char __user *optval, int optlen) +{ + struct 
rxrpc_sock *rx = rxrpc_sk(sock->sk); + unsigned min_sec_level; + int ret; + + _enter(",%d,%d,,%d", level, optname, optlen); + + lock_sock(&rx->sk); + ret = -EOPNOTSUPP; + + if (level == SOL_RXRPC) { + switch (optname) { + case RXRPC_EXCLUSIVE_CONNECTION: + ret = -EINVAL; + if (optlen != 0) + goto error; + ret = -EISCONN; + if (rx->sk.sk_state != RXRPC_UNCONNECTED) + goto error; + set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags); + goto success; + + case RXRPC_SECURITY_KEY: + ret = -EINVAL; + if (rx->key) + goto error; + ret = -EISCONN; + if (rx->sk.sk_state != RXRPC_UNCONNECTED) + goto error; + ret = rxrpc_request_key(rx, optval, optlen); + goto error; + + case RXRPC_SECURITY_KEYRING: + ret = -EINVAL; + if (rx->key) + goto error; + ret = -EISCONN; + if (rx->sk.sk_state != RXRPC_UNCONNECTED) + goto error; + ret = rxrpc_server_keyring(rx, optval, optlen); + goto error; + + case RXRPC_MIN_SECURITY_LEVEL: + ret = -EINVAL; + if (optlen != sizeof(unsigned)) + goto error; + ret = -EISCONN; + if (rx->sk.sk_state != RXRPC_UNCONNECTED) + goto error; + ret = get_user(min_sec_level, + (unsigned __user *) optval); + if (ret < 0) + goto error; + ret = -EINVAL; + if (min_sec_level > RXRPC_SECURITY_MAX) + goto error; + rx->min_sec_level = min_sec_level; + goto success; + + default: + break; + } + } + +success: + ret = 0; +error: + release_sock(&rx->sk); + return ret; +} + +/* + * permit an RxRPC socket to be polled + */ +static unsigned int rxrpc_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + unsigned int mask; + struct sock *sk = sock->sk; + + poll_wait(file, sk->sk_sleep, wait); + mask = 0; + + /* the socket is readable if there are any messages waiting on the Rx + * queue */ + if (!skb_queue_empty(&sk->sk_receive_queue)) + mask |= POLLIN | POLLRDNORM; + + /* the socket is writable if there is space to add new data to the + * socket; there is no guarantee that any particular call in progress + * on the socket may have space in the Tx ACK window */ + if (rxrpc_writable(sk)) + mask |= POLLOUT | POLLWRNORM; + + return mask; +} + +/* + * create an RxRPC socket + */ +static int rxrpc_create(struct socket *sock, int protocol) +{ + struct rxrpc_sock *rx; + struct sock *sk; + + _enter("%p,%d", sock, protocol); + + /* we support transport protocol UDP only */ + if (protocol != PF_INET) + return -EPROTONOSUPPORT; + + if (sock->type != SOCK_DGRAM) + return -ESOCKTNOSUPPORT; + + sock->ops = &rxrpc_rpc_ops; + sock->state = SS_UNCONNECTED; + + sk = sk_alloc(PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + sk->sk_state = RXRPC_UNCONNECTED; + sk->sk_write_space = rxrpc_write_space; + sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen; + sk->sk_destruct = rxrpc_sock_destructor; + + rx = rxrpc_sk(sk); + rx->proto = protocol; + rx->calls = RB_ROOT; + + INIT_LIST_HEAD(&rx->listen_link); + INIT_LIST_HEAD(&rx->secureq); + INIT_LIST_HEAD(&rx->acceptq); + rwlock_init(&rx->call_lock); + memset(&rx->srx, 0, sizeof(rx->srx)); + + _leave(" = 0 [%p]", rx); + return 0; +} + +/* + * RxRPC socket destructor + */ +static void rxrpc_sock_destructor(struct sock *sk) +{ + _enter("%p", sk); + + rxrpc_purge_queue(&sk->sk_receive_queue); + + BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); + BUG_TRAP(sk_unhashed(sk)); + BUG_TRAP(!sk->sk_socket); + + if (!sock_flag(sk, SOCK_DEAD)) { + printk("Attempt to release alive rxrpc socket: %p\n", sk); + return; + } +} + +/* + * release an RxRPC socket + */ +static int rxrpc_release_sock(struct sock *sk) +{ + struct rxrpc_sock *rx = 
rxrpc_sk(sk); + + _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt)); + + /* declare the socket closed for business */ + sock_orphan(sk); + sk->sk_shutdown = SHUTDOWN_MASK; + + spin_lock_bh(&sk->sk_receive_queue.lock); + sk->sk_state = RXRPC_CLOSE; + spin_unlock_bh(&sk->sk_receive_queue.lock); + + ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1); + + if (!list_empty(&rx->listen_link)) { + write_lock_bh(&rx->local->services_lock); + list_del(&rx->listen_link); + write_unlock_bh(&rx->local->services_lock); + } + + /* try to flush out this socket */ + rxrpc_release_calls_on_socket(rx); + flush_scheduled_work(); + rxrpc_purge_queue(&sk->sk_receive_queue); + + if (rx->conn) { + rxrpc_put_connection(rx->conn); + rx->conn = NULL; + } + + if (rx->bundle) { + rxrpc_put_bundle(rx->trans, rx->bundle); + rx->bundle = NULL; + } + if (rx->trans) { + rxrpc_put_transport(rx->trans); + rx->trans = NULL; + } + if (rx->local) { + rxrpc_put_local(rx->local); + rx->local = NULL; + } + + key_put(rx->key); + rx->key = NULL; + key_put(rx->securities); + rx->securities = NULL; + sock_put(sk); + + _leave(" = 0"); + return 0; +} + +/* + * release an RxRPC BSD socket on close() or equivalent + */ +static int rxrpc_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + _enter("%p{%p}", sock, sk); + + if (!sk) + return 0; + + sock->sk = NULL; + + return rxrpc_release_sock(sk); +} + +/* + * RxRPC network protocol + */ +static const struct proto_ops rxrpc_rpc_ops = { + .family = PF_UNIX, + .owner = THIS_MODULE, + .release = rxrpc_release, + .bind = rxrpc_bind, + .connect = rxrpc_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = sock_no_getname, + .poll = rxrpc_poll, + .ioctl = sock_no_ioctl, + .listen = rxrpc_listen, + .shutdown = sock_no_shutdown, + .setsockopt = rxrpc_setsockopt, + .getsockopt = sock_no_getsockopt, + .sendmsg = rxrpc_sendmsg, + .recvmsg = rxrpc_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, +}; + +static struct proto rxrpc_proto = { + .name = "RXRPC", + .owner = THIS_MODULE, + .obj_size = sizeof(struct rxrpc_sock), + .max_header = sizeof(struct rxrpc_header), +}; + +static struct net_proto_family rxrpc_family_ops = { + .family = PF_RXRPC, + .create = rxrpc_create, + .owner = THIS_MODULE, +}; + +/* + * initialise and register the RxRPC protocol + */ +static int __init af_rxrpc_init(void) +{ + struct sk_buff *dummy_skb; + int ret = -1; + + BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb)); + + rxrpc_epoch = htonl(xtime.tv_sec); + + rxrpc_call_jar = kmem_cache_create( + "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!rxrpc_call_jar) { + printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n"); + ret = -ENOMEM; + goto error_call_jar; + } + + ret = proto_register(&rxrpc_proto, 1); + if (ret < 0) { + printk(KERN_CRIT "RxRPC: Cannot register protocol\n"); + goto error_proto; + } + + ret = sock_register(&rxrpc_family_ops); + if (ret < 0) { + printk(KERN_CRIT "RxRPC: Cannot register socket family\n"); + goto error_sock; + } + + ret = register_key_type(&key_type_rxrpc); + if (ret < 0) { + printk(KERN_CRIT "RxRPC: Cannot register client key type\n"); + goto error_key_type; + } + + ret = register_key_type(&key_type_rxrpc_s); + if (ret < 0) { + printk(KERN_CRIT "RxRPC: Cannot register server key type\n"); + goto error_key_type_s; + } + +#ifdef CONFIG_PROC_FS + proc_net_fops_create("rxrpc_calls", 0, &rxrpc_call_seq_fops); + proc_net_fops_create("rxrpc_conns", 0, 
&rxrpc_connection_seq_fops); +#endif + return 0; + +error_key_type_s: + unregister_key_type(&key_type_rxrpc); +error_key_type: + sock_unregister(PF_RXRPC); +error_sock: + proto_unregister(&rxrpc_proto); +error_proto: + kmem_cache_destroy(rxrpc_call_jar); +error_call_jar: + return ret; +} + +/* + * unregister the RxRPC protocol + */ +static void __exit af_rxrpc_exit(void) +{ + _enter(""); + unregister_key_type(&key_type_rxrpc_s); + unregister_key_type(&key_type_rxrpc); + sock_unregister(PF_RXRPC); + proto_unregister(&rxrpc_proto); + rxrpc_destroy_all_calls(); + rxrpc_destroy_all_connections(); + rxrpc_destroy_all_transports(); + rxrpc_destroy_all_peers(); + rxrpc_destroy_all_locals(); + + ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); + + _debug("flush scheduled work"); + flush_scheduled_work(); + proc_net_remove("rxrpc_conns"); + proc_net_remove("rxrpc_calls"); + kmem_cache_destroy(rxrpc_call_jar); + _leave(""); +} + +module_init(af_rxrpc_init); +module_exit(af_rxrpc_exit); diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c new file mode 100644 index 000000000000..e7af780cd6f9 --- /dev/null +++ b/net/rxrpc/ar-accept.c @@ -0,0 +1,399 @@ +/* incoming call handling + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +/* + * generate a connection-level abort + */ +static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx, + struct rxrpc_header *hdr) +{ + struct msghdr msg; + struct kvec iov[1]; + size_t len; + int ret; + + _enter("%d,,", local->debug_id); + + msg.msg_name = &srx->transport.sin; + msg.msg_namelen = sizeof(srx->transport.sin); + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + hdr->seq = 0; + hdr->type = RXRPC_PACKET_TYPE_BUSY; + hdr->flags = 0; + hdr->userStatus = 0; + hdr->_rsvd = 0; + + iov[0].iov_base = hdr; + iov[0].iov_len = sizeof(*hdr); + + len = iov[0].iov_len; + + hdr->serial = htonl(1); + _proto("Tx BUSY %%%u", ntohl(hdr->serial)); + + ret = kernel_sendmsg(local->socket, &msg, iov, 1, len); + if (ret < 0) { + _leave(" = -EAGAIN [sendmsg failed: %d]", ret); + return -EAGAIN; + } + + _leave(" = 0"); + return 0; +} + +/* + * accept an incoming call that needs peer, transport and/or connection setting + * up + */ +static int rxrpc_accept_incoming_call(struct rxrpc_local *local, + struct rxrpc_sock *rx, + struct sk_buff *skb, + struct sockaddr_rxrpc *srx) +{ + struct rxrpc_connection *conn; + struct rxrpc_transport *trans; + struct rxrpc_skb_priv *sp, *nsp; + struct rxrpc_peer *peer; + struct rxrpc_call *call; + struct sk_buff *notification; + int ret; + + _enter(""); + + sp = rxrpc_skb(skb); + + /* get a notification message to send to the server app */ + notification = alloc_skb(0, GFP_NOFS); + rxrpc_new_skb(notification); + notification->mark = RXRPC_SKB_MARK_NEW_CALL; + + peer = rxrpc_get_peer(srx, GFP_NOIO); + if (IS_ERR(peer)) { + _debug("no peer"); + ret = -EBUSY; + goto error; + } + + trans = rxrpc_get_transport(local, peer, GFP_NOIO); + rxrpc_put_peer(peer); + if (!trans) { + _debug("no trans"); + ret = -EBUSY; + goto error; + } + + conn = 
rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO); + rxrpc_put_transport(trans); + if (IS_ERR(conn)) { + _debug("no conn"); + ret = PTR_ERR(conn); + goto error; + } + + call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO); + rxrpc_put_connection(conn); + if (IS_ERR(call)) { + _debug("no call"); + ret = PTR_ERR(call); + goto error; + } + + /* attach the call to the socket */ + read_lock_bh(&local->services_lock); + if (rx->sk.sk_state == RXRPC_CLOSE) + goto invalid_service; + + write_lock(&rx->call_lock); + if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) { + rxrpc_get_call(call); + + spin_lock(&call->conn->state_lock); + if (sp->hdr.securityIndex > 0 && + call->conn->state == RXRPC_CONN_SERVER_UNSECURED) { + _debug("await conn sec"); + list_add_tail(&call->accept_link, &rx->secureq); + call->conn->state = RXRPC_CONN_SERVER_CHALLENGING; + atomic_inc(&call->conn->usage); + set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events); + schedule_work(&call->conn->processor); + } else { + _debug("conn ready"); + call->state = RXRPC_CALL_SERVER_ACCEPTING; + list_add_tail(&call->accept_link, &rx->acceptq); + rxrpc_get_call(call); + nsp = rxrpc_skb(notification); + nsp->call = call; + + ASSERTCMP(atomic_read(&call->usage), >=, 3); + + _debug("notify"); + spin_lock(&call->lock); + ret = rxrpc_queue_rcv_skb(call, notification, true, + false); + spin_unlock(&call->lock); + notification = NULL; + if (ret < 0) + BUG(); + } + spin_unlock(&call->conn->state_lock); + + _debug("queued"); + } + write_unlock(&rx->call_lock); + + _debug("process"); + rxrpc_fast_process_packet(call, skb); + + _debug("done"); + read_unlock_bh(&local->services_lock); + rxrpc_free_skb(notification); + rxrpc_put_call(call); + _leave(" = 0"); + return 0; + +invalid_service: + _debug("invalid"); + read_unlock_bh(&local->services_lock); + + read_lock_bh(&call->state_lock); + if (!test_bit(RXRPC_CALL_RELEASE, &call->flags) && + !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) { + rxrpc_get_call(call); + schedule_work(&call->processor); + } + read_unlock_bh(&call->state_lock); + rxrpc_put_call(call); + ret = -ECONNREFUSED; +error: + rxrpc_free_skb(notification); + _leave(" = %d", ret); + return ret; +} + +/* + * accept incoming calls that need peer, transport and/or connection setting up + * - the packets we get are all incoming client DATA packets that have seq == 1 + */ +void rxrpc_accept_incoming_calls(struct work_struct *work) +{ + struct rxrpc_local *local = + container_of(work, struct rxrpc_local, acceptor); + struct rxrpc_skb_priv *sp; + struct sockaddr_rxrpc srx; + struct rxrpc_sock *rx; + struct sk_buff *skb; + __be16 service_id; + int ret; + + _enter("%d", local->debug_id); + + read_lock_bh(&rxrpc_local_lock); + if (atomic_read(&local->usage) > 0) + rxrpc_get_local(local); + else + local = NULL; + read_unlock_bh(&rxrpc_local_lock); + if (!local) { + _leave(" [local dead]"); + return; + } + +process_next_packet: + skb = skb_dequeue(&local->accept_queue); + if (!skb) { + rxrpc_put_local(local); + _leave("\n"); + return; + } + + _net("incoming call skb %p", skb); + + sp = rxrpc_skb(skb); + + /* determine the remote address */ + memset(&srx, 0, sizeof(srx)); + srx.srx_family = AF_RXRPC; + srx.transport.family = local->srx.transport.family; + srx.transport_type = local->srx.transport_type; + switch (srx.transport.family) { + case AF_INET: + srx.transport_len = sizeof(struct sockaddr_in); + srx.transport.sin.sin_port = udp_hdr(skb)->source; + srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; + break; + default: 
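+		/* only IPv4/UDP transports are handled here; anything else is
+		 * bounced back to the sender as a BUSY packet */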
+ goto busy; + } + + /* get the socket providing the service */ + service_id = sp->hdr.serviceId; + read_lock_bh(&local->services_lock); + list_for_each_entry(rx, &local->services, listen_link) { + if (rx->service_id == service_id && + rx->sk.sk_state != RXRPC_CLOSE) + goto found_service; + } + read_unlock_bh(&local->services_lock); + goto invalid_service; + +found_service: + _debug("found service %hd", ntohs(rx->service_id)); + if (sk_acceptq_is_full(&rx->sk)) + goto backlog_full; + sk_acceptq_added(&rx->sk); + sock_hold(&rx->sk); + read_unlock_bh(&local->services_lock); + + ret = rxrpc_accept_incoming_call(local, rx, skb, &srx); + if (ret < 0) + sk_acceptq_removed(&rx->sk); + sock_put(&rx->sk); + switch (ret) { + case -ECONNRESET: /* old calls are ignored */ + case -ECONNABORTED: /* aborted calls are reaborted or ignored */ + case 0: + goto process_next_packet; + case -ECONNREFUSED: + goto invalid_service; + case -EBUSY: + goto busy; + case -EKEYREJECTED: + goto security_mismatch; + default: + BUG(); + } + +backlog_full: + read_unlock_bh(&local->services_lock); +busy: + rxrpc_busy(local, &srx, &sp->hdr); + rxrpc_free_skb(skb); + goto process_next_packet; + +invalid_service: + skb->priority = RX_INVALID_OPERATION; + rxrpc_reject_packet(local, skb); + goto process_next_packet; + + /* can't change connection security type mid-flow */ +security_mismatch: + skb->priority = RX_PROTOCOL_ERROR; + rxrpc_reject_packet(local, skb); + goto process_next_packet; +} + +/* + * handle acceptance of a call by userspace + * - assign the user call ID to the call at the front of the queue + */ +int rxrpc_accept_call(struct rxrpc_sock *rx, unsigned long user_call_ID) +{ + struct rxrpc_call *call; + struct rb_node *parent, **pp; + int ret; + + _enter(",%lx", user_call_ID); + + ASSERT(!irqs_disabled()); + + write_lock(&rx->call_lock); + + ret = -ENODATA; + if (list_empty(&rx->acceptq)) + goto out; + + /* check the user ID isn't already in use */ + ret = -EBADSLT; + pp = &rx->calls.rb_node; + parent = NULL; + while (*pp) { + parent = *pp; + call = rb_entry(parent, struct rxrpc_call, sock_node); + + if (user_call_ID < call->user_call_ID) + pp = &(*pp)->rb_left; + else if (user_call_ID > call->user_call_ID) + pp = &(*pp)->rb_right; + else + goto out; + } + + /* dequeue the first call and check it's still valid */ + call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link); + list_del_init(&call->accept_link); + sk_acceptq_removed(&rx->sk); + + write_lock_bh(&call->state_lock); + switch (call->state) { + case RXRPC_CALL_SERVER_ACCEPTING: + call->state = RXRPC_CALL_SERVER_RECV_REQUEST; + break; + case RXRPC_CALL_REMOTELY_ABORTED: + case RXRPC_CALL_LOCALLY_ABORTED: + ret = -ECONNABORTED; + goto out_release; + case RXRPC_CALL_NETWORK_ERROR: + ret = call->conn->error; + goto out_release; + case RXRPC_CALL_DEAD: + ret = -ETIME; + goto out_discard; + default: + BUG(); + } + + /* formalise the acceptance */ + call->user_call_ID = user_call_ID; + rb_link_node(&call->sock_node, parent, pp); + rb_insert_color(&call->sock_node, &rx->calls); + if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags)) + BUG(); + if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events)) + BUG(); + schedule_work(&call->processor); + + write_unlock_bh(&call->state_lock); + write_unlock(&rx->call_lock); + _leave(" = 0"); + return 0; + + /* if the call is already dying or dead, then we leave the socket's ref + * on it to be released by rxrpc_dead_call_expired() as induced by + * rxrpc_release_call() */ +out_release: + _debug("release %p", 
call); + if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && + !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) + schedule_work(&call->processor); +out_discard: + write_unlock_bh(&call->state_lock); + _debug("discard %p", call); +out: + write_unlock(&rx->call_lock); + _leave(" = %d", ret); + return ret; +} diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c new file mode 100644 index 000000000000..8f7764eca96c --- /dev/null +++ b/net/rxrpc/ar-ack.c @@ -0,0 +1,1250 @@ +/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +static unsigned rxrpc_ack_defer = 1; + +static const char *rxrpc_acks[] = { + "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", + "-?-" +}; + +static const s8 rxrpc_ack_priority[] = { + [0] = 0, + [RXRPC_ACK_DELAY] = 1, + [RXRPC_ACK_REQUESTED] = 2, + [RXRPC_ACK_IDLE] = 3, + [RXRPC_ACK_PING_RESPONSE] = 4, + [RXRPC_ACK_DUPLICATE] = 5, + [RXRPC_ACK_OUT_OF_SEQUENCE] = 6, + [RXRPC_ACK_EXCEEDS_WINDOW] = 7, + [RXRPC_ACK_NOSPACE] = 8, +}; + +/* + * propose an ACK be sent + */ +void __rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason, + __be32 serial, bool immediate) +{ + unsigned long expiry; + s8 prior = rxrpc_ack_priority[ack_reason]; + + ASSERTCMP(prior, >, 0); + + _enter("{%d},%s,%%%x,%u", + call->debug_id, rxrpc_acks[ack_reason], ntohl(serial), + immediate); + + if (prior < rxrpc_ack_priority[call->ackr_reason]) { + if (immediate) + goto cancel_timer; + return; + } + + /* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial + * numbers */ + if (prior == rxrpc_ack_priority[call->ackr_reason]) { + if (prior <= 4) + call->ackr_serial = serial; + if (immediate) + goto cancel_timer; + return; + } + + call->ackr_reason = ack_reason; + call->ackr_serial = serial; + + switch (ack_reason) { + case RXRPC_ACK_DELAY: + _debug("run delay timer"); + call->ack_timer.expires = jiffies + rxrpc_ack_timeout * HZ; + add_timer(&call->ack_timer); + return; + + case RXRPC_ACK_IDLE: + if (!immediate) { + _debug("run defer timer"); + expiry = 1; + goto run_timer; + } + goto cancel_timer; + + case RXRPC_ACK_REQUESTED: + if (!rxrpc_ack_defer) + goto cancel_timer; + if (!immediate || serial == cpu_to_be32(1)) { + _debug("run defer timer"); + expiry = rxrpc_ack_defer; + goto run_timer; + } + + default: + _debug("immediate ACK"); + goto cancel_timer; + } + +run_timer: + expiry += jiffies; + if (!timer_pending(&call->ack_timer) || + time_after(call->ack_timer.expires, expiry)) + mod_timer(&call->ack_timer, expiry); + return; + +cancel_timer: + _debug("cancel timer %%%u", ntohl(serial)); + try_to_del_timer_sync(&call->ack_timer); + read_lock_bh(&call->state_lock); + if (call->state <= RXRPC_CALL_COMPLETE && + !test_and_set_bit(RXRPC_CALL_ACK, &call->events)) + schedule_work(&call->processor); + read_unlock_bh(&call->state_lock); +} + +/* + * propose an ACK be sent, locking the call structure + */ +void rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason, + __be32 serial, bool immediate) +{ + s8 prior = rxrpc_ack_priority[ack_reason]; + + if (prior > 
rxrpc_ack_priority[call->ackr_reason]) { + spin_lock_bh(&call->lock); + __rxrpc_propose_ACK(call, ack_reason, serial, immediate); + spin_unlock_bh(&call->lock); + } +} + +/* + * set the resend timer + */ +static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend, + unsigned long resend_at) +{ + read_lock_bh(&call->state_lock); + if (call->state >= RXRPC_CALL_COMPLETE) + resend = 0; + + if (resend & 1) { + _debug("SET RESEND"); + set_bit(RXRPC_CALL_RESEND, &call->events); + } + + if (resend & 2) { + _debug("MODIFY RESEND TIMER"); + set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); + mod_timer(&call->resend_timer, resend_at); + } else { + _debug("KILL RESEND TIMER"); + del_timer_sync(&call->resend_timer); + clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); + clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); + } + read_unlock_bh(&call->state_lock); +} + +/* + * resend packets + */ +static void rxrpc_resend(struct rxrpc_call *call) +{ + struct rxrpc_skb_priv *sp; + struct rxrpc_header *hdr; + struct sk_buff *txb; + unsigned long *p_txb, resend_at; + int loop, stop; + u8 resend; + + _enter("{%d,%d,%d,%d},", + call->acks_hard, call->acks_unacked, + atomic_read(&call->sequence), + CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); + + stop = 0; + resend = 0; + resend_at = 0; + + for (loop = call->acks_tail; + loop != call->acks_head || stop; + loop = (loop + 1) & (call->acks_winsz - 1) + ) { + p_txb = call->acks_window + loop; + smp_read_barrier_depends(); + if (*p_txb & 1) + continue; + + txb = (struct sk_buff *) *p_txb; + sp = rxrpc_skb(txb); + + if (sp->need_resend) { + sp->need_resend = 0; + + /* each Tx packet has a new serial number */ + sp->hdr.serial = + htonl(atomic_inc_return(&call->conn->serial)); + + hdr = (struct rxrpc_header *) txb->head; + hdr->serial = sp->hdr.serial; + + _proto("Tx DATA %%%u { #%d }", + ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); + if (rxrpc_send_packet(call->conn->trans, txb) < 0) { + stop = 0; + sp->resend_at = jiffies + 3; + } else { + sp->resend_at = + jiffies + rxrpc_resend_timeout * HZ; + } + } + + if (time_after_eq(jiffies + 1, sp->resend_at)) { + sp->need_resend = 1; + resend |= 1; + } else if (resend & 2) { + if (time_before(sp->resend_at, resend_at)) + resend_at = sp->resend_at; + } else { + resend_at = sp->resend_at; + resend |= 2; + } + } + + rxrpc_set_resend(call, resend, resend_at); + _leave(""); +} + +/* + * handle resend timer expiry + */ +static void rxrpc_resend_timer(struct rxrpc_call *call) +{ + struct rxrpc_skb_priv *sp; + struct sk_buff *txb; + unsigned long *p_txb, resend_at; + int loop; + u8 resend; + + _enter("%d,%d,%d", + call->acks_tail, call->acks_unacked, call->acks_head); + + resend = 0; + resend_at = 0; + + for (loop = call->acks_unacked; + loop != call->acks_head; + loop = (loop + 1) & (call->acks_winsz - 1) + ) { + p_txb = call->acks_window + loop; + smp_read_barrier_depends(); + txb = (struct sk_buff *) (*p_txb & ~1); + sp = rxrpc_skb(txb); + + ASSERT(!(*p_txb & 1)); + + if (sp->need_resend) { + ; + } else if (time_after_eq(jiffies + 1, sp->resend_at)) { + sp->need_resend = 1; + resend |= 1; + } else if (resend & 2) { + if (time_before(sp->resend_at, resend_at)) + resend_at = sp->resend_at; + } else { + resend_at = sp->resend_at; + resend |= 2; + } + } + + rxrpc_set_resend(call, resend, resend_at); + _leave(""); +} + +/* + * process soft ACKs of our transmitted packets + * - these indicate packets the peer has or has not received, but hasn't yet + * given to the consumer, and so can still be discarded and 
re-requested + */ +static int rxrpc_process_soft_ACKs(struct rxrpc_call *call, + struct rxrpc_ackpacket *ack, + struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp; + struct sk_buff *txb; + unsigned long *p_txb, resend_at; + int loop; + u8 sacks[RXRPC_MAXACKS], resend; + + _enter("{%d,%d},{%d},", + call->acks_hard, + CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz), + ack->nAcks); + + if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0) + goto protocol_error; + + resend = 0; + resend_at = 0; + for (loop = 0; loop < ack->nAcks; loop++) { + p_txb = call->acks_window; + p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1); + smp_read_barrier_depends(); + txb = (struct sk_buff *) (*p_txb & ~1); + sp = rxrpc_skb(txb); + + switch (sacks[loop]) { + case RXRPC_ACK_TYPE_ACK: + sp->need_resend = 0; + *p_txb |= 1; + break; + case RXRPC_ACK_TYPE_NACK: + sp->need_resend = 1; + *p_txb &= ~1; + resend = 1; + break; + default: + _debug("Unsupported ACK type %d", sacks[loop]); + goto protocol_error; + } + } + + smp_mb(); + call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1); + + /* anything not explicitly ACK'd is implicitly NACK'd, but may just not + * have been received or processed yet by the far end */ + for (loop = call->acks_unacked; + loop != call->acks_head; + loop = (loop + 1) & (call->acks_winsz - 1) + ) { + p_txb = call->acks_window + loop; + smp_read_barrier_depends(); + txb = (struct sk_buff *) (*p_txb & ~1); + sp = rxrpc_skb(txb); + + if (*p_txb & 1) { + /* packet must have been discarded */ + sp->need_resend = 1; + *p_txb &= ~1; + resend |= 1; + } else if (sp->need_resend) { + ; + } else if (time_after_eq(jiffies + 1, sp->resend_at)) { + sp->need_resend = 1; + resend |= 1; + } else if (resend & 2) { + if (time_before(sp->resend_at, resend_at)) + resend_at = sp->resend_at; + } else { + resend_at = sp->resend_at; + resend |= 2; + } + } + + rxrpc_set_resend(call, resend, resend_at); + _leave(" = 0"); + return 0; + +protocol_error: + _leave(" = -EPROTO"); + return -EPROTO; +} + +/* + * discard hard-ACK'd packets from the Tx window + */ +static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) +{ + struct rxrpc_skb_priv *sp; + unsigned long _skb; + int tail = call->acks_tail, old_tail; + int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz); + + _enter("{%u,%u},%u", call->acks_hard, win, hard); + + ASSERTCMP(hard - call->acks_hard, <=, win); + + while (call->acks_hard < hard) { + smp_read_barrier_depends(); + _skb = call->acks_window[tail] & ~1; + sp = rxrpc_skb((struct sk_buff *) _skb); + rxrpc_free_skb((struct sk_buff *) _skb); + old_tail = tail; + tail = (tail + 1) & (call->acks_winsz - 1); + call->acks_tail = tail; + if (call->acks_unacked == old_tail) + call->acks_unacked = tail; + call->acks_hard++; + } + + wake_up(&call->tx_waitq); +} + +/* + * clear the Tx window in the event of a failure + */ +static void rxrpc_clear_tx_window(struct rxrpc_call *call) +{ + rxrpc_rotate_tx_window(call, atomic_read(&call->sequence)); +} + +/* + * drain the out of sequence received packet queue into the packet Rx queue + */ +static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call) +{ + struct rxrpc_skb_priv *sp; + struct sk_buff *skb; + bool terminal; + int ret; + + _enter("{%d,%d}", call->rx_data_post, call->rx_first_oos); + + spin_lock_bh(&call->lock); + + ret = -ECONNRESET; + if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) + goto socket_unavailable; + + skb = skb_dequeue(&call->rx_oos_queue); + if (skb) { + sp = rxrpc_skb(skb); + + _debug("drain 
OOS packet %d [%d]", + ntohl(sp->hdr.seq), call->rx_first_oos); + + if (ntohl(sp->hdr.seq) != call->rx_first_oos) { + skb_queue_head(&call->rx_oos_queue, skb); + call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq); + _debug("requeue %p {%u}", skb, call->rx_first_oos); + } else { + skb->mark = RXRPC_SKB_MARK_DATA; + terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) && + !(sp->hdr.flags & RXRPC_CLIENT_INITIATED)); + ret = rxrpc_queue_rcv_skb(call, skb, true, terminal); + BUG_ON(ret < 0); + _debug("drain #%u", call->rx_data_post); + call->rx_data_post++; + + /* find out what the next packet is */ + skb = skb_peek(&call->rx_oos_queue); + if (skb) + call->rx_first_oos = + ntohl(rxrpc_skb(skb)->hdr.seq); + else + call->rx_first_oos = 0; + _debug("peek %p {%u}", skb, call->rx_first_oos); + } + } + + ret = 0; +socket_unavailable: + spin_unlock_bh(&call->lock); + _leave(" = %d", ret); + return ret; +} + +/* + * insert an out of sequence packet into the buffer + */ +static void rxrpc_insert_oos_packet(struct rxrpc_call *call, + struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp, *psp; + struct sk_buff *p; + u32 seq; + + sp = rxrpc_skb(skb); + seq = ntohl(sp->hdr.seq); + _enter(",,{%u}", seq); + + skb->destructor = rxrpc_packet_destructor; + ASSERTCMP(sp->call, ==, NULL); + sp->call = call; + rxrpc_get_call(call); + + /* insert into the buffer in sequence order */ + spin_lock_bh(&call->lock); + + skb_queue_walk(&call->rx_oos_queue, p) { + psp = rxrpc_skb(p); + if (ntohl(psp->hdr.seq) > seq) { + _debug("insert oos #%u before #%u", + seq, ntohl(psp->hdr.seq)); + skb_insert(p, skb, &call->rx_oos_queue); + goto inserted; + } + } + + _debug("append oos #%u", seq); + skb_queue_tail(&call->rx_oos_queue, skb); +inserted: + + /* we might now have a new front to the queue */ + if (call->rx_first_oos == 0 || seq < call->rx_first_oos) + call->rx_first_oos = seq; + + read_lock(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE && + call->rx_data_post == call->rx_first_oos) { + _debug("drain rx oos now"); + set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events); + } + read_unlock(&call->state_lock); + + spin_unlock_bh(&call->lock); + _leave(" [stored #%u]", call->rx_first_oos); +} + +/* + * clear the Tx window on final ACK reception + */ +static void rxrpc_zap_tx_window(struct rxrpc_call *call) +{ + struct rxrpc_skb_priv *sp; + struct sk_buff *skb; + unsigned long _skb, *acks_window; + uint8_t winsz = call->acks_winsz; + int tail; + + acks_window = call->acks_window; + call->acks_window = NULL; + + while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) { + tail = call->acks_tail; + smp_read_barrier_depends(); + _skb = acks_window[tail] & ~1; + smp_mb(); + call->acks_tail = (call->acks_tail + 1) & (winsz - 1); + + skb = (struct sk_buff *) _skb; + sp = rxrpc_skb(skb); + _debug("+++ clear Tx %u", ntohl(sp->hdr.seq)); + rxrpc_free_skb(skb); + } + + kfree(acks_window); +} + +/* + * process packets in the reception queue + */ +static int rxrpc_process_rx_queue(struct rxrpc_call *call, + u32 *_abort_code) +{ + struct rxrpc_ackpacket ack; + struct rxrpc_skb_priv *sp; + struct sk_buff *skb; + bool post_ACK; + int latest; + u32 hard, tx; + + _enter(""); + +process_further: + skb = skb_dequeue(&call->rx_queue); + if (!skb) + return -EAGAIN; + + _net("deferred skb %p", skb); + + sp = rxrpc_skb(skb); + + _debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state); + + post_ACK = false; + + switch (sp->hdr.type) { + /* data packets that wind up here have been received out of + * order, need security processing 
or are jumbo packets */ + case RXRPC_PACKET_TYPE_DATA: + _proto("OOSQ DATA %%%u { #%u }", + ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); + + /* secured packets must be verified and possibly decrypted */ + if (rxrpc_verify_packet(call, skb, _abort_code) < 0) + goto protocol_error; + + rxrpc_insert_oos_packet(call, skb); + goto process_further; + + /* partial ACK to process */ + case RXRPC_PACKET_TYPE_ACK: + if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) { + _debug("extraction failure"); + goto protocol_error; + } + if (!skb_pull(skb, sizeof(ack))) + BUG(); + + latest = ntohl(sp->hdr.serial); + hard = ntohl(ack.firstPacket); + tx = atomic_read(&call->sequence); + + _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", + latest, + ntohs(ack.maxSkew), + hard, + ntohl(ack.previousPacket), + ntohl(ack.serial), + rxrpc_acks[ack.reason], + ack.nAcks); + + if (ack.reason == RXRPC_ACK_PING) { + _proto("Rx ACK %%%u PING Request", latest); + rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, + sp->hdr.serial, true); + } + + /* discard any out-of-order or duplicate ACKs */ + if (latest - call->acks_latest <= 0) { + _debug("discard ACK %d <= %d", + latest, call->acks_latest); + goto discard; + } + call->acks_latest = latest; + + if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && + call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY && + call->state != RXRPC_CALL_SERVER_SEND_REPLY && + call->state != RXRPC_CALL_SERVER_AWAIT_ACK) + goto discard; + + _debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state); + + if (hard > 0) { + if (hard - 1 > tx) { + _debug("hard-ACK'd packet %d not transmitted" + " (%d top)", + hard - 1, tx); + goto protocol_error; + } + + if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY || + call->state == RXRPC_CALL_SERVER_AWAIT_ACK) && + hard > tx) + goto all_acked; + + smp_rmb(); + rxrpc_rotate_tx_window(call, hard - 1); + } + + if (ack.nAcks > 0) { + if (hard - 1 + ack.nAcks > tx) { + _debug("soft-ACK'd packet %d+%d not" + " transmitted (%d top)", + hard - 1, ack.nAcks, tx); + goto protocol_error; + } + + if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0) + goto protocol_error; + } + goto discard; + + /* complete ACK to process */ + case RXRPC_PACKET_TYPE_ACKALL: + goto all_acked; + + /* abort and busy are handled elsewhere */ + case RXRPC_PACKET_TYPE_BUSY: + case RXRPC_PACKET_TYPE_ABORT: + BUG(); + + /* connection level events - also handled elsewhere */ + case RXRPC_PACKET_TYPE_CHALLENGE: + case RXRPC_PACKET_TYPE_RESPONSE: + case RXRPC_PACKET_TYPE_DEBUG: + BUG(); + } + + /* if we've had a hard ACK that covers all the packets we've sent, then + * that ends that phase of the operation */ +all_acked: + write_lock_bh(&call->state_lock); + _debug("ack all %d", call->state); + + switch (call->state) { + case RXRPC_CALL_CLIENT_AWAIT_REPLY: + call->state = RXRPC_CALL_CLIENT_RECV_REPLY; + break; + case RXRPC_CALL_SERVER_AWAIT_ACK: + _debug("srv complete"); + call->state = RXRPC_CALL_COMPLETE; + post_ACK = true; + break; + case RXRPC_CALL_CLIENT_SEND_REQUEST: + case RXRPC_CALL_SERVER_RECV_REQUEST: + goto protocol_error_unlock; /* can't occur yet */ + default: + write_unlock_bh(&call->state_lock); + goto discard; /* assume packet left over from earlier phase */ + } + + write_unlock_bh(&call->state_lock); + + /* if all the packets we sent are hard-ACK'd, then we can discard + * whatever we've got left */ + _debug("clear Tx %d", + CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); + + del_timer_sync(&call->resend_timer); + clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); + 
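+	/* also cancel any resend event that may already have been raised */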
clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); + + if (call->acks_window) + rxrpc_zap_tx_window(call); + + if (post_ACK) { + /* post the final ACK message for userspace to pick up */ + _debug("post ACK"); + skb->mark = RXRPC_SKB_MARK_FINAL_ACK; + sp->call = call; + rxrpc_get_call(call); + spin_lock_bh(&call->lock); + if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0) + BUG(); + spin_unlock_bh(&call->lock); + goto process_further; + } + +discard: + rxrpc_free_skb(skb); + goto process_further; + +protocol_error_unlock: + write_unlock_bh(&call->state_lock); +protocol_error: + rxrpc_free_skb(skb); + _leave(" = -EPROTO"); + return -EPROTO; +} + +/* + * post a message to the socket Rx queue for recvmsg() to pick up + */ +static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error, + bool fatal) +{ + struct rxrpc_skb_priv *sp; + struct sk_buff *skb; + int ret; + + _enter("{%d,%lx},%u,%u,%d", + call->debug_id, call->flags, mark, error, fatal); + + /* remove timers and things for fatal messages */ + if (fatal) { + del_timer_sync(&call->resend_timer); + del_timer_sync(&call->ack_timer); + clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); + } + + if (mark != RXRPC_SKB_MARK_NEW_CALL && + !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { + _leave("[no userid]"); + return 0; + } + + if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) { + skb = alloc_skb(0, GFP_NOFS); + if (!skb) + return -ENOMEM; + + rxrpc_new_skb(skb); + + skb->mark = mark; + + sp = rxrpc_skb(skb); + memset(sp, 0, sizeof(*sp)); + sp->error = error; + sp->call = call; + rxrpc_get_call(call); + + spin_lock_bh(&call->lock); + ret = rxrpc_queue_rcv_skb(call, skb, true, fatal); + spin_unlock_bh(&call->lock); + if (ret < 0) + BUG(); + } + + return 0; +} + +/* + * handle background processing of incoming call packets and ACK / abort + * generation + */ +void rxrpc_process_call(struct work_struct *work) +{ + struct rxrpc_call *call = + container_of(work, struct rxrpc_call, processor); + struct rxrpc_ackpacket ack; + struct rxrpc_ackinfo ackinfo; + struct rxrpc_header hdr; + struct msghdr msg; + struct kvec iov[5]; + unsigned long bits; + __be32 data; + size_t len; + int genbit, loop, nbit, ioc, ret; + u32 abort_code = RX_PROTOCOL_ERROR; + u8 *acks = NULL; + + //printk("\n--------------------\n"); + _enter("{%d,%s,%lx} [%lu]", + call->debug_id, rxrpc_call_states[call->state], call->events, + (jiffies - call->creation_jif) / (HZ / 10)); + + if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) { + _debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX"); + return; + } + + /* there's a good chance we're going to have to send a message, so set + * one up in advance */ + msg.msg_name = &call->conn->trans->peer->srx.transport.sin; + msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin); + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + hdr.epoch = call->conn->epoch; + hdr.cid = call->cid; + hdr.callNumber = call->call_id; + hdr.seq = 0; + hdr.type = RXRPC_PACKET_TYPE_ACK; + hdr.flags = call->conn->out_clientflag; + hdr.userStatus = 0; + hdr.securityIndex = call->conn->security_ix; + hdr._rsvd = 0; + hdr.serviceId = call->conn->service_id; + + memset(iov, 0, sizeof(iov)); + iov[0].iov_base = &hdr; + iov[0].iov_len = sizeof(hdr); + + /* deal with events of a final nature */ + if (test_bit(RXRPC_CALL_RELEASE, &call->events)) { + rxrpc_release_call(call); + clear_bit(RXRPC_CALL_RELEASE, &call->events); + } + + if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) { + int error; + + 
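+		/* a peer-reported network error supersedes any abort or
+		 * busy-reject events that may still be pending */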
clear_bit(RXRPC_CALL_CONN_ABORT, &call->events); + clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events); + clear_bit(RXRPC_CALL_ABORT, &call->events); + + error = call->conn->trans->peer->net_error; + _debug("post net error %d", error); + + if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR, + error, true) < 0) + goto no_mem; + clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events); + goto kill_ACKs; + } + + if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) { + ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE); + + clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events); + clear_bit(RXRPC_CALL_ABORT, &call->events); + + _debug("post conn abort"); + + if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR, + call->conn->error, true) < 0) + goto no_mem; + clear_bit(RXRPC_CALL_CONN_ABORT, &call->events); + goto kill_ACKs; + } + + if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) { + hdr.type = RXRPC_PACKET_TYPE_BUSY; + genbit = RXRPC_CALL_REJECT_BUSY; + goto send_message; + } + + if (test_bit(RXRPC_CALL_ABORT, &call->events)) { + ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE); + + if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR, + ECONNABORTED, true) < 0) + goto no_mem; + hdr.type = RXRPC_PACKET_TYPE_ABORT; + data = htonl(call->abort_code); + iov[1].iov_base = &data; + iov[1].iov_len = sizeof(data); + genbit = RXRPC_CALL_ABORT; + goto send_message; + } + + if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) { + hdr.type = RXRPC_PACKET_TYPE_ACKALL; + genbit = RXRPC_CALL_ACK_FINAL; + goto send_message; + } + + if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) | + (1 << RXRPC_CALL_RCVD_ABORT)) + ) { + u32 mark; + + if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events)) + mark = RXRPC_SKB_MARK_REMOTE_ABORT; + else + mark = RXRPC_SKB_MARK_BUSY; + + _debug("post abort/busy"); + rxrpc_clear_tx_window(call); + if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0) + goto no_mem; + + clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events); + clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events); + goto kill_ACKs; + } + + if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) { + _debug("do implicit ackall"); + rxrpc_clear_tx_window(call); + } + + if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) { + write_lock_bh(&call->state_lock); + if (call->state <= RXRPC_CALL_COMPLETE) { + call->state = RXRPC_CALL_LOCALLY_ABORTED; + call->abort_code = RX_CALL_TIMEOUT; + set_bit(RXRPC_CALL_ABORT, &call->events); + } + write_unlock_bh(&call->state_lock); + + _debug("post timeout"); + if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR, + ETIME, true) < 0) + goto no_mem; + + clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events); + goto kill_ACKs; + } + + /* deal with assorted inbound messages */ + if (!skb_queue_empty(&call->rx_queue)) { + switch (rxrpc_process_rx_queue(call, &abort_code)) { + case 0: + case -EAGAIN: + break; + case -ENOMEM: + goto no_mem; + case -EKEYEXPIRED: + case -EKEYREJECTED: + case -EPROTO: + rxrpc_abort_call(call, abort_code); + goto kill_ACKs; + } + } + + /* handle resending */ + if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) + rxrpc_resend_timer(call); + if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events)) + rxrpc_resend(call); + + /* consider sending an ordinary ACK */ + if (test_bit(RXRPC_CALL_ACK, &call->events)) { + __be32 pad; + + _debug("send ACK: window: %d - %d { %lx }", + call->rx_data_eaten, call->ackr_win_top, + call->ackr_window[0]); + + if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST && + call->ackr_reason != RXRPC_ACK_PING_RESPONSE) { + /* ACK by sending reply DATA 
packet in this state */ + clear_bit(RXRPC_CALL_ACK, &call->events); + goto maybe_reschedule; + } + + genbit = RXRPC_CALL_ACK; + + acks = kzalloc(call->ackr_win_top - call->rx_data_eaten, + GFP_NOFS); + if (!acks) + goto no_mem; + + //hdr.flags = RXRPC_SLOW_START_OK; + ack.bufferSpace = htons(8); + ack.maxSkew = 0; + ack.serial = 0; + ack.reason = 0; + + ackinfo.rxMTU = htonl(5692); +// ackinfo.rxMTU = htonl(call->conn->trans->peer->maxdata); + ackinfo.maxMTU = htonl(call->conn->trans->peer->maxdata); + ackinfo.rwind = htonl(32); + ackinfo.jumbo_max = htonl(4); + + spin_lock_bh(&call->lock); + ack.reason = call->ackr_reason; + ack.serial = call->ackr_serial; + ack.previousPacket = call->ackr_prev_seq; + ack.firstPacket = htonl(call->rx_data_eaten + 1); + + ack.nAcks = 0; + for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) { + nbit = loop * BITS_PER_LONG; + for (bits = call->ackr_window[loop]; bits; bits >>= 1 + ) { + _debug("- l=%d n=%d b=%lx", loop, nbit, bits); + if (bits & 1) { + acks[nbit] = RXRPC_ACK_TYPE_ACK; + ack.nAcks = nbit + 1; + } + nbit++; + } + } + call->ackr_reason = 0; + spin_unlock_bh(&call->lock); + + pad = 0; + + iov[1].iov_base = &ack; + iov[1].iov_len = sizeof(ack); + iov[2].iov_base = acks; + iov[2].iov_len = ack.nAcks; + iov[3].iov_base = &pad; + iov[3].iov_len = 3; + iov[4].iov_base = &ackinfo; + iov[4].iov_len = sizeof(ackinfo); + + switch (ack.reason) { + case RXRPC_ACK_REQUESTED: + case RXRPC_ACK_DUPLICATE: + case RXRPC_ACK_OUT_OF_SEQUENCE: + case RXRPC_ACK_EXCEEDS_WINDOW: + case RXRPC_ACK_NOSPACE: + case RXRPC_ACK_PING: + case RXRPC_ACK_PING_RESPONSE: + goto send_ACK_with_skew; + case RXRPC_ACK_DELAY: + case RXRPC_ACK_IDLE: + goto send_ACK; + } + } + + /* handle completion of security negotiations on an incoming + * connection */ + if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) { + _debug("secured"); + spin_lock_bh(&call->lock); + + if (call->state == RXRPC_CALL_SERVER_SECURING) { + _debug("securing"); + write_lock(&call->conn->lock); + if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && + !test_bit(RXRPC_CALL_RELEASE, &call->events)) { + _debug("not released"); + call->state = RXRPC_CALL_SERVER_ACCEPTING; + list_move_tail(&call->accept_link, + &call->socket->acceptq); + } + write_unlock(&call->conn->lock); + read_lock(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE) + set_bit(RXRPC_CALL_POST_ACCEPT, &call->events); + read_unlock(&call->state_lock); + } + + spin_unlock_bh(&call->lock); + if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) + goto maybe_reschedule; + } + + /* post a notification of an acceptable connection to the app */ + if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) { + _debug("post accept"); + if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL, + 0, false) < 0) + goto no_mem; + clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events); + goto maybe_reschedule; + } + + /* handle incoming call acceptance */ + if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) { + _debug("accepted"); + ASSERTCMP(call->rx_data_post, ==, 0); + call->rx_data_post = 1; + read_lock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE) + set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events); + read_unlock_bh(&call->state_lock); + } + + /* drain the out of sequence received packet queue into the packet Rx + * queue */ + if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) { + while (call->rx_data_post == call->rx_first_oos) + if (rxrpc_drain_rx_oos_queue(call) < 0) + break; + goto maybe_reschedule; + } + + /* other 
events may have been raised since we started checking */ + goto maybe_reschedule; + +send_ACK_with_skew: + ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - + ntohl(ack.serial)); +send_ACK: + hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); + _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", + ntohl(hdr.serial), + ntohs(ack.maxSkew), + ntohl(ack.firstPacket), + ntohl(ack.previousPacket), + ntohl(ack.serial), + rxrpc_acks[ack.reason], + ack.nAcks); + + del_timer_sync(&call->ack_timer); + if (ack.nAcks > 0) + set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags); + goto send_message_2; + +send_message: + _debug("send message"); + + hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); + _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial)); +send_message_2: + + len = iov[0].iov_len; + ioc = 1; + if (iov[4].iov_len) { + ioc = 5; + len += iov[4].iov_len; + len += iov[3].iov_len; + len += iov[2].iov_len; + len += iov[1].iov_len; + } else if (iov[3].iov_len) { + ioc = 4; + len += iov[3].iov_len; + len += iov[2].iov_len; + len += iov[1].iov_len; + } else if (iov[2].iov_len) { + ioc = 3; + len += iov[2].iov_len; + len += iov[1].iov_len; + } else if (iov[1].iov_len) { + ioc = 2; + len += iov[1].iov_len; + } + + ret = kernel_sendmsg(call->conn->trans->local->socket, + &msg, iov, ioc, len); + if (ret < 0) { + _debug("sendmsg failed: %d", ret); + read_lock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_DEAD) + schedule_work(&call->processor); + read_unlock_bh(&call->state_lock); + goto error; + } + + switch (genbit) { + case RXRPC_CALL_ABORT: + clear_bit(genbit, &call->events); + clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events); + goto kill_ACKs; + + case RXRPC_CALL_ACK_FINAL: + write_lock_bh(&call->state_lock); + if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK) + call->state = RXRPC_CALL_COMPLETE; + write_unlock_bh(&call->state_lock); + goto kill_ACKs; + + default: + clear_bit(genbit, &call->events); + switch (call->state) { + case RXRPC_CALL_CLIENT_AWAIT_REPLY: + case RXRPC_CALL_CLIENT_RECV_REPLY: + case RXRPC_CALL_SERVER_RECV_REQUEST: + case RXRPC_CALL_SERVER_ACK_REQUEST: + _debug("start ACK timer"); + rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, + call->ackr_serial, false); + default: + break; + } + goto maybe_reschedule; + } + +kill_ACKs: + del_timer_sync(&call->ack_timer); + if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events)) + rxrpc_put_call(call); + clear_bit(RXRPC_CALL_ACK, &call->events); + +maybe_reschedule: + if (call->events || !skb_queue_empty(&call->rx_queue)) { + read_lock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_DEAD) + schedule_work(&call->processor); + read_unlock_bh(&call->state_lock); + } + + /* don't leave aborted connections on the accept queue */ + if (call->state >= RXRPC_CALL_COMPLETE && + !list_empty(&call->accept_link)) { + _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }", + call, call->events, call->flags, + ntohl(call->conn->cid)); + + read_lock_bh(&call->state_lock); + if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && + !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) + schedule_work(&call->processor); + read_unlock_bh(&call->state_lock); + } + +error: + clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags); + kfree(acks); + + /* because we don't want two CPUs both processing the work item for one + * call at the same time, we use a flag to note when it's busy; however + * this means there's a race between clearing the flag and setting the + * work pending bit and the work item being processed again 
*/ + if (call->events && !work_pending(&call->processor)) { + _debug("jumpstart %x", ntohl(call->conn->cid)); + schedule_work(&call->processor); + } + + _leave(""); + return; + +no_mem: + _debug("out of memory"); + goto maybe_reschedule; +} diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c new file mode 100644 index 000000000000..ac31cceda2f1 --- /dev/null +++ b/net/rxrpc/ar-call.c @@ -0,0 +1,787 @@ +/* RxRPC individual remote procedure call handling + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include "ar-internal.h" + +struct kmem_cache *rxrpc_call_jar; +LIST_HEAD(rxrpc_calls); +DEFINE_RWLOCK(rxrpc_call_lock); +static unsigned rxrpc_call_max_lifetime = 60; +static unsigned rxrpc_dead_call_timeout = 10; + +static void rxrpc_destroy_call(struct work_struct *work); +static void rxrpc_call_life_expired(unsigned long _call); +static void rxrpc_dead_call_expired(unsigned long _call); +static void rxrpc_ack_time_expired(unsigned long _call); +static void rxrpc_resend_time_expired(unsigned long _call); + +/* + * allocate a new call + */ +static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) +{ + struct rxrpc_call *call; + + call = kmem_cache_zalloc(rxrpc_call_jar, gfp); + if (!call) + return NULL; + + call->acks_winsz = 16; + call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long), + gfp); + if (!call->acks_window) { + kmem_cache_free(rxrpc_call_jar, call); + return NULL; + } + + setup_timer(&call->lifetimer, &rxrpc_call_life_expired, + (unsigned long) call); + setup_timer(&call->deadspan, &rxrpc_dead_call_expired, + (unsigned long) call); + setup_timer(&call->ack_timer, &rxrpc_ack_time_expired, + (unsigned long) call); + setup_timer(&call->resend_timer, &rxrpc_resend_time_expired, + (unsigned long) call); + INIT_WORK(&call->destroyer, &rxrpc_destroy_call); + INIT_WORK(&call->processor, &rxrpc_process_call); + INIT_LIST_HEAD(&call->accept_link); + skb_queue_head_init(&call->rx_queue); + skb_queue_head_init(&call->rx_oos_queue); + init_waitqueue_head(&call->tx_waitq); + spin_lock_init(&call->lock); + rwlock_init(&call->state_lock); + atomic_set(&call->usage, 1); + call->debug_id = atomic_inc_return(&rxrpc_debug_id); + call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; + + memset(&call->sock_node, 0xed, sizeof(call->sock_node)); + + call->rx_data_expect = 1; + call->rx_data_eaten = 0; + call->rx_first_oos = 0; + call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS; + call->creation_jif = jiffies; + return call; +} + +/* + * allocate a new client call and attempt to to get a connection slot for it + */ +static struct rxrpc_call *rxrpc_alloc_client_call( + struct rxrpc_sock *rx, + struct rxrpc_transport *trans, + struct rxrpc_conn_bundle *bundle, + gfp_t gfp) +{ + struct rxrpc_call *call; + int ret; + + _enter(""); + + ASSERT(rx != NULL); + ASSERT(trans != NULL); + ASSERT(bundle != NULL); + + call = rxrpc_alloc_call(gfp); + if (!call) + return ERR_PTR(-ENOMEM); + + sock_hold(&rx->sk); + call->socket = rx; + call->rx_data_post = 1; + + ret = rxrpc_connect_call(rx, trans, bundle, call, gfp); + if (ret < 0) { + kmem_cache_free(rxrpc_call_jar, call); + return ERR_PTR(ret); + } + + 
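+	/* put the call on the peer's error distribution list so that network
+	 * errors reported against the peer get propagated to this call */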
spin_lock(&call->conn->trans->peer->lock); + list_add(&call->error_link, &call->conn->trans->peer->error_targets); + spin_unlock(&call->conn->trans->peer->lock); + + call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ; + add_timer(&call->lifetimer); + + _leave(" = %p", call); + return call; +} + +/* + * set up a call for the given data + * - called in process context with IRQs enabled + */ +struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx, + struct rxrpc_transport *trans, + struct rxrpc_conn_bundle *bundle, + unsigned long user_call_ID, + int create, + gfp_t gfp) +{ + struct rxrpc_call *call, *candidate; + struct rb_node *p, *parent, **pp; + + _enter("%p,%d,%d,%lx,%d", + rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1, + user_call_ID, create); + + /* search the extant calls first for one that matches the specified + * user ID */ + read_lock(&rx->call_lock); + + p = rx->calls.rb_node; + while (p) { + call = rb_entry(p, struct rxrpc_call, sock_node); + + if (user_call_ID < call->user_call_ID) + p = p->rb_left; + else if (user_call_ID > call->user_call_ID) + p = p->rb_right; + else + goto found_extant_call; + } + + read_unlock(&rx->call_lock); + + if (!create || !trans) + return ERR_PTR(-EBADSLT); + + /* not yet present - create a candidate for a new record and then + * redo the search */ + candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp); + if (IS_ERR(candidate)) { + _leave(" = %ld", PTR_ERR(candidate)); + return candidate; + } + + candidate->user_call_ID = user_call_ID; + __set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags); + + write_lock(&rx->call_lock); + + pp = &rx->calls.rb_node; + parent = NULL; + while (*pp) { + parent = *pp; + call = rb_entry(parent, struct rxrpc_call, sock_node); + + if (user_call_ID < call->user_call_ID) + pp = &(*pp)->rb_left; + else if (user_call_ID > call->user_call_ID) + pp = &(*pp)->rb_right; + else + goto found_extant_second; + } + + /* second search also failed; add the new call */ + call = candidate; + candidate = NULL; + rxrpc_get_call(call); + + rb_link_node(&call->sock_node, parent, pp); + rb_insert_color(&call->sock_node, &rx->calls); + write_unlock(&rx->call_lock); + + write_lock_bh(&rxrpc_call_lock); + list_add_tail(&call->link, &rxrpc_calls); + write_unlock_bh(&rxrpc_call_lock); + + _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); + + _leave(" = %p [new]", call); + return call; + + /* we found the call in the list immediately */ +found_extant_call: + rxrpc_get_call(call); + read_unlock(&rx->call_lock); + _leave(" = %p [extant %d]", call, atomic_read(&call->usage)); + return call; + + /* we found the call on the second time through the list */ +found_extant_second: + rxrpc_get_call(call); + write_unlock(&rx->call_lock); + rxrpc_put_call(candidate); + _leave(" = %p [second %d]", call, atomic_read(&call->usage)); + return call; +} + +/* + * set up an incoming call + * - called in process context with IRQs enabled + */ +struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, + struct rxrpc_connection *conn, + struct rxrpc_header *hdr, + gfp_t gfp) +{ + struct rxrpc_call *call, *candidate; + struct rb_node **p, *parent; + __be32 call_id; + + _enter(",%d,,%x", conn->debug_id, gfp); + + ASSERT(rx != NULL); + + candidate = rxrpc_alloc_call(gfp); + if (!candidate) + return ERR_PTR(-EBUSY); + + candidate->socket = rx; + candidate->conn = conn; + candidate->cid = hdr->cid; + candidate->call_id = hdr->callNumber; + candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK; + 
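+	/* no DATA can be posted to the application until the call has been
+	 * accepted (and, if required, its security negotiated) */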
candidate->rx_data_post = 0; + candidate->state = RXRPC_CALL_SERVER_ACCEPTING; + if (conn->security_ix > 0) + candidate->state = RXRPC_CALL_SERVER_SECURING; + + write_lock_bh(&conn->lock); + + /* set the channel for this call */ + call = conn->channels[candidate->channel]; + _debug("channel[%u] is %p", candidate->channel, call); + if (call && call->call_id == hdr->callNumber) { + /* already set; must've been a duplicate packet */ + _debug("extant call [%d]", call->state); + ASSERTCMP(call->conn, ==, conn); + + read_lock(&call->state_lock); + switch (call->state) { + case RXRPC_CALL_LOCALLY_ABORTED: + if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) + schedule_work(&call->processor); + case RXRPC_CALL_REMOTELY_ABORTED: + read_unlock(&call->state_lock); + goto aborted_call; + default: + rxrpc_get_call(call); + read_unlock(&call->state_lock); + goto extant_call; + } + } + + if (call) { + /* it seems the channel is still in use from the previous call + * - ditch the old binding if its call is now complete */ + _debug("CALL: %u { %s }", + call->debug_id, rxrpc_call_states[call->state]); + + if (call->state >= RXRPC_CALL_COMPLETE) { + conn->channels[call->channel] = NULL; + } else { + write_unlock_bh(&conn->lock); + kmem_cache_free(rxrpc_call_jar, candidate); + _leave(" = -EBUSY"); + return ERR_PTR(-EBUSY); + } + } + + /* check the call number isn't duplicate */ + _debug("check dup"); + call_id = hdr->callNumber; + p = &conn->calls.rb_node; + parent = NULL; + while (*p) { + parent = *p; + call = rb_entry(parent, struct rxrpc_call, conn_node); + + if (call_id < call->call_id) + p = &(*p)->rb_left; + else if (call_id > call->call_id) + p = &(*p)->rb_right; + else + goto old_call; + } + + /* make the call available */ + _debug("new call"); + call = candidate; + candidate = NULL; + rb_link_node(&call->conn_node, parent, p); + rb_insert_color(&call->conn_node, &conn->calls); + conn->channels[call->channel] = call; + sock_hold(&rx->sk); + atomic_inc(&conn->usage); + write_unlock_bh(&conn->lock); + + spin_lock(&conn->trans->peer->lock); + list_add(&call->error_link, &conn->trans->peer->error_targets); + spin_unlock(&conn->trans->peer->lock); + + write_lock_bh(&rxrpc_call_lock); + list_add_tail(&call->link, &rxrpc_calls); + write_unlock_bh(&rxrpc_call_lock); + + _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id); + + call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ; + add_timer(&call->lifetimer); + _leave(" = %p {%d} [new]", call, call->debug_id); + return call; + +extant_call: + write_unlock_bh(&conn->lock); + kmem_cache_free(rxrpc_call_jar, candidate); + _leave(" = %p {%d} [extant]", call, call ? 
call->debug_id : -1); + return call; + +aborted_call: + write_unlock_bh(&conn->lock); + kmem_cache_free(rxrpc_call_jar, candidate); + _leave(" = -ECONNABORTED"); + return ERR_PTR(-ECONNABORTED); + +old_call: + write_unlock_bh(&conn->lock); + kmem_cache_free(rxrpc_call_jar, candidate); + _leave(" = -ECONNRESET [old]"); + return ERR_PTR(-ECONNRESET); +} + +/* + * find an extant server call + * - called in process context with IRQs enabled + */ +struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx, + unsigned long user_call_ID) +{ + struct rxrpc_call *call; + struct rb_node *p; + + _enter("%p,%lx", rx, user_call_ID); + + /* search the extant calls for one that matches the specified user + * ID */ + read_lock(&rx->call_lock); + + p = rx->calls.rb_node; + while (p) { + call = rb_entry(p, struct rxrpc_call, sock_node); + + if (user_call_ID < call->user_call_ID) + p = p->rb_left; + else if (user_call_ID > call->user_call_ID) + p = p->rb_right; + else + goto found_extant_call; + } + + read_unlock(&rx->call_lock); + _leave(" = NULL"); + return NULL; + + /* we found the call in the list immediately */ +found_extant_call: + rxrpc_get_call(call); + read_unlock(&rx->call_lock); + _leave(" = %p [%d]", call, atomic_read(&call->usage)); + return call; +} + +/* + * detach a call from a socket and set up for release + */ +void rxrpc_release_call(struct rxrpc_call *call) +{ + struct rxrpc_sock *rx = call->socket; + + _enter("{%d,%d,%d,%d}", + call->debug_id, atomic_read(&call->usage), + atomic_read(&call->ackr_not_idle), + call->rx_first_oos); + + spin_lock_bh(&call->lock); + if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags)) + BUG(); + spin_unlock_bh(&call->lock); + + /* dissociate from the socket + * - the socket's ref on the call is passed to the death timer + */ + _debug("RELEASE CALL %p (%d CONN %p)", + call, call->debug_id, call->conn); + + write_lock_bh(&rx->call_lock); + if (!list_empty(&call->accept_link)) { + _debug("unlinking once-pending call %p { e=%lx f=%lx }", + call, call->events, call->flags); + ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags)); + list_del_init(&call->accept_link); + sk_acceptq_removed(&rx->sk); + } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { + rb_erase(&call->sock_node, &rx->calls); + memset(&call->sock_node, 0xdd, sizeof(call->sock_node)); + clear_bit(RXRPC_CALL_HAS_USERID, &call->flags); + } + write_unlock_bh(&rx->call_lock); + + if (call->conn->out_clientflag) + spin_lock(&call->conn->trans->client_lock); + write_lock_bh(&call->conn->lock); + + /* free up the channel for reuse */ + if (call->conn->out_clientflag) { + call->conn->avail_calls++; + if (call->conn->avail_calls == RXRPC_MAXCALLS) + list_move_tail(&call->conn->bundle_link, + &call->conn->bundle->unused_conns); + else if (call->conn->avail_calls == 1) + list_move_tail(&call->conn->bundle_link, + &call->conn->bundle->avail_conns); + } + + write_lock(&call->state_lock); + if (call->conn->channels[call->channel] == call) + call->conn->channels[call->channel] = NULL; + + if (call->state < RXRPC_CALL_COMPLETE && + call->state != RXRPC_CALL_CLIENT_FINAL_ACK) { + _debug("+++ ABORTING STATE %d +++\n", call->state); + call->state = RXRPC_CALL_LOCALLY_ABORTED; + call->abort_code = RX_CALL_DEAD; + set_bit(RXRPC_CALL_ABORT, &call->events); + schedule_work(&call->processor); + } + write_unlock(&call->state_lock); + write_unlock_bh(&call->conn->lock); + if (call->conn->out_clientflag) + spin_unlock(&call->conn->trans->client_lock); + + if (!skb_queue_empty(&call->rx_queue) || + 
!skb_queue_empty(&call->rx_oos_queue)) { + struct rxrpc_skb_priv *sp; + struct sk_buff *skb; + + _debug("purge Rx queues"); + + spin_lock_bh(&call->lock); + while ((skb = skb_dequeue(&call->rx_queue)) || + (skb = skb_dequeue(&call->rx_oos_queue))) { + sp = rxrpc_skb(skb); + if (sp->call) { + ASSERTCMP(sp->call, ==, call); + rxrpc_put_call(call); + sp->call = NULL; + } + skb->destructor = NULL; + spin_unlock_bh(&call->lock); + + _debug("- zap %s %%%u #%u", + rxrpc_pkts[sp->hdr.type], + ntohl(sp->hdr.serial), + ntohl(sp->hdr.seq)); + rxrpc_free_skb(skb); + spin_lock_bh(&call->lock); + } + spin_unlock_bh(&call->lock); + + ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE); + } + + del_timer_sync(&call->resend_timer); + del_timer_sync(&call->ack_timer); + del_timer_sync(&call->lifetimer); + call->deadspan.expires = jiffies + rxrpc_dead_call_timeout * HZ; + add_timer(&call->deadspan); + + _leave(""); +} + +/* + * handle a dead call being ready for reaping + */ +static void rxrpc_dead_call_expired(unsigned long _call) +{ + struct rxrpc_call *call = (struct rxrpc_call *) _call; + + _enter("{%d}", call->debug_id); + + write_lock_bh(&call->state_lock); + call->state = RXRPC_CALL_DEAD; + write_unlock_bh(&call->state_lock); + rxrpc_put_call(call); +} + +/* + * mark a call as to be released, aborting it if it's still in progress + * - called with softirqs disabled + */ +static void rxrpc_mark_call_released(struct rxrpc_call *call) +{ + bool sched; + + write_lock(&call->state_lock); + if (call->state < RXRPC_CALL_DEAD) { + sched = false; + if (call->state < RXRPC_CALL_COMPLETE) { + _debug("abort call %p", call); + call->state = RXRPC_CALL_LOCALLY_ABORTED; + call->abort_code = RX_CALL_DEAD; + if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) + sched = true; + } + if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) + sched = true; + if (sched) + schedule_work(&call->processor); + } + write_unlock(&call->state_lock); +} + +/* + * release all the calls associated with a socket + */ +void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) +{ + struct rxrpc_call *call; + struct rb_node *p; + + _enter("%p", rx); + + read_lock_bh(&rx->call_lock); + + /* mark all the calls as no longer wanting incoming packets */ + for (p = rb_first(&rx->calls); p; p = rb_next(p)) { + call = rb_entry(p, struct rxrpc_call, sock_node); + rxrpc_mark_call_released(call); + } + + /* kill the not-yet-accepted incoming calls */ + list_for_each_entry(call, &rx->secureq, accept_link) { + rxrpc_mark_call_released(call); + } + + list_for_each_entry(call, &rx->acceptq, accept_link) { + rxrpc_mark_call_released(call); + } + + read_unlock_bh(&rx->call_lock); + _leave(""); +} + +/* + * release a call + */ +void __rxrpc_put_call(struct rxrpc_call *call) +{ + ASSERT(call != NULL); + + _enter("%p{u=%d}", call, atomic_read(&call->usage)); + + ASSERTCMP(atomic_read(&call->usage), >, 0); + + if (atomic_dec_and_test(&call->usage)) { + _debug("call %d dead", call->debug_id); + ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); + schedule_work(&call->destroyer); + } + _leave(""); +} + +/* + * clean up a call + */ +static void rxrpc_cleanup_call(struct rxrpc_call *call) +{ + _net("DESTROY CALL %d", call->debug_id); + + ASSERT(call->socket); + + memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); + + del_timer_sync(&call->lifetimer); + del_timer_sync(&call->deadspan); + del_timer_sync(&call->ack_timer); + del_timer_sync(&call->resend_timer); + + ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); + ASSERTCMP(call->events, ==, 0); + if 
(work_pending(&call->processor)) { + _debug("defer destroy"); + schedule_work(&call->destroyer); + return; + } + + if (call->conn) { + spin_lock(&call->conn->trans->peer->lock); + list_del(&call->error_link); + spin_unlock(&call->conn->trans->peer->lock); + + write_lock_bh(&call->conn->lock); + rb_erase(&call->conn_node, &call->conn->calls); + write_unlock_bh(&call->conn->lock); + rxrpc_put_connection(call->conn); + } + + if (call->acks_window) { + _debug("kill Tx window %d", + CIRC_CNT(call->acks_head, call->acks_tail, + call->acks_winsz)); + smp_mb(); + while (CIRC_CNT(call->acks_head, call->acks_tail, + call->acks_winsz) > 0) { + struct rxrpc_skb_priv *sp; + unsigned long _skb; + + _skb = call->acks_window[call->acks_tail] & ~1; + sp = rxrpc_skb((struct sk_buff *) _skb); + _debug("+++ clear Tx %u", ntohl(sp->hdr.seq)); + rxrpc_free_skb((struct sk_buff *) _skb); + call->acks_tail = + (call->acks_tail + 1) & (call->acks_winsz - 1); + } + + kfree(call->acks_window); + } + + rxrpc_free_skb(call->tx_pending); + + rxrpc_purge_queue(&call->rx_queue); + ASSERT(skb_queue_empty(&call->rx_oos_queue)); + sock_put(&call->socket->sk); + kmem_cache_free(rxrpc_call_jar, call); +} + +/* + * destroy a call + */ +static void rxrpc_destroy_call(struct work_struct *work) +{ + struct rxrpc_call *call = + container_of(work, struct rxrpc_call, destroyer); + + _enter("%p{%d,%d,%p}", + call, atomic_read(&call->usage), call->channel, call->conn); + + ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); + + write_lock_bh(&rxrpc_call_lock); + list_del_init(&call->link); + write_unlock_bh(&rxrpc_call_lock); + + rxrpc_cleanup_call(call); + _leave(""); +} + +/* + * preemptively destroy all the call records from a transport endpoint rather + * than waiting for them to time out + */ +void __exit rxrpc_destroy_all_calls(void) +{ + struct rxrpc_call *call; + + _enter(""); + write_lock_bh(&rxrpc_call_lock); + + while (!list_empty(&rxrpc_calls)) { + call = list_entry(rxrpc_calls.next, struct rxrpc_call, link); + _debug("Zapping call %p", call); + + list_del_init(&call->link); + + switch (atomic_read(&call->usage)) { + case 0: + ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); + break; + case 1: + if (del_timer_sync(&call->deadspan) != 0 && + call->state != RXRPC_CALL_DEAD) + rxrpc_dead_call_expired((unsigned long) call); + if (call->state != RXRPC_CALL_DEAD) + break; + default: + printk(KERN_ERR "RXRPC:" + " Call %p still in use (%d,%d,%s,%lx,%lx)!\n", + call, atomic_read(&call->usage), + atomic_read(&call->ackr_not_idle), + rxrpc_call_states[call->state], + call->flags, call->events); + if (!skb_queue_empty(&call->rx_queue)) + printk(KERN_ERR"RXRPC: Rx queue occupied\n"); + if (!skb_queue_empty(&call->rx_oos_queue)) + printk(KERN_ERR"RXRPC: OOS queue occupied\n"); + break; + } + + write_unlock_bh(&rxrpc_call_lock); + cond_resched(); + write_lock_bh(&rxrpc_call_lock); + } + + write_unlock_bh(&rxrpc_call_lock); + _leave(""); +} + +/* + * handle call lifetime being exceeded + */ +static void rxrpc_call_life_expired(unsigned long _call) +{ + struct rxrpc_call *call = (struct rxrpc_call *) _call; + + if (call->state >= RXRPC_CALL_COMPLETE) + return; + + _enter("{%d}", call->debug_id); + read_lock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE) { + set_bit(RXRPC_CALL_LIFE_TIMER, &call->events); + schedule_work(&call->processor); + } + read_unlock_bh(&call->state_lock); +} + +/* + * handle resend timer expiry + */ +static void rxrpc_resend_time_expired(unsigned long _call) +{ + struct rxrpc_call *call = (struct rxrpc_call *) 
_call; + + _enter("{%d}", call->debug_id); + + if (call->state >= RXRPC_CALL_COMPLETE) + return; + + read_lock_bh(&call->state_lock); + clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); + if (call->state < RXRPC_CALL_COMPLETE && + !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) + schedule_work(&call->processor); + read_unlock_bh(&call->state_lock); +} + +/* + * handle ACK timer expiry + */ +static void rxrpc_ack_time_expired(unsigned long _call) +{ + struct rxrpc_call *call = (struct rxrpc_call *) _call; + + _enter("{%d}", call->debug_id); + + if (call->state >= RXRPC_CALL_COMPLETE) + return; + + read_lock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE && + !test_and_set_bit(RXRPC_CALL_ACK, &call->events)) + schedule_work(&call->processor); + read_unlock_bh(&call->state_lock); +} diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c new file mode 100644 index 000000000000..01eb33c30571 --- /dev/null +++ b/net/rxrpc/ar-connection.c @@ -0,0 +1,895 @@ +/* RxRPC virtual connection handler + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +static void rxrpc_connection_reaper(struct work_struct *work); + +LIST_HEAD(rxrpc_connections); +DEFINE_RWLOCK(rxrpc_connection_lock); +static unsigned long rxrpc_connection_timeout = 10 * 60; +static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper); + +/* + * allocate a new client connection bundle + */ +static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp) +{ + struct rxrpc_conn_bundle *bundle; + + _enter(""); + + bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp); + if (bundle) { + INIT_LIST_HEAD(&bundle->unused_conns); + INIT_LIST_HEAD(&bundle->avail_conns); + INIT_LIST_HEAD(&bundle->busy_conns); + init_waitqueue_head(&bundle->chanwait); + atomic_set(&bundle->usage, 1); + } + + _leave(" = %p", bundle); + return bundle; +} + +/* + * compare bundle parameters with what we're looking for + * - return -ve, 0 or +ve + */ +static inline +int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle, + struct key *key, __be16 service_id) +{ + return (bundle->service_id - service_id) ?: + ((unsigned long) bundle->key - (unsigned long) key); +} + +/* + * get bundle of client connections that a client socket can make use of + */ +struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx, + struct rxrpc_transport *trans, + struct key *key, + __be16 service_id, + gfp_t gfp) +{ + struct rxrpc_conn_bundle *bundle, *candidate; + struct rb_node *p, *parent, **pp; + + _enter("%p{%x},%x,%hx,", + rx, key_serial(key), trans->debug_id, ntohl(service_id)); + + if (rx->trans == trans && rx->bundle) { + atomic_inc(&rx->bundle->usage); + return rx->bundle; + } + + /* search the extant bundles first for one that matches the specified + * user ID */ + spin_lock(&trans->client_lock); + + p = trans->bundles.rb_node; + while (p) { + bundle = rb_entry(p, struct rxrpc_conn_bundle, node); + + if (rxrpc_cmp_bundle(bundle, key, service_id) < 0) + p = p->rb_left; + else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0) + p = p->rb_right; + else + goto found_extant_bundle; + } + + 
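+	/* First pass found no matching bundle.  Drop the lock, allocate a
+	 * candidate (which may sleep), then retake the lock and search again in
+	 * case another thread installed a matching bundle in the meantime. */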
spin_unlock(&trans->client_lock); + + /* not yet present - create a candidate for a new record and then + * redo the search */ + candidate = rxrpc_alloc_bundle(gfp); + if (!candidate) { + _leave(" = -ENOMEM"); + return ERR_PTR(-ENOMEM); + } + + candidate->key = key_get(key); + candidate->service_id = service_id; + + spin_lock(&trans->client_lock); + + pp = &trans->bundles.rb_node; + parent = NULL; + while (*pp) { + parent = *pp; + bundle = rb_entry(parent, struct rxrpc_conn_bundle, node); + + if (rxrpc_cmp_bundle(bundle, key, service_id) < 0) + pp = &(*pp)->rb_left; + else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0) + pp = &(*pp)->rb_right; + else + goto found_extant_second; + } + + /* second search also failed; add the new bundle */ + bundle = candidate; + candidate = NULL; + + rb_link_node(&bundle->node, parent, pp); + rb_insert_color(&bundle->node, &trans->bundles); + spin_unlock(&trans->client_lock); + _net("BUNDLE new on trans %d", trans->debug_id); + if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) { + atomic_inc(&bundle->usage); + rx->bundle = bundle; + } + _leave(" = %p [new]", bundle); + return bundle; + + /* we found the bundle in the list immediately */ +found_extant_bundle: + atomic_inc(&bundle->usage); + spin_unlock(&trans->client_lock); + _net("BUNDLE old on trans %d", trans->debug_id); + if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) { + atomic_inc(&bundle->usage); + rx->bundle = bundle; + } + _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage)); + return bundle; + + /* we found the bundle on the second time through the list */ +found_extant_second: + atomic_inc(&bundle->usage); + spin_unlock(&trans->client_lock); + kfree(candidate); + _net("BUNDLE old2 on trans %d", trans->debug_id); + if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) { + atomic_inc(&bundle->usage); + rx->bundle = bundle; + } + _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage)); + return bundle; +} + +/* + * release a bundle + */ +void rxrpc_put_bundle(struct rxrpc_transport *trans, + struct rxrpc_conn_bundle *bundle) +{ + _enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage)); + + if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) { + _debug("Destroy bundle"); + rb_erase(&bundle->node, &trans->bundles); + spin_unlock(&trans->client_lock); + ASSERT(list_empty(&bundle->unused_conns)); + ASSERT(list_empty(&bundle->avail_conns)); + ASSERT(list_empty(&bundle->busy_conns)); + ASSERTCMP(bundle->num_conns, ==, 0); + key_put(bundle->key); + kfree(bundle); + } + + _leave(""); +} + +/* + * allocate a new connection + */ +static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) +{ + struct rxrpc_connection *conn; + + _enter(""); + + conn = kzalloc(sizeof(struct rxrpc_connection), gfp); + if (conn) { + INIT_WORK(&conn->processor, &rxrpc_process_connection); + INIT_LIST_HEAD(&conn->bundle_link); + conn->calls = RB_ROOT; + skb_queue_head_init(&conn->rx_queue); + rwlock_init(&conn->lock); + spin_lock_init(&conn->state_lock); + atomic_set(&conn->usage, 1); + conn->debug_id = atomic_inc_return(&rxrpc_debug_id); + conn->avail_calls = RXRPC_MAXCALLS; + conn->size_align = 4; + conn->header_size = sizeof(struct rxrpc_header); + } + + _leave(" = %p{%d}", conn, conn->debug_id); + return conn; +} + +/* + * assign a connection ID to a connection and add it to the transport's + * connection lookup tree + * - called with transport client lock held + */ +static void rxrpc_assign_connection_id(struct rxrpc_connection *conn) +{ + struct 
rxrpc_connection *xconn; + struct rb_node *parent, **p; + __be32 epoch; + u32 real_conn_id; + + _enter(""); + + epoch = conn->epoch; + + write_lock_bh(&conn->trans->conn_lock); + + conn->trans->conn_idcounter += RXRPC_CID_INC; + if (conn->trans->conn_idcounter < RXRPC_CID_INC) + conn->trans->conn_idcounter = RXRPC_CID_INC; + real_conn_id = conn->trans->conn_idcounter; + +attempt_insertion: + parent = NULL; + p = &conn->trans->client_conns.rb_node; + + while (*p) { + parent = *p; + xconn = rb_entry(parent, struct rxrpc_connection, node); + + if (epoch < xconn->epoch) + p = &(*p)->rb_left; + else if (epoch > xconn->epoch) + p = &(*p)->rb_right; + else if (real_conn_id < xconn->real_conn_id) + p = &(*p)->rb_left; + else if (real_conn_id > xconn->real_conn_id) + p = &(*p)->rb_right; + else + goto id_exists; + } + + /* we've found a suitable hole - arrange for this connection to occupy + * it */ + rb_link_node(&conn->node, parent, p); + rb_insert_color(&conn->node, &conn->trans->client_conns); + + conn->real_conn_id = real_conn_id; + conn->cid = htonl(real_conn_id); + write_unlock_bh(&conn->trans->conn_lock); + _leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid)); + return; + + /* we found a connection with the proposed ID - walk the tree from that + * point looking for the next unused ID */ +id_exists: + for (;;) { + real_conn_id += RXRPC_CID_INC; + if (real_conn_id < RXRPC_CID_INC) { + real_conn_id = RXRPC_CID_INC; + conn->trans->conn_idcounter = real_conn_id; + goto attempt_insertion; + } + + parent = rb_next(parent); + if (!parent) + goto attempt_insertion; + + xconn = rb_entry(parent, struct rxrpc_connection, node); + if (epoch < xconn->epoch || + real_conn_id < xconn->real_conn_id) + goto attempt_insertion; + } +} + +/* + * add a call to a connection's call-by-ID tree + */ +static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn, + struct rxrpc_call *call) +{ + struct rxrpc_call *xcall; + struct rb_node *parent, **p; + __be32 call_id; + + write_lock_bh(&conn->lock); + + call_id = call->call_id; + p = &conn->calls.rb_node; + parent = NULL; + while (*p) { + parent = *p; + xcall = rb_entry(parent, struct rxrpc_call, conn_node); + + if (call_id < xcall->call_id) + p = &(*p)->rb_left; + else if (call_id > xcall->call_id) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&call->conn_node, parent, p); + rb_insert_color(&call->conn_node, &conn->calls); + + write_unlock_bh(&conn->lock); +} + +/* + * connect a call on an exclusive connection + */ +static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, + struct rxrpc_transport *trans, + __be16 service_id, + struct rxrpc_call *call, + gfp_t gfp) +{ + struct rxrpc_connection *conn; + int chan, ret; + + _enter(""); + + conn = rx->conn; + if (!conn) { + /* not yet present - create a candidate for a new connection + * and then redo the check */ + conn = rxrpc_alloc_connection(gfp); + if (IS_ERR(conn)) { + _leave(" = %ld", PTR_ERR(conn)); + return PTR_ERR(conn); + } + + conn->trans = trans; + conn->bundle = NULL; + conn->service_id = service_id; + conn->epoch = rxrpc_epoch; + conn->in_clientflag = 0; + conn->out_clientflag = RXRPC_CLIENT_INITIATED; + conn->cid = 0; + conn->state = RXRPC_CONN_CLIENT; + conn->avail_calls = RXRPC_MAXCALLS; + conn->security_level = rx->min_sec_level; + conn->key = key_get(rx->key); + + ret = rxrpc_init_client_conn_security(conn); + if (ret < 0) { + key_put(conn->key); + kfree(conn); + _leave(" = %d [key]", ret); + return ret; + } + + write_lock_bh(&rxrpc_connection_lock); + 
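+	/* Publish the new exclusive connection on the global connection list
+	 * before giving it an ID and attaching it to this socket. */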
list_add_tail(&conn->link, &rxrpc_connections); + write_unlock_bh(&rxrpc_connection_lock); + + spin_lock(&trans->client_lock); + atomic_inc(&trans->usage); + + _net("CONNECT EXCL new %d on TRANS %d", + conn->debug_id, conn->trans->debug_id); + + rxrpc_assign_connection_id(conn); + rx->conn = conn; + } + + /* we've got a connection with a free channel and we can now attach the + * call to it + * - we're holding the transport's client lock + * - we're holding a reference on the connection + */ + for (chan = 0; chan < RXRPC_MAXCALLS; chan++) + if (!conn->channels[chan]) + goto found_channel; + goto no_free_channels; + +found_channel: + atomic_inc(&conn->usage); + conn->channels[chan] = call; + call->conn = conn; + call->channel = chan; + call->cid = conn->cid | htonl(chan); + call->call_id = htonl(++conn->call_counter); + + _net("CONNECT client on conn %d chan %d as call %x", + conn->debug_id, chan, ntohl(call->call_id)); + + spin_unlock(&trans->client_lock); + + rxrpc_add_call_ID_to_conn(conn, call); + _leave(" = 0"); + return 0; + +no_free_channels: + spin_unlock(&trans->client_lock); + _leave(" = -ENOSR"); + return -ENOSR; +} + +/* + * find a connection for a call + * - called in process context with IRQs enabled + */ +int rxrpc_connect_call(struct rxrpc_sock *rx, + struct rxrpc_transport *trans, + struct rxrpc_conn_bundle *bundle, + struct rxrpc_call *call, + gfp_t gfp) +{ + struct rxrpc_connection *conn, *candidate; + int chan, ret; + + DECLARE_WAITQUEUE(myself, current); + + _enter("%p,%lx,", rx, call->user_call_ID); + + if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags)) + return rxrpc_connect_exclusive(rx, trans, bundle->service_id, + call, gfp); + + spin_lock(&trans->client_lock); + for (;;) { + /* see if the bundle has a call slot available */ + if (!list_empty(&bundle->avail_conns)) { + _debug("avail"); + conn = list_entry(bundle->avail_conns.next, + struct rxrpc_connection, + bundle_link); + if (--conn->avail_calls == 0) + list_move(&conn->bundle_link, + &bundle->busy_conns); + atomic_inc(&conn->usage); + break; + } + + if (!list_empty(&bundle->unused_conns)) { + _debug("unused"); + conn = list_entry(bundle->unused_conns.next, + struct rxrpc_connection, + bundle_link); + atomic_inc(&conn->usage); + list_move(&conn->bundle_link, &bundle->avail_conns); + break; + } + + /* need to allocate a new connection */ + _debug("get new conn [%d]", bundle->num_conns); + + spin_unlock(&trans->client_lock); + + if (signal_pending(current)) + goto interrupted; + + if (bundle->num_conns >= 20) { + _debug("too many conns"); + + if (!(gfp & __GFP_WAIT)) { + _leave(" = -EAGAIN"); + return -EAGAIN; + } + + add_wait_queue(&bundle->chanwait, &myself); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (bundle->num_conns < 20 || + !list_empty(&bundle->unused_conns) || + !list_empty(&bundle->avail_conns)) + break; + if (signal_pending(current)) + goto interrupted_dequeue; + schedule(); + } + remove_wait_queue(&bundle->chanwait, &myself); + __set_current_state(TASK_RUNNING); + spin_lock(&trans->client_lock); + continue; + } + + /* not yet present - create a candidate for a new connection and then + * redo the check */ + candidate = rxrpc_alloc_connection(gfp); + if (IS_ERR(candidate)) { + _leave(" = %ld", PTR_ERR(candidate)); + return PTR_ERR(candidate); + } + + candidate->trans = trans; + candidate->bundle = bundle; + candidate->service_id = bundle->service_id; + candidate->epoch = rxrpc_epoch; + candidate->in_clientflag = 0; + candidate->out_clientflag = RXRPC_CLIENT_INITIATED; + candidate->cid 
= 0; + candidate->state = RXRPC_CONN_CLIENT; + candidate->avail_calls = RXRPC_MAXCALLS; + candidate->security_level = rx->min_sec_level; + candidate->key = key_get(rx->key); + + ret = rxrpc_init_client_conn_security(candidate); + if (ret < 0) { + key_put(candidate->key); + kfree(candidate); + _leave(" = %d [key]", ret); + return ret; + } + + write_lock_bh(&rxrpc_connection_lock); + list_add_tail(&candidate->link, &rxrpc_connections); + write_unlock_bh(&rxrpc_connection_lock); + + spin_lock(&trans->client_lock); + + list_add(&candidate->bundle_link, &bundle->unused_conns); + bundle->num_conns++; + atomic_inc(&bundle->usage); + atomic_inc(&trans->usage); + + _net("CONNECT new %d on TRANS %d", + candidate->debug_id, candidate->trans->debug_id); + + rxrpc_assign_connection_id(candidate); + if (candidate->security) + candidate->security->prime_packet_security(candidate); + + /* leave the candidate lurking in zombie mode attached to the + * bundle until we're ready for it */ + rxrpc_put_connection(candidate); + candidate = NULL; + } + + /* we've got a connection with a free channel and we can now attach the + * call to it + * - we're holding the transport's client lock + * - we're holding a reference on the connection + * - we're holding a reference on the bundle + */ + for (chan = 0; chan < RXRPC_MAXCALLS; chan++) + if (!conn->channels[chan]) + goto found_channel; + BUG(); + +found_channel: + conn->channels[chan] = call; + call->conn = conn; + call->channel = chan; + call->cid = conn->cid | htonl(chan); + call->call_id = htonl(++conn->call_counter); + + _net("CONNECT client on conn %d chan %d as call %x", + conn->debug_id, chan, ntohl(call->call_id)); + + spin_unlock(&trans->client_lock); + + rxrpc_add_call_ID_to_conn(conn, call); + + _leave(" = 0"); + return 0; + +interrupted_dequeue: + remove_wait_queue(&bundle->chanwait, &myself); + __set_current_state(TASK_RUNNING); +interrupted: + _leave(" = -ERESTARTSYS"); + return -ERESTARTSYS; +} + +/* + * get a record of an incoming connection + */ +struct rxrpc_connection * +rxrpc_incoming_connection(struct rxrpc_transport *trans, + struct rxrpc_header *hdr, + gfp_t gfp) +{ + struct rxrpc_connection *conn, *candidate = NULL; + struct rb_node *p, **pp; + const char *new = "old"; + __be32 epoch; + u32 conn_id; + + _enter(""); + + ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED); + + epoch = hdr->epoch; + conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK; + + /* search the connection list first */ + read_lock_bh(&trans->conn_lock); + + p = trans->server_conns.rb_node; + while (p) { + conn = rb_entry(p, struct rxrpc_connection, node); + + _debug("maybe %x", conn->real_conn_id); + + if (epoch < conn->epoch) + p = p->rb_left; + else if (epoch > conn->epoch) + p = p->rb_right; + else if (conn_id < conn->real_conn_id) + p = p->rb_left; + else if (conn_id > conn->real_conn_id) + p = p->rb_right; + else + goto found_extant_connection; + } + read_unlock_bh(&trans->conn_lock); + + /* not yet present - create a candidate for a new record and then + * redo the search */ + candidate = rxrpc_alloc_connection(gfp); + if (!candidate) { + _leave(" = -ENOMEM"); + return ERR_PTR(-ENOMEM); + } + + candidate->trans = trans; + candidate->epoch = hdr->epoch; + candidate->cid = hdr->cid & __constant_cpu_to_be32(RXRPC_CIDMASK); + candidate->service_id = hdr->serviceId; + candidate->security_ix = hdr->securityIndex; + candidate->in_clientflag = RXRPC_CLIENT_INITIATED; + candidate->out_clientflag = 0; + candidate->real_conn_id = conn_id; + candidate->state = RXRPC_CONN_SERVER; + if 
(candidate->service_id) + candidate->state = RXRPC_CONN_SERVER_UNSECURED; + + write_lock_bh(&trans->conn_lock); + + pp = &trans->server_conns.rb_node; + p = NULL; + while (*pp) { + p = *pp; + conn = rb_entry(p, struct rxrpc_connection, node); + + if (epoch < conn->epoch) + pp = &(*pp)->rb_left; + else if (epoch > conn->epoch) + pp = &(*pp)->rb_right; + else if (conn_id < conn->real_conn_id) + pp = &(*pp)->rb_left; + else if (conn_id > conn->real_conn_id) + pp = &(*pp)->rb_right; + else + goto found_extant_second; + } + + /* we can now add the new candidate to the list */ + conn = candidate; + candidate = NULL; + rb_link_node(&conn->node, p, pp); + rb_insert_color(&conn->node, &trans->server_conns); + atomic_inc(&conn->trans->usage); + + write_unlock_bh(&trans->conn_lock); + + write_lock_bh(&rxrpc_connection_lock); + list_add_tail(&conn->link, &rxrpc_connections); + write_unlock_bh(&rxrpc_connection_lock); + + new = "new"; + +success: + _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id); + + _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage)); + return conn; + + /* we found the connection in the list immediately */ +found_extant_connection: + if (hdr->securityIndex != conn->security_ix) { + read_unlock_bh(&trans->conn_lock); + goto security_mismatch; + } + atomic_inc(&conn->usage); + read_unlock_bh(&trans->conn_lock); + goto success; + + /* we found the connection on the second time through the list */ +found_extant_second: + if (hdr->securityIndex != conn->security_ix) { + write_unlock_bh(&trans->conn_lock); + goto security_mismatch; + } + atomic_inc(&conn->usage); + write_unlock_bh(&trans->conn_lock); + kfree(candidate); + goto success; + +security_mismatch: + kfree(candidate); + _leave(" = -EKEYREJECTED"); + return ERR_PTR(-EKEYREJECTED); +} + +/* + * find a connection based on transport and RxRPC connection ID for an incoming + * packet + */ +struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans, + struct rxrpc_header *hdr) +{ + struct rxrpc_connection *conn; + struct rb_node *p; + __be32 epoch; + u32 conn_id; + + _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags); + + read_lock_bh(&trans->conn_lock); + + conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK; + epoch = hdr->epoch; + + if (hdr->flags & RXRPC_CLIENT_INITIATED) + p = trans->server_conns.rb_node; + else + p = trans->client_conns.rb_node; + + while (p) { + conn = rb_entry(p, struct rxrpc_connection, node); + + _debug("maybe %x", conn->real_conn_id); + + if (epoch < conn->epoch) + p = p->rb_left; + else if (epoch > conn->epoch) + p = p->rb_right; + else if (conn_id < conn->real_conn_id) + p = p->rb_left; + else if (conn_id > conn->real_conn_id) + p = p->rb_right; + else + goto found; + } + + read_unlock_bh(&trans->conn_lock); + _leave(" = NULL"); + return NULL; + +found: + atomic_inc(&conn->usage); + read_unlock_bh(&trans->conn_lock); + _leave(" = %p", conn); + return conn; +} + +/* + * release a virtual connection + */ +void rxrpc_put_connection(struct rxrpc_connection *conn) +{ + _enter("%p{u=%d,d=%d}", + conn, atomic_read(&conn->usage), conn->debug_id); + + ASSERTCMP(atomic_read(&conn->usage), >, 0); + + conn->put_time = xtime.tv_sec; + if (atomic_dec_and_test(&conn->usage)) { + _debug("zombie"); + schedule_delayed_work(&rxrpc_connection_reap, 0); + } + + _leave(""); +} + +/* + * destroy a virtual connection + */ +static void rxrpc_destroy_connection(struct rxrpc_connection *conn) +{ + _enter("%p{%d}", conn, atomic_read(&conn->usage)); + + ASSERTCMP(atomic_read(&conn->usage), ==, 0); + + 
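+	/* Usage count has reached zero: detach the bundle, flush any queued
+	 * packets, drop the security context and the transport ref, then free
+	 * the connection record. */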
_net("DESTROY CONN %d", conn->debug_id); + + if (conn->bundle) + rxrpc_put_bundle(conn->trans, conn->bundle); + + ASSERT(RB_EMPTY_ROOT(&conn->calls)); + rxrpc_purge_queue(&conn->rx_queue); + + rxrpc_clear_conn_security(conn); + rxrpc_put_transport(conn->trans); + kfree(conn); + _leave(""); +} + +/* + * reap dead connections + */ +void rxrpc_connection_reaper(struct work_struct *work) +{ + struct rxrpc_connection *conn, *_p; + unsigned long now, earliest, reap_time; + + LIST_HEAD(graveyard); + + _enter(""); + + now = xtime.tv_sec; + earliest = ULONG_MAX; + + write_lock_bh(&rxrpc_connection_lock); + list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) { + _debug("reap CONN %d { u=%d,t=%ld }", + conn->debug_id, atomic_read(&conn->usage), + (long) now - (long) conn->put_time); + + if (likely(atomic_read(&conn->usage) > 0)) + continue; + + spin_lock(&conn->trans->client_lock); + write_lock(&conn->trans->conn_lock); + reap_time = conn->put_time + rxrpc_connection_timeout; + + if (atomic_read(&conn->usage) > 0) { + ; + } else if (reap_time <= now) { + list_move_tail(&conn->link, &graveyard); + if (conn->out_clientflag) + rb_erase(&conn->node, + &conn->trans->client_conns); + else + rb_erase(&conn->node, + &conn->trans->server_conns); + if (conn->bundle) { + list_del_init(&conn->bundle_link); + conn->bundle->num_conns--; + } + + } else if (reap_time < earliest) { + earliest = reap_time; + } + + write_unlock(&conn->trans->conn_lock); + spin_unlock(&conn->trans->client_lock); + } + write_unlock_bh(&rxrpc_connection_lock); + + if (earliest != ULONG_MAX) { + _debug("reschedule reaper %ld", (long) earliest - now); + ASSERTCMP(earliest, >, now); + schedule_delayed_work(&rxrpc_connection_reap, + (earliest - now) * HZ); + } + + /* then destroy all those pulled out */ + while (!list_empty(&graveyard)) { + conn = list_entry(graveyard.next, struct rxrpc_connection, + link); + list_del_init(&conn->link); + + ASSERTCMP(atomic_read(&conn->usage), ==, 0); + rxrpc_destroy_connection(conn); + } + + _leave(""); +} + +/* + * preemptively destroy all the connection records rather than waiting for them + * to time out + */ +void __exit rxrpc_destroy_all_connections(void) +{ + _enter(""); + + rxrpc_connection_timeout = 0; + cancel_delayed_work(&rxrpc_connection_reap); + schedule_delayed_work(&rxrpc_connection_reap, 0); + + _leave(""); +} diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c new file mode 100644 index 000000000000..4b02815c1ded --- /dev/null +++ b/net/rxrpc/ar-connevent.c @@ -0,0 +1,387 @@ +/* connection-level event handling + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +/* + * pass a connection-level abort onto all calls on that connection + */ +static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state, + u32 abort_code) +{ + struct rxrpc_call *call; + struct rb_node *p; + + _enter("{%d},%x", conn->debug_id, abort_code); + + read_lock_bh(&conn->lock); + + for (p = rb_first(&conn->calls); p; p = rb_next(p)) { + call = rb_entry(p, struct rxrpc_call, conn_node); + write_lock(&call->state_lock); + if (call->state <= RXRPC_CALL_COMPLETE) { + call->state = state; + call->abort_code = abort_code; + if (state == RXRPC_CALL_LOCALLY_ABORTED) + set_bit(RXRPC_CALL_CONN_ABORT, &call->events); + else + set_bit(RXRPC_CALL_RCVD_ABORT, &call->events); + schedule_work(&call->processor); + } + write_unlock(&call->state_lock); + } + + read_unlock_bh(&conn->lock); + _leave(""); +} + +/* + * generate a connection-level abort + */ +static int rxrpc_abort_connection(struct rxrpc_connection *conn, + u32 error, u32 abort_code) +{ + struct rxrpc_header hdr; + struct msghdr msg; + struct kvec iov[2]; + __be32 word; + size_t len; + int ret; + + _enter("%d,,%u,%u", conn->debug_id, error, abort_code); + + /* generate a connection-level abort */ + spin_lock_bh(&conn->state_lock); + if (conn->state < RXRPC_CONN_REMOTELY_ABORTED) { + conn->state = RXRPC_CONN_LOCALLY_ABORTED; + conn->error = error; + spin_unlock_bh(&conn->state_lock); + } else { + spin_unlock_bh(&conn->state_lock); + _leave(" = 0 [already dead]"); + return 0; + } + + rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code); + + msg.msg_name = &conn->trans->peer->srx.transport.sin; + msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + hdr.epoch = conn->epoch; + hdr.cid = conn->cid; + hdr.callNumber = 0; + hdr.seq = 0; + hdr.type = RXRPC_PACKET_TYPE_ABORT; + hdr.flags = conn->out_clientflag; + hdr.userStatus = 0; + hdr.securityIndex = conn->security_ix; + hdr._rsvd = 0; + hdr.serviceId = conn->service_id; + + word = htonl(abort_code); + + iov[0].iov_base = &hdr; + iov[0].iov_len = sizeof(hdr); + iov[1].iov_base = &word; + iov[1].iov_len = sizeof(word); + + len = iov[0].iov_len + iov[1].iov_len; + + hdr.serial = htonl(atomic_inc_return(&conn->serial)); + _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); + if (ret < 0) { + _debug("sendmsg failed: %d", ret); + return -EAGAIN; + } + + _leave(" = 0"); + return 0; +} + +/* + * mark a call as being on a now-secured channel + * - must be called with softirqs disabled + */ +void rxrpc_call_is_secure(struct rxrpc_call *call) +{ + _enter("%p", call); + if (call) { + read_lock(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE && + !test_and_set_bit(RXRPC_CALL_SECURED, &call->events)) + schedule_work(&call->processor); + read_unlock(&call->state_lock); + } +} + +/* + * connection-level Rx packet processor + */ +static int rxrpc_process_event(struct rxrpc_connection *conn, + struct sk_buff *skb, + u32 *_abort_code) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + __be32 tmp; + u32 serial; + int loop, ret; + + if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) + return -ECONNABORTED; + + serial = ntohl(sp->hdr.serial); + + switch (sp->hdr.type) { + case RXRPC_PACKET_TYPE_ABORT: + if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0) + return 
-EPROTO; + _proto("Rx ABORT %%%u { ac=%d }", serial, ntohl(tmp)); + + conn->state = RXRPC_CONN_REMOTELY_ABORTED; + rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, + ntohl(tmp)); + return -ECONNABORTED; + + case RXRPC_PACKET_TYPE_CHALLENGE: + if (conn->security) + return conn->security->respond_to_challenge( + conn, skb, _abort_code); + return -EPROTO; + + case RXRPC_PACKET_TYPE_RESPONSE: + if (!conn->security) + return -EPROTO; + + ret = conn->security->verify_response(conn, skb, _abort_code); + if (ret < 0) + return ret; + + ret = conn->security->init_connection_security(conn); + if (ret < 0) + return ret; + + conn->security->prime_packet_security(conn); + read_lock_bh(&conn->lock); + spin_lock(&conn->state_lock); + + if (conn->state == RXRPC_CONN_SERVER_CHALLENGING) { + conn->state = RXRPC_CONN_SERVER; + for (loop = 0; loop < RXRPC_MAXCALLS; loop++) + rxrpc_call_is_secure(conn->channels[loop]); + } + + spin_unlock(&conn->state_lock); + read_unlock_bh(&conn->lock); + return 0; + + default: + return -EPROTO; + } +} + +/* + * set up security and issue a challenge + */ +static void rxrpc_secure_connection(struct rxrpc_connection *conn) +{ + u32 abort_code; + int ret; + + _enter("{%d}", conn->debug_id); + + ASSERT(conn->security_ix != 0); + + if (!conn->key) { + _debug("set up security"); + ret = rxrpc_init_server_conn_security(conn); + switch (ret) { + case 0: + break; + case -ENOENT: + abort_code = RX_CALL_DEAD; + goto abort; + default: + abort_code = RXKADNOAUTH; + goto abort; + } + } + + ASSERT(conn->security != NULL); + + if (conn->security->issue_challenge(conn) < 0) { + abort_code = RX_CALL_DEAD; + ret = -ENOMEM; + goto abort; + } + + _leave(""); + return; + +abort: + _debug("abort %d, %d", ret, abort_code); + rxrpc_abort_connection(conn, -ret, abort_code); + _leave(" [aborted]"); +} + +/* + * connection-level event processor + */ +void rxrpc_process_connection(struct work_struct *work) +{ + struct rxrpc_connection *conn = + container_of(work, struct rxrpc_connection, processor); + struct rxrpc_skb_priv *sp; + struct sk_buff *skb; + u32 abort_code = RX_PROTOCOL_ERROR; + int ret; + + _enter("{%d}", conn->debug_id); + + atomic_inc(&conn->usage); + + if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) { + rxrpc_secure_connection(conn); + rxrpc_put_connection(conn); + } + + /* go through the conn-level event packets, releasing the ref on this + * connection that each one has when we've finished with it */ + while ((skb = skb_dequeue(&conn->rx_queue))) { + sp = rxrpc_skb(skb); + + ret = rxrpc_process_event(conn, skb, &abort_code); + switch (ret) { + case -EPROTO: + case -EKEYEXPIRED: + case -EKEYREJECTED: + goto protocol_error; + case -EAGAIN: + goto requeue_and_leave; + case -ECONNABORTED: + default: + rxrpc_put_connection(conn); + rxrpc_free_skb(skb); + break; + } + } + +out: + rxrpc_put_connection(conn); + _leave(""); + return; + +requeue_and_leave: + skb_queue_head(&conn->rx_queue, skb); + goto out; + +protocol_error: + if (rxrpc_abort_connection(conn, -ret, abort_code) < 0) + goto requeue_and_leave; + rxrpc_put_connection(conn); + rxrpc_free_skb(skb); + _leave(" [EPROTO]"); + goto out; +} + +/* + * reject packets through the local endpoint + */ +void rxrpc_reject_packets(struct work_struct *work) +{ + union { + struct sockaddr sa; + struct sockaddr_in sin; + } sa; + struct rxrpc_skb_priv *sp; + struct rxrpc_header hdr; + struct rxrpc_local *local; + struct sk_buff *skb; + struct msghdr msg; + struct kvec iov[2]; + size_t size; + __be32 code; + + local = container_of(work, 
struct rxrpc_local, rejecter); + rxrpc_get_local(local); + + _enter("%d", local->debug_id); + + iov[0].iov_base = &hdr; + iov[0].iov_len = sizeof(hdr); + iov[1].iov_base = &code; + iov[1].iov_len = sizeof(code); + size = sizeof(hdr) + sizeof(code); + + msg.msg_name = &sa; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + memset(&sa, 0, sizeof(sa)); + sa.sa.sa_family = local->srx.transport.family; + switch (sa.sa.sa_family) { + case AF_INET: + msg.msg_namelen = sizeof(sa.sin); + break; + default: + msg.msg_namelen = 0; + break; + } + + memset(&hdr, 0, sizeof(hdr)); + hdr.type = RXRPC_PACKET_TYPE_ABORT; + + while ((skb = skb_dequeue(&local->reject_queue))) { + sp = rxrpc_skb(skb); + switch (sa.sa.sa_family) { + case AF_INET: + sa.sin.sin_port = udp_hdr(skb)->source; + sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; + code = htonl(skb->priority); + + hdr.epoch = sp->hdr.epoch; + hdr.cid = sp->hdr.cid; + hdr.callNumber = sp->hdr.callNumber; + hdr.serviceId = sp->hdr.serviceId; + hdr.flags = sp->hdr.flags; + hdr.flags ^= RXRPC_CLIENT_INITIATED; + hdr.flags &= RXRPC_CLIENT_INITIATED; + + kernel_sendmsg(local->socket, &msg, iov, 2, size); + break; + + default: + break; + } + + rxrpc_free_skb(skb); + rxrpc_put_local(local); + } + + rxrpc_put_local(local); + _leave(""); +} diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c new file mode 100644 index 000000000000..f5539e2f7b58 --- /dev/null +++ b/net/rxrpc/ar-error.c @@ -0,0 +1,253 @@ +/* Error message handling (ICMP) + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +/* + * handle an error received on the local endpoint + */ +void rxrpc_UDP_error_report(struct sock *sk) +{ + struct sock_exterr_skb *serr; + struct rxrpc_transport *trans; + struct rxrpc_local *local = sk->sk_user_data; + struct rxrpc_peer *peer; + struct sk_buff *skb; + __be32 addr; + __be16 port; + + _enter("%p{%d}", sk, local->debug_id); + + skb = skb_dequeue(&sk->sk_error_queue); + if (!skb) { + _leave("UDP socket errqueue empty"); + return; + } + + rxrpc_new_skb(skb); + + serr = SKB_EXT_ERR(skb); + addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); + port = serr->port; + + _net("Rx UDP Error from "NIPQUAD_FMT":%hu", + NIPQUAD(addr), ntohs(port)); + _debug("Msg l:%d d:%d", skb->len, skb->data_len); + + peer = rxrpc_find_peer(local, addr, port); + if (IS_ERR(peer)) { + rxrpc_free_skb(skb); + _leave(" [no peer]"); + return; + } + + trans = rxrpc_find_transport(local, peer); + if (!trans) { + rxrpc_put_peer(peer); + rxrpc_free_skb(skb); + _leave(" [no trans]"); + return; + } + + if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP && + serr->ee.ee_type == ICMP_DEST_UNREACH && + serr->ee.ee_code == ICMP_FRAG_NEEDED + ) { + u32 mtu = serr->ee.ee_info; + + _net("Rx Received ICMP Fragmentation Needed (%d)", mtu); + + /* wind down the local interface MTU */ + if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) { + peer->if_mtu = mtu; + _net("I/F MTU %u", mtu); + } + + /* ip_rt_frag_needed() may have eaten the info */ + if (mtu == 0) + mtu = ntohs(icmp_hdr(skb)->un.frag.mtu); + + if (mtu == 0) { + /* they didn't give us a size, estimate one */ + if (mtu > 1500) { + mtu >>= 1; + if (mtu < 1500) + mtu = 1500; + } else { + mtu -= 100; + if (mtu < peer->hdrsize) + mtu = peer->hdrsize + 4; + } + } + + if (mtu < peer->mtu) { + peer->mtu = mtu; + peer->maxdata = peer->mtu - peer->hdrsize; + _net("Net MTU %u (maxdata %u)", + peer->mtu, peer->maxdata); + } + } + + rxrpc_put_peer(peer); + + /* pass the transport ref to error_handler to release */ + skb_queue_tail(&trans->error_queue, skb); + schedule_work(&trans->error_handler); + + /* reset and regenerate socket error */ + spin_lock_bh(&sk->sk_error_queue.lock); + sk->sk_err = 0; + skb = skb_peek(&sk->sk_error_queue); + if (skb) { + sk->sk_err = SKB_EXT_ERR(skb)->ee.ee_errno; + spin_unlock_bh(&sk->sk_error_queue.lock); + sk->sk_error_report(sk); + } else { + spin_unlock_bh(&sk->sk_error_queue.lock); + } + + _leave(""); +} + +/* + * deal with UDP error messages + */ +void rxrpc_UDP_error_handler(struct work_struct *work) +{ + struct sock_extended_err *ee; + struct sock_exterr_skb *serr; + struct rxrpc_transport *trans = + container_of(work, struct rxrpc_transport, error_handler); + struct sk_buff *skb; + int local, err; + + _enter(""); + + skb = skb_dequeue(&trans->error_queue); + if (!skb) + return; + + serr = SKB_EXT_ERR(skb); + ee = &serr->ee; + + _net("Rx Error o=%d t=%d c=%d e=%d", + ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno); + + err = ee->ee_errno; + + switch (ee->ee_origin) { + case SO_EE_ORIGIN_ICMP: + local = 0; + switch (ee->ee_type) { + case ICMP_DEST_UNREACH: + switch (ee->ee_code) { + case ICMP_NET_UNREACH: + _net("Rx Received ICMP Network Unreachable"); + err = ENETUNREACH; + break; + case ICMP_HOST_UNREACH: + _net("Rx Received ICMP Host Unreachable"); + err = EHOSTUNREACH; + break; + case ICMP_PORT_UNREACH: + _net("Rx Received ICMP Port Unreachable"); + err = 
ECONNREFUSED; + break; + case ICMP_FRAG_NEEDED: + _net("Rx Received ICMP Fragmentation Needed (%d)", + ee->ee_info); + err = 0; /* dealt with elsewhere */ + break; + case ICMP_NET_UNKNOWN: + _net("Rx Received ICMP Unknown Network"); + err = ENETUNREACH; + break; + case ICMP_HOST_UNKNOWN: + _net("Rx Received ICMP Unknown Host"); + err = EHOSTUNREACH; + break; + default: + _net("Rx Received ICMP DestUnreach code=%u", + ee->ee_code); + break; + } + break; + + case ICMP_TIME_EXCEEDED: + _net("Rx Received ICMP TTL Exceeded"); + break; + + default: + _proto("Rx Received ICMP error { type=%u code=%u }", + ee->ee_type, ee->ee_code); + break; + } + break; + + case SO_EE_ORIGIN_LOCAL: + _proto("Rx Received local error { error=%d }", + ee->ee_errno); + local = 1; + break; + + case SO_EE_ORIGIN_NONE: + case SO_EE_ORIGIN_ICMP6: + default: + _proto("Rx Received error report { orig=%u }", + ee->ee_origin); + local = 0; + break; + } + + /* terminate all the affected calls if there's an unrecoverable + * error */ + if (err) { + struct rxrpc_call *call, *_n; + + _debug("ISSUE ERROR %d", err); + + spin_lock_bh(&trans->peer->lock); + trans->peer->net_error = err; + + list_for_each_entry_safe(call, _n, &trans->peer->error_targets, + error_link) { + write_lock(&call->state_lock); + if (call->state != RXRPC_CALL_COMPLETE && + call->state < RXRPC_CALL_NETWORK_ERROR) { + call->state = RXRPC_CALL_NETWORK_ERROR; + set_bit(RXRPC_CALL_RCVD_ERROR, &call->events); + schedule_work(&call->processor); + } + write_unlock(&call->state_lock); + list_del_init(&call->error_link); + } + + spin_unlock_bh(&trans->peer->lock); + } + + if (!skb_queue_empty(&trans->error_queue)) + schedule_work(&trans->error_handler); + + rxrpc_free_skb(skb); + rxrpc_put_transport(trans); + _leave(""); +} diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c new file mode 100644 index 000000000000..323c3454561c --- /dev/null +++ b/net/rxrpc/ar-input.c @@ -0,0 +1,791 @@ +/* RxRPC packet reception + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +unsigned long rxrpc_ack_timeout = 1; + +const char *rxrpc_pkts[] = { + "?00", + "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG", + "?09", "?10", "?11", "?12", "?13", "?14", "?15" +}; + +/* + * queue a packet for recvmsg to pass to userspace + * - the caller must hold a lock on call->lock + * - must not be called with interrupts disabled (sk_filter() disables BH's) + * - eats the packet whether successful or not + * - there must be just one reference to the packet, which the caller passes to + * this function + */ +int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, + bool force, bool terminal) +{ + struct rxrpc_skb_priv *sp; + struct sock *sk; + int skb_len, ret; + + _enter(",,%d,%d", force, terminal); + + ASSERT(!irqs_disabled()); + + sp = rxrpc_skb(skb); + ASSERTCMP(sp->call, ==, call); + + /* if we've already posted the terminal message for a call, then we + * don't post any more */ + if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) { + _debug("already terminated"); + ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE); + skb->destructor = NULL; + sp->call = NULL; + rxrpc_put_call(call); + rxrpc_free_skb(skb); + return 0; + } + + sk = &call->socket->sk; + + if (!force) { + /* cast skb->rcvbuf to unsigned... It's pointless, but + * reduces number of warnings when compiling with -W + * --ANK */ +// ret = -ENOBUFS; +// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= +// (unsigned) sk->sk_rcvbuf) +// goto out; + + ret = sk_filter(sk, skb); + if (ret < 0) + goto out; + } + + spin_lock_bh(&sk->sk_receive_queue.lock); + if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) && + !test_bit(RXRPC_CALL_RELEASED, &call->flags) && + call->socket->sk.sk_state != RXRPC_CLOSE) { + skb->destructor = rxrpc_packet_destructor; + skb->dev = NULL; + skb->sk = sk; + atomic_add(skb->truesize, &sk->sk_rmem_alloc); + + /* Cache the SKB length before we tack it onto the receive + * queue. Once it is added it no longer belongs to us and + * may be freed by other threads of control pulling packets + * from the queue. 
+ */ + skb_len = skb->len; + + _net("post skb %p", skb); + __skb_queue_tail(&sk->sk_receive_queue, skb); + spin_unlock_bh(&sk->sk_receive_queue.lock); + + if (!sock_flag(sk, SOCK_DEAD)) + sk->sk_data_ready(sk, skb_len); + + if (terminal) { + _debug("<<<< TERMINAL MESSAGE >>>>"); + set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags); + } + + skb = NULL; + } else { + spin_unlock_bh(&sk->sk_receive_queue.lock); + } + ret = 0; + +out: + /* release the socket buffer */ + if (skb) { + skb->destructor = NULL; + sp->call = NULL; + rxrpc_put_call(call); + rxrpc_free_skb(skb); + } + + _leave(" = %d", ret); + return ret; +} + +/* + * process a DATA packet, posting the packet to the appropriate queue + * - eats the packet if successful + */ +static int rxrpc_fast_process_data(struct rxrpc_call *call, + struct sk_buff *skb, u32 seq) +{ + struct rxrpc_skb_priv *sp; + bool terminal; + int ret, ackbit, ack; + + _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq); + + sp = rxrpc_skb(skb); + ASSERTCMP(sp->call, ==, NULL); + + spin_lock(&call->lock); + + if (call->state > RXRPC_CALL_COMPLETE) + goto discard; + + ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post); + ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv); + ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten); + + if (seq < call->rx_data_post) { + _debug("dup #%u [-%u]", seq, call->rx_data_post); + ack = RXRPC_ACK_DUPLICATE; + ret = -ENOBUFS; + goto discard_and_ack; + } + + /* we may already have the packet in the out of sequence queue */ + ackbit = seq - (call->rx_data_eaten + 1); + ASSERTCMP(ackbit, >=, 0); + if (__test_and_set_bit(ackbit, &call->ackr_window)) { + _debug("dup oos #%u [%u,%u]", + seq, call->rx_data_eaten, call->rx_data_post); + ack = RXRPC_ACK_DUPLICATE; + goto discard_and_ack; + } + + if (seq >= call->ackr_win_top) { + _debug("exceed #%u [%u]", seq, call->ackr_win_top); + __clear_bit(ackbit, &call->ackr_window); + ack = RXRPC_ACK_EXCEEDS_WINDOW; + goto discard_and_ack; + } + + if (seq == call->rx_data_expect) { + clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags); + call->rx_data_expect++; + } else if (seq > call->rx_data_expect) { + _debug("oos #%u [%u]", seq, call->rx_data_expect); + call->rx_data_expect = seq + 1; + if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) { + ack = RXRPC_ACK_OUT_OF_SEQUENCE; + goto enqueue_and_ack; + } + goto enqueue_packet; + } + + if (seq != call->rx_data_post) { + _debug("ahead #%u [%u]", seq, call->rx_data_post); + goto enqueue_packet; + } + + if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags)) + goto protocol_error; + + /* if the packet need security things doing to it, then it goes down + * the slow path */ + if (call->conn->security) + goto enqueue_packet; + + sp->call = call; + rxrpc_get_call(call); + terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) && + !(sp->hdr.flags & RXRPC_CLIENT_INITIATED)); + ret = rxrpc_queue_rcv_skb(call, skb, false, terminal); + if (ret < 0) { + if (ret == -ENOMEM || ret == -ENOBUFS) { + __clear_bit(ackbit, &call->ackr_window); + ack = RXRPC_ACK_NOSPACE; + goto discard_and_ack; + } + goto out; + } + + skb = NULL; + + _debug("post #%u", seq); + ASSERTCMP(call->rx_data_post, ==, seq); + call->rx_data_post++; + + if (sp->hdr.flags & RXRPC_LAST_PACKET) + set_bit(RXRPC_CALL_RCVD_LAST, &call->flags); + + /* if we've reached an out of sequence packet then we need to drain + * that queue into the socket Rx queue now */ + if (call->rx_data_post == call->rx_first_oos) { + _debug("drain rx oos now"); + read_lock(&call->state_lock); + if (call->state < 
RXRPC_CALL_COMPLETE && + !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) + schedule_work(&call->processor); + read_unlock(&call->state_lock); + } + + spin_unlock(&call->lock); + atomic_inc(&call->ackr_not_idle); + rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false); + _leave(" = 0 [posted]"); + return 0; + +protocol_error: + ret = -EBADMSG; +out: + spin_unlock(&call->lock); + _leave(" = %d", ret); + return ret; + +discard_and_ack: + _debug("discard and ACK packet %p", skb); + __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); +discard: + spin_unlock(&call->lock); + rxrpc_free_skb(skb); + _leave(" = 0 [discarded]"); + return 0; + +enqueue_and_ack: + __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); +enqueue_packet: + _net("defer skb %p", skb); + spin_unlock(&call->lock); + skb_queue_tail(&call->rx_queue, skb); + atomic_inc(&call->ackr_not_idle); + read_lock(&call->state_lock); + if (call->state < RXRPC_CALL_DEAD) + schedule_work(&call->processor); + read_unlock(&call->state_lock); + _leave(" = 0 [queued]"); + return 0; +} + +/* + * assume an implicit ACKALL of the transmission phase of a client socket upon + * reception of the first reply packet + */ +static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial) +{ + write_lock_bh(&call->state_lock); + + switch (call->state) { + case RXRPC_CALL_CLIENT_AWAIT_REPLY: + call->state = RXRPC_CALL_CLIENT_RECV_REPLY; + call->acks_latest = serial; + + _debug("implicit ACKALL %%%u", call->acks_latest); + set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events); + write_unlock_bh(&call->state_lock); + + if (try_to_del_timer_sync(&call->resend_timer) >= 0) { + clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); + clear_bit(RXRPC_CALL_RESEND, &call->events); + clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); + } + break; + + default: + write_unlock_bh(&call->state_lock); + break; + } +} + +/* + * post an incoming packet to the nominated call to deal with + * - must get rid of the sk_buff, either by freeing it or by queuing it + */ +void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + __be32 _abort_code; + u32 serial, hi_serial, seq, abort_code; + + _enter("%p,%p", call, skb); + + ASSERT(!irqs_disabled()); + +#if 0 // INJECT RX ERROR + if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) { + static int skip = 0; + if (++skip == 3) { + printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n"); + skip = 0; + goto free_packet; + } + } +#endif + + /* track the latest serial number on this connection for ACK packet + * information */ + serial = ntohl(sp->hdr.serial); + hi_serial = atomic_read(&call->conn->hi_serial); + while (serial > hi_serial) + hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial, + serial); + + /* request ACK generation for any ACK or DATA packet that requests + * it */ + if (sp->hdr.flags & RXRPC_REQUEST_ACK) { + _proto("ACK Requested on %%%u", serial); + rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, + !(sp->hdr.flags & RXRPC_MORE_PACKETS)); + } + + switch (sp->hdr.type) { + case RXRPC_PACKET_TYPE_ABORT: + _debug("abort"); + + if (skb_copy_bits(skb, 0, &_abort_code, + sizeof(_abort_code)) < 0) + goto protocol_error; + + abort_code = ntohl(_abort_code); + _proto("Rx ABORT %%%u { %x }", serial, abort_code); + + write_lock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE) { + call->state = RXRPC_CALL_REMOTELY_ABORTED; + call->abort_code = abort_code; + set_bit(RXRPC_CALL_RCVD_ABORT, &call->events); + 
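+			/* hand the received abort over to the call processor
+			 * work item for completion */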
schedule_work(&call->processor); + } + goto free_packet_unlock; + + case RXRPC_PACKET_TYPE_BUSY: + _proto("Rx BUSY %%%u", serial); + + if (call->conn->out_clientflag) + goto protocol_error; + + write_lock_bh(&call->state_lock); + switch (call->state) { + case RXRPC_CALL_CLIENT_SEND_REQUEST: + call->state = RXRPC_CALL_SERVER_BUSY; + set_bit(RXRPC_CALL_RCVD_BUSY, &call->events); + schedule_work(&call->processor); + case RXRPC_CALL_SERVER_BUSY: + goto free_packet_unlock; + default: + goto protocol_error_locked; + } + + default: + _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial); + goto protocol_error; + + case RXRPC_PACKET_TYPE_DATA: + seq = ntohl(sp->hdr.seq); + + _proto("Rx DATA %%%u { #%u }", serial, seq); + + if (seq == 0) + goto protocol_error; + + call->ackr_prev_seq = sp->hdr.seq; + + /* received data implicitly ACKs all of the request packets we + * sent when we're acting as a client */ + if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) + rxrpc_assume_implicit_ackall(call, serial); + + switch (rxrpc_fast_process_data(call, skb, seq)) { + case 0: + skb = NULL; + goto done; + + default: + BUG(); + + /* data packet received beyond the last packet */ + case -EBADMSG: + goto protocol_error; + } + + case RXRPC_PACKET_TYPE_ACK: + /* ACK processing is done in process context */ + read_lock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_DEAD) { + skb_queue_tail(&call->rx_queue, skb); + schedule_work(&call->processor); + skb = NULL; + } + read_unlock_bh(&call->state_lock); + goto free_packet; + } + +protocol_error: + _debug("protocol error"); + write_lock_bh(&call->state_lock); +protocol_error_locked: + if (call->state <= RXRPC_CALL_COMPLETE) { + call->state = RXRPC_CALL_LOCALLY_ABORTED; + call->abort_code = RX_PROTOCOL_ERROR; + set_bit(RXRPC_CALL_ABORT, &call->events); + schedule_work(&call->processor); + } +free_packet_unlock: + write_unlock_bh(&call->state_lock); +free_packet: + rxrpc_free_skb(skb); +done: + _leave(""); +} + +/* + * split up a jumbo data packet + */ +static void rxrpc_process_jumbo_packet(struct rxrpc_call *call, + struct sk_buff *jumbo) +{ + struct rxrpc_jumbo_header jhdr; + struct rxrpc_skb_priv *sp; + struct sk_buff *part; + + _enter(",{%u,%u}", jumbo->data_len, jumbo->len); + + sp = rxrpc_skb(jumbo); + + do { + sp->hdr.flags &= ~RXRPC_JUMBO_PACKET; + + /* make a clone to represent the first subpacket in what's left + * of the jumbo packet */ + part = skb_clone(jumbo, GFP_ATOMIC); + if (!part) { + /* simply ditch the tail in the event of ENOMEM */ + pskb_trim(jumbo, RXRPC_JUMBO_DATALEN); + break; + } + rxrpc_new_skb(part); + + pskb_trim(part, RXRPC_JUMBO_DATALEN); + + if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN)) + goto protocol_error; + + if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0) + goto protocol_error; + if (!pskb_pull(jumbo, sizeof(jhdr))) + BUG(); + + sp->hdr.seq = htonl(ntohl(sp->hdr.seq) + 1); + sp->hdr.serial = htonl(ntohl(sp->hdr.serial) + 1); + sp->hdr.flags = jhdr.flags; + sp->hdr._rsvd = jhdr._rsvd; + + _proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1); + + rxrpc_fast_process_packet(call, part); + part = NULL; + + } while (sp->hdr.flags & RXRPC_JUMBO_PACKET); + + rxrpc_fast_process_packet(call, jumbo); + _leave(""); + return; + +protocol_error: + _debug("protocol error"); + rxrpc_free_skb(part); + rxrpc_free_skb(jumbo); + write_lock_bh(&call->state_lock); + if (call->state <= RXRPC_CALL_COMPLETE) { + call->state = RXRPC_CALL_LOCALLY_ABORTED; + call->abort_code = RX_PROTOCOL_ERROR; + set_bit(RXRPC_CALL_ABORT, &call->events); + 
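+		/* queue the call processor to carry out the local abort for the
+		 * protocol error */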
schedule_work(&call->processor); + } + write_unlock_bh(&call->state_lock); + _leave(""); +} + +/* + * post an incoming packet to the appropriate call/socket to deal with + * - must get rid of the sk_buff, either by freeing it or by queuing it + */ +static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn, + struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp; + struct rxrpc_call *call; + struct rb_node *p; + __be32 call_id; + + _enter("%p,%p", conn, skb); + + read_lock_bh(&conn->lock); + + sp = rxrpc_skb(skb); + + /* look at extant calls by channel number first */ + call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK]; + if (!call || call->call_id != sp->hdr.callNumber) + goto call_not_extant; + + _debug("extant call [%d]", call->state); + ASSERTCMP(call->conn, ==, conn); + + read_lock(&call->state_lock); + switch (call->state) { + case RXRPC_CALL_LOCALLY_ABORTED: + if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) + schedule_work(&call->processor); + case RXRPC_CALL_REMOTELY_ABORTED: + case RXRPC_CALL_NETWORK_ERROR: + case RXRPC_CALL_DEAD: + goto free_unlock; + default: + break; + } + + read_unlock(&call->state_lock); + rxrpc_get_call(call); + read_unlock_bh(&conn->lock); + + if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && + sp->hdr.flags & RXRPC_JUMBO_PACKET) + rxrpc_process_jumbo_packet(call, skb); + else + rxrpc_fast_process_packet(call, skb); + + rxrpc_put_call(call); + goto done; + +call_not_extant: + /* search the completed calls in case what we're dealing with is + * there */ + _debug("call not extant"); + + call_id = sp->hdr.callNumber; + p = conn->calls.rb_node; + while (p) { + call = rb_entry(p, struct rxrpc_call, conn_node); + + if (call_id < call->call_id) + p = p->rb_left; + else if (call_id > call->call_id) + p = p->rb_right; + else + goto found_completed_call; + } + +dead_call: + /* it's a either a really old call that we no longer remember or its a + * new incoming call */ + read_unlock_bh(&conn->lock); + + if (sp->hdr.flags & RXRPC_CLIENT_INITIATED && + sp->hdr.seq == __constant_cpu_to_be32(1)) { + _debug("incoming call"); + skb_queue_tail(&conn->trans->local->accept_queue, skb); + schedule_work(&conn->trans->local->acceptor); + goto done; + } + + _debug("dead call"); + skb->priority = RX_CALL_DEAD; + rxrpc_reject_packet(conn->trans->local, skb); + goto done; + + /* resend last packet of a completed call + * - client calls may have been aborted or ACK'd + * - server calls may have been aborted + */ +found_completed_call: + _debug("completed call"); + + if (atomic_read(&call->usage) == 0) + goto dead_call; + + /* synchronise any state changes */ + read_lock(&call->state_lock); + ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK, + call->state, >=, RXRPC_CALL_COMPLETE); + + if (call->state == RXRPC_CALL_LOCALLY_ABORTED || + call->state == RXRPC_CALL_REMOTELY_ABORTED || + call->state == RXRPC_CALL_DEAD) { + read_unlock(&call->state_lock); + goto dead_call; + } + + if (call->conn->in_clientflag) { + read_unlock(&call->state_lock); + goto dead_call; /* complete server call */ + } + + _debug("final ack again"); + rxrpc_get_call(call); + set_bit(RXRPC_CALL_ACK_FINAL, &call->events); + schedule_work(&call->processor); + +free_unlock: + read_unlock(&call->state_lock); + read_unlock_bh(&conn->lock); + rxrpc_free_skb(skb); +done: + _leave(""); +} + +/* + * post connection-level events to the connection + * - this includes challenges, responses and some aborts + */ +static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, + struct sk_buff 
*skb) +{ + _enter("%p,%p", conn, skb); + + atomic_inc(&conn->usage); + skb_queue_tail(&conn->rx_queue, skb); + schedule_work(&conn->processor); +} + +/* + * handle data received on the local endpoint + * - may be called in interrupt context + */ +void rxrpc_data_ready(struct sock *sk, int count) +{ + struct rxrpc_connection *conn; + struct rxrpc_transport *trans; + struct rxrpc_skb_priv *sp; + struct rxrpc_local *local; + struct rxrpc_peer *peer; + struct sk_buff *skb; + int ret; + + _enter("%p, %d", sk, count); + + ASSERT(!irqs_disabled()); + + read_lock_bh(&rxrpc_local_lock); + local = sk->sk_user_data; + if (local && atomic_read(&local->usage) > 0) + rxrpc_get_local(local); + else + local = NULL; + read_unlock_bh(&rxrpc_local_lock); + if (!local) { + _leave(" [local dead]"); + return; + } + + skb = skb_recv_datagram(sk, 0, 1, &ret); + if (!skb) { + rxrpc_put_local(local); + if (ret == -EAGAIN) + return; + _debug("UDP socket error %d", ret); + return; + } + + rxrpc_new_skb(skb); + + _net("recv skb %p", skb); + + /* we'll probably need to checksum it (didn't call sock_recvmsg) */ + if (skb_checksum_complete(skb)) { + rxrpc_free_skb(skb); + rxrpc_put_local(local); + _leave(" [CSUM failed]"); + return; + } + + /* the socket buffer we have is owned by UDP, with UDP's data all over + * it, but we really want our own */ + skb_orphan(skb); + sp = rxrpc_skb(skb); + memset(sp, 0, sizeof(*sp)); + + _net("Rx UDP packet from %08x:%04hu", + ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source)); + + /* dig out the RxRPC connection details */ + if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr, + sizeof(sp->hdr)) < 0) + goto bad_message; + if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr))) + BUG(); + + _net("Rx RxRPC %s ep=%x call=%x:%x", + sp->hdr.flags & RXRPC_CLIENT_INITIATED ? 
"ToServer" : "ToClient", + ntohl(sp->hdr.epoch), + ntohl(sp->hdr.cid), + ntohl(sp->hdr.callNumber)); + + if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) { + _proto("Rx Bad Packet Type %u", sp->hdr.type); + goto bad_message; + } + + if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && + (sp->hdr.callNumber == 0 || sp->hdr.seq == 0)) + goto bad_message; + + peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, udp_hdr(skb)->source); + if (IS_ERR(peer)) + goto cant_route_call; + + trans = rxrpc_find_transport(local, peer); + rxrpc_put_peer(peer); + if (!trans) + goto cant_route_call; + + conn = rxrpc_find_connection(trans, &sp->hdr); + rxrpc_put_transport(trans); + if (!conn) + goto cant_route_call; + + _debug("CONN %p {%d}", conn, conn->debug_id); + + if (sp->hdr.callNumber == 0) + rxrpc_post_packet_to_conn(conn, skb); + else + rxrpc_post_packet_to_call(conn, skb); + rxrpc_put_connection(conn); + rxrpc_put_local(local); + return; + +cant_route_call: + _debug("can't route call"); + if (sp->hdr.flags & RXRPC_CLIENT_INITIATED && + sp->hdr.type == RXRPC_PACKET_TYPE_DATA) { + if (sp->hdr.seq == __constant_cpu_to_be32(1)) { + _debug("first packet"); + skb_queue_tail(&local->accept_queue, skb); + schedule_work(&local->acceptor); + rxrpc_put_local(local); + _leave(" [incoming]"); + return; + } + skb->priority = RX_INVALID_OPERATION; + } else { + skb->priority = RX_CALL_DEAD; + } + + _debug("reject"); + rxrpc_reject_packet(local, skb); + rxrpc_put_local(local); + _leave(" [no call]"); + return; + +bad_message: + skb->priority = RX_PROTOCOL_ERROR; + rxrpc_reject_packet(local, skb); + rxrpc_put_local(local); + _leave(" [badmsg]"); +} diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h new file mode 100644 index 000000000000..7bfbf471c81e --- /dev/null +++ b/net/rxrpc/ar-internal.h @@ -0,0 +1,842 @@ +/* AF_RXRPC internal definitions + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include + +#if 0 +#define CHECK_SLAB_OKAY(X) \ + BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \ + (POISON_FREE << 8 | POISON_FREE)) +#else +#define CHECK_SLAB_OKAY(X) do {} while(0) +#endif + +extern atomic_t rxrpc_n_skbs; + +#define FCRYPT_BSIZE 8 +struct rxrpc_crypt { + union { + u8 x[FCRYPT_BSIZE]; + u32 n[2]; + }; +} __attribute__((aligned(8))); + +extern __be32 rxrpc_epoch; /* local epoch for detecting local-end reset */ +extern atomic_t rxrpc_debug_id; /* current debugging ID */ + +/* + * sk_state for RxRPC sockets + */ +enum { + RXRPC_UNCONNECTED = 0, + RXRPC_CLIENT_BOUND, /* client local address bound */ + RXRPC_CLIENT_CONNECTED, /* client is connected */ + RXRPC_SERVER_BOUND, /* server local address bound */ + RXRPC_SERVER_LISTENING, /* server listening for connections */ + RXRPC_CLOSE, /* socket is being closed */ +}; + +/* + * RxRPC socket definition + */ +struct rxrpc_sock { + /* WARNING: sk has to be the first member */ + struct sock sk; + struct rxrpc_local *local; /* local endpoint */ + struct rxrpc_transport *trans; /* transport handler */ + struct rxrpc_conn_bundle *bundle; /* virtual connection bundle */ + struct rxrpc_connection *conn; /* exclusive virtual connection */ + struct list_head listen_link; /* link in the local endpoint's listen list */ + struct list_head secureq; /* calls awaiting connection security clearance */ + struct list_head acceptq; /* calls awaiting acceptance */ + struct key *key; /* security for this socket */ + struct key *securities; /* list of server security descriptors */ + struct rb_root calls; /* outstanding calls on this socket */ + unsigned long flags; +#define RXRPC_SOCK_EXCLUSIVE_CONN 1 /* exclusive connection for a client socket */ + rwlock_t call_lock; /* lock for calls */ + u32 min_sec_level; /* minimum security level */ +#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT + struct sockaddr_rxrpc srx; /* local address */ + sa_family_t proto; /* protocol created with */ + __be16 service_id; /* service ID of local/remote service */ +}; + +#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk) + +/* + * RxRPC socket buffer private variables + * - max 48 bytes (struct sk_buff::cb) + */ +struct rxrpc_skb_priv { + struct rxrpc_call *call; /* call with which associated */ + unsigned long resend_at; /* time in jiffies at which to resend */ + union { + unsigned offset; /* offset into buffer of next read */ + int remain; /* amount of space remaining for next write */ + u32 error; /* network error code */ + bool need_resend; /* T if needs resending */ + }; + + struct rxrpc_header hdr; /* RxRPC packet header from this packet */ +}; + +#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb) + +enum { + RXRPC_SKB_MARK_DATA, /* data message */ + RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */ + RXRPC_SKB_MARK_BUSY, /* server busy message */ + RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */ + RXRPC_SKB_MARK_NET_ERROR, /* network error message */ + RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */ + RXRPC_SKB_MARK_NEW_CALL, /* local error message */ +}; + +enum rxrpc_command { + RXRPC_CMD_SEND_DATA, /* send data message */ + RXRPC_CMD_SEND_ABORT, /* request abort generation */ + RXRPC_CMD_ACCEPT, /* [server] accept incoming call */ + RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */ +}; + +/* + * RxRPC security module interface + */ +struct rxrpc_security { + struct module *owner; /* providing module */ + struct list_head link; /* link in master list */ + const char *name; /* name 
of this service */ + u8 security_index; /* security type provided */ + + /* initialise a connection's security */ + int (*init_connection_security)(struct rxrpc_connection *); + + /* prime a connection's packet security */ + void (*prime_packet_security)(struct rxrpc_connection *); + + /* impose security on a packet */ + int (*secure_packet)(const struct rxrpc_call *, + struct sk_buff *, + size_t, + void *); + + /* verify the security on a received packet */ + int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *, + u32 *); + + /* issue a challenge */ + int (*issue_challenge)(struct rxrpc_connection *); + + /* respond to a challenge */ + int (*respond_to_challenge)(struct rxrpc_connection *, + struct sk_buff *, + u32 *); + + /* verify a response */ + int (*verify_response)(struct rxrpc_connection *, + struct sk_buff *, + u32 *); + + /* clear connection security */ + void (*clear)(struct rxrpc_connection *); +}; + +/* + * RxRPC local transport endpoint definition + * - matched by local port, address and protocol type + */ +struct rxrpc_local { + struct socket *socket; /* my UDP socket */ + struct work_struct destroyer; /* endpoint destroyer */ + struct work_struct acceptor; /* incoming call processor */ + struct work_struct rejecter; /* packet reject writer */ + struct list_head services; /* services listening on this endpoint */ + struct list_head link; /* link in endpoint list */ + struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */ + struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */ + struct sk_buff_head reject_queue; /* packets awaiting rejection */ + spinlock_t lock; /* access lock */ + rwlock_t services_lock; /* lock for services list */ + atomic_t usage; + int debug_id; /* debug ID for printks */ + volatile char error_rcvd; /* T if received ICMP error outstanding */ + struct sockaddr_rxrpc srx; /* local address */ +}; + +/* + * RxRPC remote transport endpoint definition + * - matched by remote port, address and protocol type + * - holds the connection ID counter for connections between the two endpoints + */ +struct rxrpc_peer { + struct work_struct destroyer; /* peer destroyer */ + struct list_head link; /* link in master peer list */ + struct list_head error_targets; /* targets for net error distribution */ + spinlock_t lock; /* access lock */ + atomic_t usage; + unsigned if_mtu; /* interface MTU for this peer */ + unsigned mtu; /* network MTU for this peer */ + unsigned maxdata; /* data size (MTU - hdrsize) */ + unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ + int debug_id; /* debug ID for printks */ + int net_error; /* network error distributed */ + struct sockaddr_rxrpc srx; /* remote address */ + + /* calculated RTT cache */ +#define RXRPC_RTT_CACHE_SIZE 32 + suseconds_t rtt; /* current RTT estimate (in uS) */ + unsigned rtt_point; /* next entry at which to insert */ + unsigned rtt_usage; /* amount of cache actually used */ + suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */ +}; + +/* + * RxRPC point-to-point transport / connection manager definition + * - handles a bundle of connections between two endpoints + * - matched by { local, peer } + */ +struct rxrpc_transport { + struct rxrpc_local *local; /* local transport endpoint */ + struct rxrpc_peer *peer; /* remote transport endpoint */ + struct work_struct error_handler; /* network error distributor */ + struct rb_root bundles; /* client connection bundles on this transport */ + struct rb_root client_conns; /* client connections on 
this transport */ + struct rb_root server_conns; /* server connections on this transport */ + struct list_head link; /* link in master session list */ + struct sk_buff_head error_queue; /* error packets awaiting processing */ + time_t put_time; /* time at which to reap */ + spinlock_t client_lock; /* client connection allocation lock */ + rwlock_t conn_lock; /* lock for active/dead connections */ + atomic_t usage; + int debug_id; /* debug ID for printks */ + unsigned int conn_idcounter; /* connection ID counter (client) */ +}; + +/* + * RxRPC client connection bundle + * - matched by { transport, service_id, key } + */ +struct rxrpc_conn_bundle { + struct rb_node node; /* node in transport's lookup tree */ + struct list_head unused_conns; /* unused connections in this bundle */ + struct list_head avail_conns; /* available connections in this bundle */ + struct list_head busy_conns; /* busy connections in this bundle */ + struct key *key; /* security for this bundle */ + wait_queue_head_t chanwait; /* wait for channel to become available */ + atomic_t usage; + int debug_id; /* debug ID for printks */ + unsigned short num_conns; /* number of connections in this bundle */ + __be16 service_id; /* service ID */ + uint8_t security_ix; /* security type */ +}; + +/* + * RxRPC connection definition + * - matched by { transport, service_id, conn_id, direction, key } + * - each connection can only handle four simultaneous calls + */ +struct rxrpc_connection { + struct rxrpc_transport *trans; /* transport session */ + struct rxrpc_conn_bundle *bundle; /* connection bundle (client) */ + struct work_struct processor; /* connection event processor */ + struct rb_node node; /* node in transport's lookup tree */ + struct list_head link; /* link in master connection list */ + struct list_head bundle_link; /* link in bundle */ + struct rb_root calls; /* calls on this connection */ + struct sk_buff_head rx_queue; /* received conn-level packets */ + struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* channels (active calls) */ + struct rxrpc_security *security; /* applied security module */ + struct key *key; /* security for this connection (client) */ + struct key *server_key; /* security for this service */ + struct crypto_blkcipher *cipher; /* encryption handle */ + struct rxrpc_crypt csum_iv; /* packet checksum base */ + unsigned long events; +#define RXRPC_CONN_CHALLENGE 0 /* send challenge packet */ + time_t put_time; /* time at which to reap */ + rwlock_t lock; /* access lock */ + spinlock_t state_lock; /* state-change lock */ + atomic_t usage; + u32 real_conn_id; /* connection ID (host-endian) */ + enum { /* current state of connection */ + RXRPC_CONN_UNUSED, /* - connection not yet attempted */ + RXRPC_CONN_CLIENT, /* - client connection */ + RXRPC_CONN_SERVER_UNSECURED, /* - server unsecured connection */ + RXRPC_CONN_SERVER_CHALLENGING, /* - server challenging for security */ + RXRPC_CONN_SERVER, /* - server secured connection */ + RXRPC_CONN_REMOTELY_ABORTED, /* - conn aborted by peer */ + RXRPC_CONN_LOCALLY_ABORTED, /* - conn aborted locally */ + RXRPC_CONN_NETWORK_ERROR, /* - conn terminated by network error */ + } state; + int error; /* error code for local abort */ + int debug_id; /* debug ID for printks */ + unsigned call_counter; /* call ID counter */ + atomic_t serial; /* packet serial number counter */ + atomic_t hi_serial; /* highest serial number received */ + u8 avail_calls; /* number of calls available */ + u8 size_align; /* data size alignment (for security) */ + u8 header_size; /* rxrpc + 
security header size */ + u8 security_size; /* security header size */ + u32 security_level; /* security level negotiated */ + u32 security_nonce; /* response re-use preventer */ + + /* the following are all in net order */ + __be32 epoch; /* epoch of this connection */ + __be32 cid; /* connection ID */ + __be16 service_id; /* service ID */ + u8 security_ix; /* security type */ + u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */ + u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ +}; + +/* + * RxRPC call definition + * - matched by { connection, call_id } + */ +struct rxrpc_call { + struct rxrpc_connection *conn; /* connection carrying call */ + struct rxrpc_sock *socket; /* socket responsible */ + struct timer_list lifetimer; /* lifetime remaining on call */ + struct timer_list deadspan; /* reap timer for re-ACK'ing, etc */ + struct timer_list ack_timer; /* ACK generation timer */ + struct timer_list resend_timer; /* Tx resend timer */ + struct work_struct destroyer; /* call destroyer */ + struct work_struct processor; /* packet processor and ACK generator */ + struct list_head link; /* link in master call list */ + struct list_head error_link; /* link in error distribution list */ + struct list_head accept_link; /* calls awaiting acceptance */ + struct rb_node sock_node; /* node in socket call tree */ + struct rb_node conn_node; /* node in connection call tree */ + struct sk_buff_head rx_queue; /* received packets */ + struct sk_buff_head rx_oos_queue; /* packets received out of sequence */ + struct sk_buff *tx_pending; /* Tx socket buffer being filled */ + wait_queue_head_t tx_waitq; /* wait for Tx window space to become available */ + unsigned long user_call_ID; /* user-defined call ID */ + unsigned long creation_jif; /* time of call creation */ + unsigned long flags; +#define RXRPC_CALL_RELEASED 0 /* call has been released - no more message to userspace */ +#define RXRPC_CALL_TERMINAL_MSG 1 /* call has given the socket its final message */ +#define RXRPC_CALL_RCVD_LAST 2 /* all packets received */ +#define RXRPC_CALL_RUN_RTIMER 3 /* Tx resend timer started */ +#define RXRPC_CALL_TX_SOFT_ACK 4 /* sent some soft ACKs */ +#define RXRPC_CALL_PROC_BUSY 5 /* the processor is busy */ +#define RXRPC_CALL_INIT_ACCEPT 6 /* acceptance was initiated */ +#define RXRPC_CALL_HAS_USERID 7 /* has a user ID attached */ +#define RXRPC_CALL_EXPECT_OOS 8 /* expect out of sequence packets */ + unsigned long events; +#define RXRPC_CALL_RCVD_ACKALL 0 /* ACKALL or reply received */ +#define RXRPC_CALL_RCVD_BUSY 1 /* busy packet received */ +#define RXRPC_CALL_RCVD_ABORT 2 /* abort packet received */ +#define RXRPC_CALL_RCVD_ERROR 3 /* network error received */ +#define RXRPC_CALL_ACK_FINAL 4 /* need to generate final ACK (and release call) */ +#define RXRPC_CALL_ACK 5 /* need to generate ACK */ +#define RXRPC_CALL_REJECT_BUSY 6 /* need to generate busy message */ +#define RXRPC_CALL_ABORT 7 /* need to generate abort */ +#define RXRPC_CALL_CONN_ABORT 8 /* local connection abort generated */ +#define RXRPC_CALL_RESEND_TIMER 9 /* Tx resend timer expired */ +#define RXRPC_CALL_RESEND 10 /* Tx resend required */ +#define RXRPC_CALL_DRAIN_RX_OOS 11 /* drain the Rx out of sequence queue */ +#define RXRPC_CALL_LIFE_TIMER 12 /* call's lifetimer ran out */ +#define RXRPC_CALL_ACCEPTED 13 /* incoming call accepted by userspace app */ +#define RXRPC_CALL_SECURED 14 /* incoming call's connection is now secure */ +#define RXRPC_CALL_POST_ACCEPT 15 /* need to post an "accept?" 
message to the app */ +#define RXRPC_CALL_RELEASE 16 /* need to release the call's resources */ + + spinlock_t lock; + rwlock_t state_lock; /* lock for state transition */ + atomic_t usage; + atomic_t sequence; /* Tx data packet sequence counter */ + u32 abort_code; /* local/remote abort code */ + enum { /* current state of call */ + RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */ + RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */ + RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */ + RXRPC_CALL_CLIENT_FINAL_ACK, /* - client sending final ACK phase */ + RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */ + RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */ + RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */ + RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */ + RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */ + RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */ + RXRPC_CALL_COMPLETE, /* - call completed */ + RXRPC_CALL_SERVER_BUSY, /* - call rejected by busy server */ + RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ + RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ + RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ + RXRPC_CALL_DEAD, /* - call is dead */ + } state; + int debug_id; /* debug ID for printks */ + u8 channel; /* connection channel occupied by this call */ + + /* transmission-phase ACK management */ + uint8_t acks_head; /* offset into window of first entry */ + uint8_t acks_tail; /* offset into window of last entry */ + uint8_t acks_winsz; /* size of un-ACK'd window */ + uint8_t acks_unacked; /* lowest unacked packet in last ACK received */ + int acks_latest; /* serial number of latest ACK received */ + rxrpc_seq_t acks_hard; /* highest definitively ACK'd msg seq */ + unsigned long *acks_window; /* sent packet window + * - elements are pointers with LSB set if ACK'd + */ + + /* receive-phase ACK management */ + rxrpc_seq_t rx_data_expect; /* next data seq ID expected to be received */ + rxrpc_seq_t rx_data_post; /* next data seq ID expected to be posted */ + rxrpc_seq_t rx_data_recv; /* last data seq ID encountered by recvmsg */ + rxrpc_seq_t rx_data_eaten; /* last data seq ID consumed by recvmsg */ + rxrpc_seq_t rx_first_oos; /* first packet in rx_oos_queue (or 0) */ + rxrpc_seq_t ackr_win_top; /* top of ACK window (rx_data_eaten is bottom) */ + rxrpc_seq_net_t ackr_prev_seq; /* previous sequence number received */ + uint8_t ackr_reason; /* reason to ACK */ + __be32 ackr_serial; /* serial of packet being ACK'd */ + atomic_t ackr_not_idle; /* number of packets in Rx queue */ + + /* received packet records, 1 bit per record */ +#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG) + unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1]; + + /* the following should all be in net order */ + __be32 cid; /* connection ID + channel index */ + __be32 call_id; /* call ID on connection */ +}; + +/* + * RxRPC key for Kerberos (type-2 security) + */ +struct rxkad_key { + u16 security_index; /* RxRPC header security index */ + u16 ticket_len; /* length of ticket[] */ + u32 expiry; /* time at which expires */ + u32 kvno; /* key version number */ + u8 session_key[8]; /* DES session key */ + u8 ticket[0]; /* the encrypted ticket */ +}; + +struct rxrpc_key_payload { + struct rxkad_key k; +}; + +/* + * locally abort an RxRPC call + */ +static inline void rxrpc_abort_call(struct rxrpc_call 
*call, u32 abort_code) +{ + write_lock_bh(&call->state_lock); + if (call->state < RXRPC_CALL_COMPLETE) { + call->abort_code = abort_code; + call->state = RXRPC_CALL_LOCALLY_ABORTED; + set_bit(RXRPC_CALL_ABORT, &call->events); + } + write_unlock_bh(&call->state_lock); +} + +/* + * put a packet up for transport-level abort + */ +static inline +void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) +{ + CHECK_SLAB_OKAY(&local->usage); + if (!atomic_inc_not_zero(&local->usage)) { + printk("resurrected on reject\n"); + BUG(); + } + skb_queue_tail(&local->reject_queue, skb); + schedule_work(&local->rejecter); +} + +/* + * ar-accept.c + */ +extern void rxrpc_accept_incoming_calls(struct work_struct *); +extern int rxrpc_accept_call(struct rxrpc_sock *, unsigned long); + +/* + * ar-ack.c + */ +extern void __rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool); +extern void rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool); +extern void rxrpc_process_call(struct work_struct *); + +/* + * ar-call.c + */ +extern struct kmem_cache *rxrpc_call_jar; +extern struct list_head rxrpc_calls; +extern rwlock_t rxrpc_call_lock; + +extern struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *, + struct rxrpc_transport *, + struct rxrpc_conn_bundle *, + unsigned long, int, gfp_t); +extern struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *, + struct rxrpc_connection *, + struct rxrpc_header *, gfp_t); +extern struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, + unsigned long); +extern void rxrpc_release_call(struct rxrpc_call *); +extern void rxrpc_release_calls_on_socket(struct rxrpc_sock *); +extern void __rxrpc_put_call(struct rxrpc_call *); +extern void __exit rxrpc_destroy_all_calls(void); + +/* + * ar-connection.c + */ +extern struct list_head rxrpc_connections; +extern rwlock_t rxrpc_connection_lock; + +extern struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *, + struct rxrpc_transport *, + struct key *, + __be16, gfp_t); +extern void rxrpc_put_bundle(struct rxrpc_transport *, + struct rxrpc_conn_bundle *); +extern int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *, + struct rxrpc_conn_bundle *, struct rxrpc_call *, + gfp_t); +extern void rxrpc_put_connection(struct rxrpc_connection *); +extern void __exit rxrpc_destroy_all_connections(void); +extern struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *, + struct rxrpc_header *); +extern struct rxrpc_connection * +rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *, + gfp_t); + +/* + * ar-connevent.c + */ +extern void rxrpc_process_connection(struct work_struct *); +extern void rxrpc_reject_packets(struct work_struct *); + +/* + * ar-error.c + */ +extern void rxrpc_UDP_error_report(struct sock *); +extern void rxrpc_UDP_error_handler(struct work_struct *); + +/* + * ar-input.c + */ +extern unsigned long rxrpc_ack_timeout; +extern const char *rxrpc_pkts[]; + +extern void rxrpc_data_ready(struct sock *, int); +extern int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, + bool); +extern void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); + +/* + * ar-local.c + */ +extern rwlock_t rxrpc_local_lock; +extern struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *); +extern void rxrpc_put_local(struct rxrpc_local *); +extern void __exit rxrpc_destroy_all_locals(void); + +/* + * ar-key.c + */ +extern struct key_type key_type_rxrpc; +extern struct key_type key_type_rxrpc_s; + +extern int 
rxrpc_request_key(struct rxrpc_sock *, char __user *, int); +extern int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int); +extern int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, + time_t, u32); + +/* + * ar-output.c + */ +extern int rxrpc_resend_timeout; + +extern int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *); +extern int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *, + struct rxrpc_transport *, struct msghdr *, + size_t); +extern int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, + struct msghdr *, size_t); + +/* + * ar-peer.c + */ +extern struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t); +extern void rxrpc_put_peer(struct rxrpc_peer *); +extern struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, + __be32, __be16); +extern void __exit rxrpc_destroy_all_peers(void); + +/* + * ar-proc.c + */ +extern const char *rxrpc_call_states[]; +extern struct file_operations rxrpc_call_seq_fops; +extern struct file_operations rxrpc_connection_seq_fops; + +/* + * ar-recvmsg.c + */ +extern int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, + size_t, int); + +/* + * ar-security.c + */ +extern int rxrpc_register_security(struct rxrpc_security *); +extern void rxrpc_unregister_security(struct rxrpc_security *); +extern int rxrpc_init_client_conn_security(struct rxrpc_connection *); +extern int rxrpc_init_server_conn_security(struct rxrpc_connection *); +extern int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, + size_t, void *); +extern int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, + u32 *); +extern void rxrpc_clear_conn_security(struct rxrpc_connection *); + +/* + * ar-skbuff.c + */ +extern void rxrpc_packet_destructor(struct sk_buff *); + +/* + * ar-transport.c + */ +extern struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *, + struct rxrpc_peer *, + gfp_t); +extern void rxrpc_put_transport(struct rxrpc_transport *); +extern void __exit rxrpc_destroy_all_transports(void); +extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *, + struct rxrpc_peer *); + +/* + * debug tracing + */ +extern unsigned rxrpc_debug; + +#define dbgprintk(FMT,...) \ + printk("[%x%-6.6s] "FMT"\n", smp_processor_id(), current->comm ,##__VA_ARGS__) + +/* make sure we maintain the format strings, even when debugging is disabled */ +static inline __attribute__((format(printf,1,2))) +void _dbprintk(const char *fmt, ...) +{ +} + +#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) +#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) +#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__) +#define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__) +#define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__) + + +#if defined(__KDEBUG) +#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__) +#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__) +#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__) +#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__) +#define _net(FMT,...) knet(FMT,##__VA_ARGS__) + +#elif defined(CONFIG_AF_RXRPC_DEBUG) +#define RXRPC_DEBUG_KENTER 0x01 +#define RXRPC_DEBUG_KLEAVE 0x02 +#define RXRPC_DEBUG_KDEBUG 0x04 +#define RXRPC_DEBUG_KPROTO 0x08 +#define RXRPC_DEBUG_KNET 0x10 + +#define _enter(FMT,...) \ +do { \ + if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \ + kenter(FMT,##__VA_ARGS__); \ +} while (0) + +#define _leave(FMT,...) 
\ +do { \ + if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \ + kleave(FMT,##__VA_ARGS__); \ +} while (0) + +#define _debug(FMT,...) \ +do { \ + if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \ + kdebug(FMT,##__VA_ARGS__); \ +} while (0) + +#define _proto(FMT,...) \ +do { \ + if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO)) \ + kproto(FMT,##__VA_ARGS__); \ +} while (0) + +#define _net(FMT,...) \ +do { \ + if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET)) \ + knet(FMT,##__VA_ARGS__); \ +} while (0) + +#else +#define _enter(FMT,...) _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__) +#define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__) +#define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__) +#define _proto(FMT,...) _dbprintk("### "FMT ,##__VA_ARGS__) +#define _net(FMT,...) _dbprintk("@@@ "FMT ,##__VA_ARGS__) +#endif + +/* + * debug assertion checking + */ +#if 1 // defined(__KDEBUGALL) + +#define ASSERT(X) \ +do { \ + if (unlikely(!(X))) { \ + printk(KERN_ERR "\n"); \ + printk(KERN_ERR "RxRPC: Assertion failed\n"); \ + BUG(); \ + } \ +} while(0) + +#define ASSERTCMP(X, OP, Y) \ +do { \ + if (unlikely(!((X) OP (Y)))) { \ + printk(KERN_ERR "\n"); \ + printk(KERN_ERR "RxRPC: Assertion failed\n"); \ + printk(KERN_ERR "%lu " #OP " %lu is false\n", \ + (unsigned long)(X), (unsigned long)(Y)); \ + printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \ + (unsigned long)(X), (unsigned long)(Y)); \ + BUG(); \ + } \ +} while(0) + +#define ASSERTIF(C, X) \ +do { \ + if (unlikely((C) && !(X))) { \ + printk(KERN_ERR "\n"); \ + printk(KERN_ERR "RxRPC: Assertion failed\n"); \ + BUG(); \ + } \ +} while(0) + +#define ASSERTIFCMP(C, X, OP, Y) \ +do { \ + if (unlikely((C) && !((X) OP (Y)))) { \ + printk(KERN_ERR "\n"); \ + printk(KERN_ERR "RxRPC: Assertion failed\n"); \ + printk(KERN_ERR "%lu " #OP " %lu is false\n", \ + (unsigned long)(X), (unsigned long)(Y)); \ + printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \ + (unsigned long)(X), (unsigned long)(Y)); \ + BUG(); \ + } \ +} while(0) + +#else + +#define ASSERT(X) \ +do { \ +} while(0) + +#define ASSERTCMP(X, OP, Y) \ +do { \ +} while(0) + +#define ASSERTIF(C, X) \ +do { \ +} while(0) + +#define ASSERTIFCMP(C, X, OP, Y) \ +do { \ +} while(0) + +#endif /* __KDEBUGALL */ + +/* + * socket buffer accounting / leak finding + */ +static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn) +{ + //_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs)); + //atomic_inc(&rxrpc_n_skbs); +} + +#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__) + +static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn) +{ + //_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs)); + //atomic_dec(&rxrpc_n_skbs); +} + +#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__) + +static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn) +{ + if (skb) { + CHECK_SLAB_OKAY(&skb->users); + //_net("free skb %p %s [%d]", + // skb, fn, atomic_read(&rxrpc_n_skbs)); + //atomic_dec(&rxrpc_n_skbs); + kfree_skb(skb); + } +} + +#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__) + +static inline void rxrpc_purge_queue(struct sk_buff_head *list) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue((list))) != NULL) + rxrpc_free_skb(skb); +} + +static inline void __rxrpc__atomic_inc(atomic_t *v) +{ + CHECK_SLAB_OKAY(v); + atomic_inc(v); +} + +#define atomic_inc(v) __rxrpc__atomic_inc((v)) + +static inline void __rxrpc__atomic_dec(atomic_t *v) +{ + CHECK_SLAB_OKAY(v); + atomic_dec(v); +} + 
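+/* as with atomic_inc() above, atomic_dec() and atomic_dec_and_test() are
+ * wrapped so that every caller in this code first runs CHECK_SLAB_OKAY() on
+ * the counter (a no-op unless the poison check at the top of this file is
+ * enabled) */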
+#define atomic_dec(v) __rxrpc__atomic_dec((v)) + +static inline int __rxrpc__atomic_dec_and_test(atomic_t *v) +{ + CHECK_SLAB_OKAY(v); + return atomic_dec_and_test(v); +} + +#define atomic_dec_and_test(v) __rxrpc__atomic_dec_and_test((v)) + +static inline void __rxrpc_get_local(struct rxrpc_local *local, const char *f) +{ + CHECK_SLAB_OKAY(&local->usage); + if (atomic_inc_return(&local->usage) == 1) + printk("resurrected (%s)\n", f); +} + +#define rxrpc_get_local(LOCAL) __rxrpc_get_local((LOCAL), __func__) + +#define rxrpc_get_call(CALL) \ +do { \ + CHECK_SLAB_OKAY(&(CALL)->usage); \ + if (atomic_inc_return(&(CALL)->usage) == 1) \ + BUG(); \ +} while(0) + +#define rxrpc_put_call(CALL) \ +do { \ + __rxrpc_put_call(CALL); \ +} while(0) diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c new file mode 100644 index 000000000000..7e049ff6ae60 --- /dev/null +++ b/net/rxrpc/ar-key.c @@ -0,0 +1,334 @@ +/* RxRPC key management + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * RxRPC keys should have a description of describing their purpose: + * "afs@CAMBRIDGE.REDHAT.COM> + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +static int rxrpc_instantiate(struct key *, const void *, size_t); +static int rxrpc_instantiate_s(struct key *, const void *, size_t); +static void rxrpc_destroy(struct key *); +static void rxrpc_destroy_s(struct key *); +static void rxrpc_describe(const struct key *, struct seq_file *); + +/* + * rxrpc defined keys take an arbitrary string as the description and an + * arbitrary blob of data as the payload + */ +struct key_type key_type_rxrpc = { + .name = "rxrpc", + .instantiate = rxrpc_instantiate, + .match = user_match, + .destroy = rxrpc_destroy, + .describe = rxrpc_describe, +}; + +EXPORT_SYMBOL(key_type_rxrpc); + +/* + * rxrpc server defined keys take ":" as the + * description and an 8-byte decryption key as the payload + */ +struct key_type key_type_rxrpc_s = { + .name = "rxrpc_s", + .instantiate = rxrpc_instantiate_s, + .match = user_match, + .destroy = rxrpc_destroy_s, + .describe = rxrpc_describe, +}; + +/* + * instantiate an rxrpc defined key + * data should be of the form: + * OFFSET LEN CONTENT + * 0 4 key interface version number + * 4 2 security index (type) + * 6 2 ticket length + * 8 4 key expiry time (time_t) + * 12 4 kvno + * 16 8 session key + * 24 [len] ticket + * + * if no data is provided, then a no-security key is made + */ +static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) +{ + const struct rxkad_key *tsec; + struct rxrpc_key_payload *upayload; + size_t plen; + u32 kver; + int ret; + + _enter("{%x},,%zu", key_serial(key), datalen); + + /* handle a no-security key */ + if (!data && datalen == 0) + return 0; + + /* get the key interface version number */ + ret = -EINVAL; + if (datalen <= 4 || !data) + goto error; + memcpy(&kver, data, sizeof(kver)); + data += sizeof(kver); + datalen -= sizeof(kver); + + _debug("KEY I/F VERSION: %u", kver); + + ret = -EKEYREJECTED; + if (kver != 1) + goto error; + + /* deal with a version 1 key */ + ret = -EINVAL; + if (datalen < sizeof(*tsec)) + goto error; + + tsec = data; + if (datalen != 
sizeof(*tsec) + tsec->ticket_len) + goto error; + + _debug("SCIX: %u", tsec->security_index); + _debug("TLEN: %u", tsec->ticket_len); + _debug("EXPY: %x", tsec->expiry); + _debug("KVNO: %u", tsec->kvno); + _debug("SKEY: %02x%02x%02x%02x%02x%02x%02x%02x", + tsec->session_key[0], tsec->session_key[1], + tsec->session_key[2], tsec->session_key[3], + tsec->session_key[4], tsec->session_key[5], + tsec->session_key[6], tsec->session_key[7]); + if (tsec->ticket_len >= 8) + _debug("TCKT: %02x%02x%02x%02x%02x%02x%02x%02x", + tsec->ticket[0], tsec->ticket[1], + tsec->ticket[2], tsec->ticket[3], + tsec->ticket[4], tsec->ticket[5], + tsec->ticket[6], tsec->ticket[7]); + + ret = -EPROTONOSUPPORT; + if (tsec->security_index != 2) + goto error; + + key->type_data.x[0] = tsec->security_index; + + plen = sizeof(*upayload) + tsec->ticket_len; + ret = key_payload_reserve(key, plen); + if (ret < 0) + goto error; + + ret = -ENOMEM; + upayload = kmalloc(plen, GFP_KERNEL); + if (!upayload) + goto error; + + /* attach the data */ + memcpy(&upayload->k, tsec, sizeof(*tsec)); + memcpy(&upayload->k.ticket, (void *)tsec + sizeof(*tsec), + tsec->ticket_len); + key->payload.data = upayload; + key->expiry = tsec->expiry; + ret = 0; + +error: + return ret; +} + +/* + * instantiate a server secret key + * data should be a pointer to the 8-byte secret key + */ +static int rxrpc_instantiate_s(struct key *key, const void *data, + size_t datalen) +{ + struct crypto_blkcipher *ci; + + _enter("{%x},,%zu", key_serial(key), datalen); + + if (datalen != 8) + return -EINVAL; + + memcpy(&key->type_data, data, 8); + + ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(ci)) { + _leave(" = %ld", PTR_ERR(ci)); + return PTR_ERR(ci); + } + + if (crypto_blkcipher_setkey(ci, data, 8) < 0) + BUG(); + + key->payload.data = ci; + _leave(" = 0"); + return 0; +} + +/* + * dispose of the data dangling from the corpse of a rxrpc key + */ +static void rxrpc_destroy(struct key *key) +{ + kfree(key->payload.data); +} + +/* + * dispose of the data dangling from the corpse of a rxrpc key + */ +static void rxrpc_destroy_s(struct key *key) +{ + if (key->payload.data) { + crypto_free_blkcipher(key->payload.data); + key->payload.data = NULL; + } +} + +/* + * describe the rxrpc key + */ +static void rxrpc_describe(const struct key *key, struct seq_file *m) +{ + seq_puts(m, key->description); +} + +/* + * grab the security key for a socket + */ +int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen) +{ + struct key *key; + char *description; + + _enter(""); + + if (optlen <= 0 || optlen > PAGE_SIZE - 1) + return -EINVAL; + + description = kmalloc(optlen + 1, GFP_KERNEL); + if (!description) + return -ENOMEM; + + if (copy_from_user(description, optval, optlen)) { + kfree(description); + return -EFAULT; + } + description[optlen] = 0; + + key = request_key(&key_type_rxrpc, description, NULL); + if (IS_ERR(key)) { + kfree(description); + _leave(" = %ld", PTR_ERR(key)); + return PTR_ERR(key); + } + + rx->key = key; + kfree(description); + _leave(" = 0 [key %x]", key->serial); + return 0; +} + +/* + * grab the security keyring for a server socket + */ +int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval, + int optlen) +{ + struct key *key; + char *description; + + _enter(""); + + if (optlen <= 0 || optlen > PAGE_SIZE - 1) + return -EINVAL; + + description = kmalloc(optlen + 1, GFP_KERNEL); + if (!description) + return -ENOMEM; + + if (copy_from_user(description, optval, optlen)) { + 
kfree(description); + return -EFAULT; + } + description[optlen] = 0; + + key = request_key(&key_type_keyring, description, NULL); + if (IS_ERR(key)) { + kfree(description); + _leave(" = %ld", PTR_ERR(key)); + return PTR_ERR(key); + } + + rx->securities = key; + kfree(description); + _leave(" = 0 [key %x]", key->serial); + return 0; +} + +/* + * generate a server data key + */ +int rxrpc_get_server_data_key(struct rxrpc_connection *conn, + const void *session_key, + time_t expiry, + u32 kvno) +{ + struct key *key; + int ret; + + struct { + u32 kver; + struct rxkad_key tsec; + } data; + + _enter(""); + + key = key_alloc(&key_type_rxrpc, "x", 0, 0, current, 0, + KEY_ALLOC_NOT_IN_QUOTA); + if (IS_ERR(key)) { + _leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key)); + return -ENOMEM; + } + + _debug("key %d", key_serial(key)); + + data.kver = 1; + data.tsec.security_index = 2; + data.tsec.ticket_len = 0; + data.tsec.expiry = expiry; + data.tsec.kvno = 0; + + memcpy(&data.tsec.session_key, session_key, + sizeof(data.tsec.session_key)); + + ret = key_instantiate_and_link(key, &data, sizeof(data), NULL, NULL); + if (ret < 0) + goto error; + + conn->key = key; + _leave(" = 0 [%d]", key_serial(key)); + return 0; + +error: + key_revoke(key); + key_put(key); + _leave(" = -ENOMEM [ins %d]", ret); + return -ENOMEM; +} + +EXPORT_SYMBOL(rxrpc_get_server_data_key); diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c new file mode 100644 index 000000000000..a20a2c0fe105 --- /dev/null +++ b/net/rxrpc/ar-local.c @@ -0,0 +1,309 @@ +/* AF_RXRPC local endpoint management + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include "ar-internal.h" + +static LIST_HEAD(rxrpc_locals); +DEFINE_RWLOCK(rxrpc_local_lock); +static DECLARE_RWSEM(rxrpc_local_sem); +static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq); + +static void rxrpc_destroy_local(struct work_struct *work); + +/* + * allocate a new local + */ +static +struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx) +{ + struct rxrpc_local *local; + + local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); + if (local) { + INIT_WORK(&local->destroyer, &rxrpc_destroy_local); + INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls); + INIT_WORK(&local->rejecter, &rxrpc_reject_packets); + INIT_LIST_HEAD(&local->services); + INIT_LIST_HEAD(&local->link); + init_rwsem(&local->defrag_sem); + skb_queue_head_init(&local->accept_queue); + skb_queue_head_init(&local->reject_queue); + spin_lock_init(&local->lock); + rwlock_init(&local->services_lock); + atomic_set(&local->usage, 1); + local->debug_id = atomic_inc_return(&rxrpc_debug_id); + memcpy(&local->srx, srx, sizeof(*srx)); + } + + _leave(" = %p", local); + return local; +} + +/* + * create the local socket + * - must be called with rxrpc_local_sem writelocked + */ +static int rxrpc_create_local(struct rxrpc_local *local) +{ + struct sock *sock; + int ret, opt; + + _enter("%p{%d}", local, local->srx.transport_type); + + /* create a socket to represent the local endpoint */ + ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP, + &local->socket); + if (ret < 0) { + _leave(" = %d [socket]", ret); + return ret; + } + + /* if a local address was supplied then bind it */ + if (local->srx.transport_len > sizeof(sa_family_t)) { + _debug("bind"); + ret = kernel_bind(local->socket, + (struct sockaddr *) &local->srx.transport, + local->srx.transport_len); + if (ret < 0) { + _debug("bind failed"); + goto error; + } + } + + /* we want to receive ICMP errors */ + opt = 1; + ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } + + /* we want to set the don't fragment bit */ + opt = IP_PMTUDISC_DO; + ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } + + write_lock_bh(&rxrpc_local_lock); + list_add(&local->link, &rxrpc_locals); + write_unlock_bh(&rxrpc_local_lock); + + /* set the socket up */ + sock = local->socket->sk; + sock->sk_user_data = local; + sock->sk_data_ready = rxrpc_data_ready; + sock->sk_error_report = rxrpc_UDP_error_report; + _leave(" = 0"); + return 0; + +error: + local->socket->ops->shutdown(local->socket, 2); + local->socket->sk->sk_user_data = NULL; + sock_release(local->socket); + local->socket = NULL; + + _leave(" = %d", ret); + return ret; +} + +/* + * create a new local endpoint using the specified UDP address + */ +struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *srx) +{ + struct rxrpc_local *local; + int ret; + + _enter("{%d,%u,%u.%u.%u.%u+%hu}", + srx->transport_type, + srx->transport.family, + NIPQUAD(srx->transport.sin.sin_addr), + ntohs(srx->transport.sin.sin_port)); + + down_write(&rxrpc_local_sem); + + /* see if we have a suitable local local endpoint already */ + read_lock_bh(&rxrpc_local_lock); + + list_for_each_entry(local, &rxrpc_locals, link) { + _debug("CMP {%d,%u,%u.%u.%u.%u+%hu}", + local->srx.transport_type, + local->srx.transport.family, + 
NIPQUAD(local->srx.transport.sin.sin_addr), + ntohs(local->srx.transport.sin.sin_port)); + + if (local->srx.transport_type != srx->transport_type || + local->srx.transport.family != srx->transport.family) + continue; + + switch (srx->transport.family) { + case AF_INET: + if (local->srx.transport.sin.sin_port != + srx->transport.sin.sin_port) + continue; + if (memcmp(&local->srx.transport.sin.sin_addr, + &srx->transport.sin.sin_addr, + sizeof(struct in_addr)) != 0) + continue; + goto found_local; + + default: + BUG(); + } + } + + read_unlock_bh(&rxrpc_local_lock); + + /* we didn't find one, so we need to create one */ + local = rxrpc_alloc_local(srx); + if (!local) { + up_write(&rxrpc_local_sem); + return ERR_PTR(-ENOMEM); + } + + ret = rxrpc_create_local(local); + if (ret < 0) { + up_write(&rxrpc_local_sem); + kfree(local); + _leave(" = %d", ret); + return ERR_PTR(ret); + } + + up_write(&rxrpc_local_sem); + + _net("LOCAL new %d {%d,%u,%u.%u.%u.%u+%hu}", + local->debug_id, + local->srx.transport_type, + local->srx.transport.family, + NIPQUAD(local->srx.transport.sin.sin_addr), + ntohs(local->srx.transport.sin.sin_port)); + + _leave(" = %p [new]", local); + return local; + +found_local: + rxrpc_get_local(local); + read_unlock_bh(&rxrpc_local_lock); + up_write(&rxrpc_local_sem); + + _net("LOCAL old %d {%d,%u,%u.%u.%u.%u+%hu}", + local->debug_id, + local->srx.transport_type, + local->srx.transport.family, + NIPQUAD(local->srx.transport.sin.sin_addr), + ntohs(local->srx.transport.sin.sin_port)); + + _leave(" = %p [reuse]", local); + return local; +} + +/* + * release a local endpoint + */ +void rxrpc_put_local(struct rxrpc_local *local) +{ + _enter("%p{u=%d}", local, atomic_read(&local->usage)); + + ASSERTCMP(atomic_read(&local->usage), >, 0); + + /* to prevent a race, the decrement and the dequeue must be effectively + * atomic */ + write_lock_bh(&rxrpc_local_lock); + if (unlikely(atomic_dec_and_test(&local->usage))) { + _debug("destroy local"); + schedule_work(&local->destroyer); + } + write_unlock_bh(&rxrpc_local_lock); + _leave(""); +} + +/* + * destroy a local endpoint + */ +static void rxrpc_destroy_local(struct work_struct *work) +{ + struct rxrpc_local *local = + container_of(work, struct rxrpc_local, destroyer); + + _enter("%p{%d}", local, atomic_read(&local->usage)); + + down_write(&rxrpc_local_sem); + + write_lock_bh(&rxrpc_local_lock); + if (atomic_read(&local->usage) > 0) { + write_unlock_bh(&rxrpc_local_lock); + up_read(&rxrpc_local_sem); + _leave(" [resurrected]"); + return; + } + + list_del(&local->link); + local->socket->sk->sk_user_data = NULL; + write_unlock_bh(&rxrpc_local_lock); + + downgrade_write(&rxrpc_local_sem); + + ASSERT(list_empty(&local->services)); + ASSERT(!work_pending(&local->acceptor)); + ASSERT(!work_pending(&local->rejecter)); + + /* finish cleaning up the local descriptor */ + rxrpc_purge_queue(&local->accept_queue); + rxrpc_purge_queue(&local->reject_queue); + local->socket->ops->shutdown(local->socket, 2); + sock_release(local->socket); + + up_read(&rxrpc_local_sem); + + _net("DESTROY LOCAL %d", local->debug_id); + kfree(local); + + if (list_empty(&rxrpc_locals)) + wake_up_all(&rxrpc_local_wq); + + _leave(""); +} + +/* + * preemptively destroy all local local endpoint rather than waiting for + * them to be destroyed + */ +void __exit rxrpc_destroy_all_locals(void) +{ + DECLARE_WAITQUEUE(myself,current); + + _enter(""); + + /* we simply have to wait for them to go away */ + if (!list_empty(&rxrpc_locals)) { + set_current_state(TASK_UNINTERRUPTIBLE); + 
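+ /* sleep on rxrpc_local_wq; rxrpc_destroy_local() wakes it once the last
+ * local endpoint has been torn down */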
add_wait_queue(&rxrpc_local_wq, &myself); + + while (!list_empty(&rxrpc_locals)) { + schedule(); + set_current_state(TASK_UNINTERRUPTIBLE); + } + + remove_wait_queue(&rxrpc_local_wq, &myself); + set_current_state(TASK_RUNNING); + } + + _leave(""); +} diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c new file mode 100644 index 000000000000..67aa9510f09b --- /dev/null +++ b/net/rxrpc/ar-output.c @@ -0,0 +1,658 @@ +/* RxRPC packet transmission + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include "ar-internal.h" + +int rxrpc_resend_timeout = 4; + +static int rxrpc_send_data(struct kiocb *iocb, + struct rxrpc_sock *rx, + struct rxrpc_call *call, + struct msghdr *msg, size_t len); + +/* + * extract control messages from the sendmsg() control buffer + */ +static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg, + unsigned long *user_call_ID, + enum rxrpc_command *command, + u32 *abort_code, + bool server) +{ + struct cmsghdr *cmsg; + int len; + + *command = RXRPC_CMD_SEND_DATA; + + if (msg->msg_controllen == 0) + return -EINVAL; + + for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { + if (!CMSG_OK(msg, cmsg)) + return -EINVAL; + + len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); + _debug("CMSG %d, %d, %d", + cmsg->cmsg_level, cmsg->cmsg_type, len); + + if (cmsg->cmsg_level != SOL_RXRPC) + continue; + + switch (cmsg->cmsg_type) { + case RXRPC_USER_CALL_ID: + if (msg->msg_flags & MSG_CMSG_COMPAT) { + if (len != sizeof(u32)) + return -EINVAL; + *user_call_ID = *(u32 *) CMSG_DATA(cmsg); + } else { + if (len != sizeof(unsigned long)) + return -EINVAL; + *user_call_ID = *(unsigned long *) + CMSG_DATA(cmsg); + } + _debug("User Call ID %lx", *user_call_ID); + break; + + case RXRPC_ABORT: + if (*command != RXRPC_CMD_SEND_DATA) + return -EINVAL; + *command = RXRPC_CMD_SEND_ABORT; + if (len != sizeof(*abort_code)) + return -EINVAL; + *abort_code = *(unsigned int *) CMSG_DATA(cmsg); + _debug("Abort %x", *abort_code); + if (*abort_code == 0) + return -EINVAL; + break; + + case RXRPC_ACCEPT: + if (*command != RXRPC_CMD_SEND_DATA) + return -EINVAL; + *command = RXRPC_CMD_ACCEPT; + if (len != 0) + return -EINVAL; + if (!server) + return -EISCONN; + break; + + default: + return -EINVAL; + } + } + + _leave(" = 0"); + return 0; +} + +/* + * abort a call, sending an ABORT packet to the peer + */ +static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code) +{ + write_lock_bh(&call->state_lock); + + if (call->state <= RXRPC_CALL_COMPLETE) { + call->state = RXRPC_CALL_LOCALLY_ABORTED; + call->abort_code = abort_code; + set_bit(RXRPC_CALL_ABORT, &call->events); + del_timer_sync(&call->resend_timer); + del_timer_sync(&call->ack_timer); + clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); + clear_bit(RXRPC_CALL_ACK, &call->events); + clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); + schedule_work(&call->processor); + } + + write_unlock_bh(&call->state_lock); +} + +/* + * send a message forming part of a client call through an RxRPC socket + * - caller holds the socket locked + * - the socket may be either a client socket or a server socket + */ +int rxrpc_client_sendmsg(struct 
kiocb *iocb, struct rxrpc_sock *rx, + struct rxrpc_transport *trans, struct msghdr *msg, + size_t len) +{ + struct rxrpc_conn_bundle *bundle; + enum rxrpc_command cmd; + struct rxrpc_call *call; + unsigned long user_call_ID = 0; + struct key *key; + __be16 service_id; + u32 abort_code = 0; + int ret; + + _enter(""); + + ASSERT(trans != NULL); + + ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code, + false); + if (ret < 0) + return ret; + + bundle = NULL; + if (trans) { + service_id = rx->service_id; + if (msg->msg_name) { + struct sockaddr_rxrpc *srx = + (struct sockaddr_rxrpc *) msg->msg_name; + service_id = htons(srx->srx_service); + } + key = rx->key; + if (key && !rx->key->payload.data) + key = NULL; + bundle = rxrpc_get_bundle(rx, trans, key, service_id, + GFP_KERNEL); + if (IS_ERR(bundle)) + return PTR_ERR(bundle); + } + + call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, + abort_code == 0, GFP_KERNEL); + if (trans) + rxrpc_put_bundle(trans, bundle); + if (IS_ERR(call)) { + _leave(" = %ld", PTR_ERR(call)); + return PTR_ERR(call); + } + + _debug("CALL %d USR %lx ST %d on CONN %p", + call->debug_id, call->user_call_ID, call->state, call->conn); + + if (call->state >= RXRPC_CALL_COMPLETE) { + /* it's too late for this call */ + ret = -ESHUTDOWN; + } else if (cmd == RXRPC_CMD_SEND_ABORT) { + rxrpc_send_abort(call, abort_code); + } else if (cmd != RXRPC_CMD_SEND_DATA) { + ret = -EINVAL; + } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) { + /* request phase complete for this client call */ + ret = -EPROTO; + } else { + ret = rxrpc_send_data(iocb, rx, call, msg, len); + } + + rxrpc_put_call(call); + _leave(" = %d", ret); + return ret; +} + +/* + * send a message through a server socket + * - caller holds the socket locked + */ +int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, + struct msghdr *msg, size_t len) +{ + enum rxrpc_command cmd; + struct rxrpc_call *call; + unsigned long user_call_ID = 0; + u32 abort_code = 0; + int ret; + + _enter(""); + + ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code, + true); + if (ret < 0) + return ret; + + if (cmd == RXRPC_CMD_ACCEPT) + return rxrpc_accept_call(rx, user_call_ID); + + call = rxrpc_find_server_call(rx, user_call_ID); + if (!call) + return -EBADSLT; + if (call->state >= RXRPC_CALL_COMPLETE) { + ret = -ESHUTDOWN; + goto out; + } + + switch (cmd) { + case RXRPC_CMD_SEND_DATA: + if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && + call->state != RXRPC_CALL_SERVER_ACK_REQUEST && + call->state != RXRPC_CALL_SERVER_SEND_REPLY) { + /* Tx phase not yet begun for this call */ + ret = -EPROTO; + break; + } + + ret = rxrpc_send_data(iocb, rx, call, msg, len); + break; + + case RXRPC_CMD_SEND_ABORT: + rxrpc_send_abort(call, abort_code); + break; + default: + BUG(); + } + + out: + rxrpc_put_call(call); + _leave(" = %d", ret); + return ret; +} + +/* + * send a packet through the transport endpoint + */ +int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb) +{ + struct kvec iov[1]; + struct msghdr msg; + int ret, opt; + + _enter(",{%d}", skb->len); + + iov[0].iov_base = skb->head; + iov[0].iov_len = skb->len; + + msg.msg_name = &trans->peer->srx.transport.sin; + msg.msg_namelen = sizeof(trans->peer->srx.transport.sin); + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + /* send the packet with the don't fragment bit set if we currently + * think it's small enough */ + if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) { + 
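+ /* take the defragmentation semaphore shared; the fragmenting slow path
+ * below takes it exclusively while it temporarily clears the DF bit on
+ * the UDP socket */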
down_read(&trans->local->defrag_sem); + /* send the packet by UDP + * - returns -EMSGSIZE if UDP would have to fragment the packet + * to go out of the interface + * - in which case, we'll have processed the ICMP error + * message and update the peer record + */ + ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, + iov[0].iov_len); + + up_read(&trans->local->defrag_sem); + if (ret == -EMSGSIZE) + goto send_fragmentable; + + _leave(" = %d [%u]", ret, trans->peer->maxdata); + return ret; + } + +send_fragmentable: + /* attempt to send this message with fragmentation enabled */ + _debug("send fragment"); + + down_write(&trans->local->defrag_sem); + opt = IP_PMTUDISC_DONT; + ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER, + (char *) &opt, sizeof(opt)); + if (ret == 0) { + ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, + iov[0].iov_len); + + opt = IP_PMTUDISC_DO; + kernel_setsockopt(trans->local->socket, SOL_IP, + IP_MTU_DISCOVER, (char *) &opt, sizeof(opt)); + } + + up_write(&trans->local->defrag_sem); + _leave(" = %d [frag %u]", ret, trans->peer->maxdata); + return ret; +} + +/* + * wait for space to appear in the transmit/ACK window + * - caller holds the socket locked + */ +static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx, + struct rxrpc_call *call, + long *timeo) +{ + DECLARE_WAITQUEUE(myself, current); + int ret; + + _enter(",{%d},%ld", + CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz), + *timeo); + + add_wait_queue(&call->tx_waitq, &myself); + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + ret = 0; + if (CIRC_SPACE(call->acks_head, call->acks_tail, + call->acks_winsz) > 0) + break; + if (signal_pending(current)) { + ret = sock_intr_errno(*timeo); + break; + } + + release_sock(&rx->sk); + *timeo = schedule_timeout(*timeo); + lock_sock(&rx->sk); + } + + remove_wait_queue(&call->tx_waitq, &myself); + set_current_state(TASK_RUNNING); + _leave(" = %d", ret); + return ret; +} + +/* + * attempt to schedule an instant Tx resend + */ +static inline void rxrpc_instant_resend(struct rxrpc_call *call) +{ + read_lock_bh(&call->state_lock); + if (try_to_del_timer_sync(&call->resend_timer) >= 0) { + clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); + if (call->state < RXRPC_CALL_COMPLETE && + !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) + schedule_work(&call->processor); + } + read_unlock_bh(&call->state_lock); +} + +/* + * queue a packet for transmission, set the resend timer and attempt + * to send the packet immediately + */ +static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, + bool last) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + int ret; + + _net("queue skb %p [%d]", skb, call->acks_head); + + ASSERT(call->acks_window != NULL); + call->acks_window[call->acks_head] = (unsigned long) skb; + smp_wmb(); + call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1); + + if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) { + _debug("________awaiting reply/ACK__________"); + write_lock_bh(&call->state_lock); + switch (call->state) { + case RXRPC_CALL_CLIENT_SEND_REQUEST: + call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; + break; + case RXRPC_CALL_SERVER_ACK_REQUEST: + call->state = RXRPC_CALL_SERVER_SEND_REPLY; + if (!last) + break; + case RXRPC_CALL_SERVER_SEND_REPLY: + call->state = RXRPC_CALL_SERVER_AWAIT_ACK; + break; + default: + break; + } + write_unlock_bh(&call->state_lock); + } + + _proto("Tx DATA %%%u { #%u }", + ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); + + 
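+ /* stamp the packet with its resend deadline and make sure the Tx resend
+ * timer is running to enforce it */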
sp->need_resend = 0; + sp->resend_at = jiffies + rxrpc_resend_timeout * HZ; + if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) { + _debug("run timer"); + call->resend_timer.expires = sp->resend_at; + add_timer(&call->resend_timer); + } + + /* attempt to cancel the rx-ACK timer, deferring reply transmission if + * we're ACK'ing the request phase of an incoming call */ + ret = -EAGAIN; + if (try_to_del_timer_sync(&call->ack_timer) >= 0) { + /* the packet may be freed by rxrpc_process_call() before this + * returns */ + ret = rxrpc_send_packet(call->conn->trans, skb); + _net("sent skb %p", skb); + } else { + _debug("failed to delete ACK timer"); + } + + if (ret < 0) { + _debug("need instant resend %d", ret); + sp->need_resend = 1; + rxrpc_instant_resend(call); + } + + _leave(""); +} + +/* + * send data through a socket + * - must be called in process context + * - caller holds the socket locked + */ +static int rxrpc_send_data(struct kiocb *iocb, + struct rxrpc_sock *rx, + struct rxrpc_call *call, + struct msghdr *msg, size_t len) +{ + struct rxrpc_skb_priv *sp; + unsigned char __user *from; + struct sk_buff *skb; + struct iovec *iov; + struct sock *sk = &rx->sk; + long timeo; + bool more; + int ret, ioc, segment, copied; + + _enter(",,,{%zu},%zu", msg->msg_iovlen, len); + + timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); + + /* this should be in poll */ + clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + + if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) + return -EPIPE; + + iov = msg->msg_iov; + ioc = msg->msg_iovlen - 1; + from = iov->iov_base; + segment = iov->iov_len; + iov++; + more = msg->msg_flags & MSG_MORE; + + skb = call->tx_pending; + call->tx_pending = NULL; + + copied = 0; + do { + int copy; + + if (segment > len) + segment = len; + + _debug("SEGMENT %d @%p", segment, from); + + if (!skb) { + size_t size, chunk, max, space; + + _debug("alloc"); + + if (CIRC_SPACE(call->acks_head, call->acks_tail, + call->acks_winsz) <= 0) { + ret = -EAGAIN; + if (msg->msg_flags & MSG_DONTWAIT) + goto maybe_error; + ret = rxrpc_wait_for_tx_window(rx, call, + &timeo); + if (ret < 0) + goto maybe_error; + } + + max = call->conn->trans->peer->maxdata; + max -= call->conn->security_size; + max &= ~(call->conn->size_align - 1UL); + + chunk = max; + if (chunk > len) + chunk = len; + + space = chunk + call->conn->size_align; + space &= ~(call->conn->size_align - 1UL); + + size = space + call->conn->header_size; + + _debug("SIZE: %zu/%zu/%zu", chunk, space, size); + + /* create a buffer that we can retain until it's ACK'd */ + skb = sock_alloc_send_skb( + sk, size, msg->msg_flags & MSG_DONTWAIT, &ret); + if (!skb) + goto maybe_error; + + rxrpc_new_skb(skb); + + _debug("ALLOC SEND %p", skb); + + ASSERTCMP(skb->mark, ==, 0); + + _debug("HS: %u", call->conn->header_size); + skb_reserve(skb, call->conn->header_size); + skb->len += call->conn->header_size; + + sp = rxrpc_skb(skb); + sp->remain = chunk; + if (sp->remain > skb_tailroom(skb)) + sp->remain = skb_tailroom(skb); + + _net("skb: hr %d, tr %d, hl %d, rm %d", + skb_headroom(skb), + skb_tailroom(skb), + skb_headlen(skb), + sp->remain); + + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + _debug("append"); + sp = rxrpc_skb(skb); + + /* append next segment of data to the current buffer */ + copy = skb_tailroom(skb); + ASSERTCMP(copy, >, 0); + if (copy > segment) + copy = segment; + if (copy > sp->remain) + copy = sp->remain; + + _debug("add"); + ret = skb_add_data(skb, from, copy); + _debug("added"); + if (ret < 0) + goto 
efault; + sp->remain -= copy; + skb->mark += copy; + + len -= copy; + segment -= copy; + from += copy; + while (segment == 0 && ioc > 0) { + from = iov->iov_base; + segment = iov->iov_len; + iov++; + ioc--; + } + if (len == 0) { + segment = 0; + ioc = 0; + } + + /* check for the far side aborting the call or a network error + * occurring */ + if (call->state > RXRPC_CALL_COMPLETE) + goto call_aborted; + + /* add the packet to the send queue if it's now full */ + if (sp->remain <= 0 || (segment == 0 && !more)) { + struct rxrpc_connection *conn = call->conn; + size_t pad; + + /* pad out if we're using security */ + if (conn->security) { + pad = conn->security_size + skb->mark; + pad = conn->size_align - pad; + pad &= conn->size_align - 1; + _debug("pad %zu", pad); + if (pad) + memset(skb_put(skb, pad), 0, pad); + } + + sp->hdr.epoch = conn->epoch; + sp->hdr.cid = call->cid; + sp->hdr.callNumber = call->call_id; + sp->hdr.seq = + htonl(atomic_inc_return(&call->sequence)); + sp->hdr.serial = + htonl(atomic_inc_return(&conn->serial)); + sp->hdr.type = RXRPC_PACKET_TYPE_DATA; + sp->hdr.userStatus = 0; + sp->hdr.securityIndex = conn->security_ix; + sp->hdr._rsvd = 0; + sp->hdr.serviceId = conn->service_id; + + sp->hdr.flags = conn->out_clientflag; + if (len == 0 && !more) + sp->hdr.flags |= RXRPC_LAST_PACKET; + else if (CIRC_SPACE(call->acks_head, call->acks_tail, + call->acks_winsz) > 1) + sp->hdr.flags |= RXRPC_MORE_PACKETS; + + ret = rxrpc_secure_packet( + call, skb, skb->mark, + skb->head + sizeof(struct rxrpc_header)); + if (ret < 0) + goto out; + + memcpy(skb->head, &sp->hdr, + sizeof(struct rxrpc_header)); + rxrpc_queue_packet(call, skb, segment == 0 && !more); + skb = NULL; + } + + } while (segment > 0); + +out: + call->tx_pending = skb; + _leave(" = %d", ret); + return ret; + +call_aborted: + rxrpc_free_skb(skb); + if (call->state == RXRPC_CALL_NETWORK_ERROR) + ret = call->conn->trans->peer->net_error; + else + ret = -ECONNABORTED; + _leave(" = %d", ret); + return ret; + +maybe_error: + if (copied) + ret = copied; + goto out; + +efault: + ret = -EFAULT; + goto out; +} diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c new file mode 100644 index 000000000000..69ac355546ae --- /dev/null +++ b/net/rxrpc/ar-peer.c @@ -0,0 +1,273 @@ +/* RxRPC remote transport endpoint management + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +static LIST_HEAD(rxrpc_peers); +static DEFINE_RWLOCK(rxrpc_peer_lock); +static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq); + +static void rxrpc_destroy_peer(struct work_struct *work); + +/* + * allocate a new peer + */ +static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx, + gfp_t gfp) +{ + struct rxrpc_peer *peer; + + _enter(""); + + peer = kzalloc(sizeof(struct rxrpc_peer), gfp); + if (peer) { + INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer); + INIT_LIST_HEAD(&peer->link); + INIT_LIST_HEAD(&peer->error_targets); + spin_lock_init(&peer->lock); + atomic_set(&peer->usage, 1); + peer->debug_id = atomic_inc_return(&rxrpc_debug_id); + memcpy(&peer->srx, srx, sizeof(*srx)); + + peer->mtu = peer->if_mtu = 65535; + + if (srx->transport.family == AF_INET) { + peer->hdrsize = sizeof(struct iphdr); + switch (srx->transport_type) { + case SOCK_DGRAM: + peer->hdrsize += sizeof(struct udphdr); + break; + default: + BUG(); + break; + } + } else { + BUG(); + } + + peer->hdrsize += sizeof(struct rxrpc_header); + peer->maxdata = peer->mtu - peer->hdrsize; + } + + _leave(" = %p", peer); + return peer; +} + +/* + * obtain a remote transport endpoint for the specified address + */ +struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp) +{ + struct rxrpc_peer *peer, *candidate; + const char *new = "old"; + int usage; + + _enter("{%d,%d,%u.%u.%u.%u+%hu}", + srx->transport_type, + srx->transport_len, + NIPQUAD(srx->transport.sin.sin_addr), + ntohs(srx->transport.sin.sin_port)); + + /* search the peer list first */ + read_lock_bh(&rxrpc_peer_lock); + list_for_each_entry(peer, &rxrpc_peers, link) { + _debug("check PEER %d { u=%d t=%d l=%d }", + peer->debug_id, + atomic_read(&peer->usage), + peer->srx.transport_type, + peer->srx.transport_len); + + if (atomic_read(&peer->usage) > 0 && + peer->srx.transport_type == srx->transport_type && + peer->srx.transport_len == srx->transport_len && + memcmp(&peer->srx.transport, + &srx->transport, + srx->transport_len) == 0) + goto found_extant_peer; + } + read_unlock_bh(&rxrpc_peer_lock); + + /* not yet present - create a candidate for a new record and then + * redo the search */ + candidate = rxrpc_alloc_peer(srx, gfp); + if (!candidate) { + _leave(" = -ENOMEM"); + return ERR_PTR(-ENOMEM); + } + + write_lock_bh(&rxrpc_peer_lock); + + list_for_each_entry(peer, &rxrpc_peers, link) { + if (atomic_read(&peer->usage) > 0 && + peer->srx.transport_type == srx->transport_type && + peer->srx.transport_len == srx->transport_len && + memcmp(&peer->srx.transport, + &srx->transport, + srx->transport_len) == 0) + goto found_extant_second; + } + + /* we can now add the new candidate to the list */ + peer = candidate; + candidate = NULL; + + list_add_tail(&peer->link, &rxrpc_peers); + write_unlock_bh(&rxrpc_peer_lock); + new = "new"; + +success: + _net("PEER %s %d {%d,%u,%u.%u.%u.%u+%hu}", + new, + peer->debug_id, + peer->srx.transport_type, + peer->srx.transport.family, + NIPQUAD(peer->srx.transport.sin.sin_addr), + ntohs(peer->srx.transport.sin.sin_port)); + + _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage)); + return peer; + + /* we found the peer in the list immediately */ +found_extant_peer: + usage = atomic_inc_return(&peer->usage); + read_unlock_bh(&rxrpc_peer_lock); + goto success; + + /* we found the peer on the second time through the list */ +found_extant_second: + usage = atomic_inc_return(&peer->usage); + 
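+	/* we lost the allocation race: the candidate record we built is now
+	 * surplus and is freed below */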
write_unlock_bh(&rxrpc_peer_lock); + kfree(candidate); + goto success; +} + +/* + * find the peer associated with a packet + */ +struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local, + __be32 addr, __be16 port) +{ + struct rxrpc_peer *peer; + + _enter(""); + + /* search the peer list */ + read_lock_bh(&rxrpc_peer_lock); + + if (local->srx.transport.family == AF_INET && + local->srx.transport_type == SOCK_DGRAM + ) { + list_for_each_entry(peer, &rxrpc_peers, link) { + if (atomic_read(&peer->usage) > 0 && + peer->srx.transport_type == SOCK_DGRAM && + peer->srx.transport.family == AF_INET && + peer->srx.transport.sin.sin_port == port && + peer->srx.transport.sin.sin_addr.s_addr == addr) + goto found_UDP_peer; + } + + goto new_UDP_peer; + } + + read_unlock_bh(&rxrpc_peer_lock); + _leave(" = -EAFNOSUPPORT"); + return ERR_PTR(-EAFNOSUPPORT); + +found_UDP_peer: + _net("Rx UDP DGRAM from peer %d", peer->debug_id); + atomic_inc(&peer->usage); + read_unlock_bh(&rxrpc_peer_lock); + _leave(" = %p", peer); + return peer; + +new_UDP_peer: + _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id); + read_unlock_bh(&rxrpc_peer_lock); + _leave(" = -EBUSY [new]"); + return ERR_PTR(-EBUSY); +} + +/* + * release a remote transport endpoint + */ +void rxrpc_put_peer(struct rxrpc_peer *peer) +{ + _enter("%p{u=%d}", peer, atomic_read(&peer->usage)); + + ASSERTCMP(atomic_read(&peer->usage), >, 0); + + if (likely(!atomic_dec_and_test(&peer->usage))) { + _leave(" [in use]"); + return; + } + + schedule_work(&peer->destroyer); + _leave(""); +} + +/* + * destroy a remote transport endpoint + */ +static void rxrpc_destroy_peer(struct work_struct *work) +{ + struct rxrpc_peer *peer = + container_of(work, struct rxrpc_peer, destroyer); + + _enter("%p{%d}", peer, atomic_read(&peer->usage)); + + write_lock_bh(&rxrpc_peer_lock); + list_del(&peer->link); + write_unlock_bh(&rxrpc_peer_lock); + + _net("DESTROY PEER %d", peer->debug_id); + kfree(peer); + + if (list_empty(&rxrpc_peers)) + wake_up_all(&rxrpc_peer_wq); + _leave(""); +} + +/* + * preemptively destroy all the peer records from a transport endpoint rather + * than waiting for them to time out + */ +void __exit rxrpc_destroy_all_peers(void) +{ + DECLARE_WAITQUEUE(myself,current); + + _enter(""); + + /* we simply have to wait for them to go away */ + if (!list_empty(&rxrpc_peers)) { + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&rxrpc_peer_wq, &myself); + + while (!list_empty(&rxrpc_peers)) { + schedule(); + set_current_state(TASK_UNINTERRUPTIBLE); + } + + remove_wait_queue(&rxrpc_peer_wq, &myself); + set_current_state(TASK_RUNNING); + } + + _leave(""); +} diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c new file mode 100644 index 000000000000..58f4b4e5cece --- /dev/null +++ b/net/rxrpc/ar-proc.c @@ -0,0 +1,247 @@ +/* /proc/net/ support for AF_RXRPC + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include "ar-internal.h" + +static const char *rxrpc_conn_states[] = { + [RXRPC_CONN_UNUSED] = "Unused ", + [RXRPC_CONN_CLIENT] = "Client ", + [RXRPC_CONN_SERVER_UNSECURED] = "SvUnsec ", + [RXRPC_CONN_SERVER_CHALLENGING] = "SvChall ", + [RXRPC_CONN_SERVER] = "SvSecure", + [RXRPC_CONN_REMOTELY_ABORTED] = "RmtAbort", + [RXRPC_CONN_LOCALLY_ABORTED] = "LocAbort", + [RXRPC_CONN_NETWORK_ERROR] = "NetError", +}; + +const char *rxrpc_call_states[] = { + [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq", + [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl", + [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl", + [RXRPC_CALL_CLIENT_FINAL_ACK] = "ClFnlACK", + [RXRPC_CALL_SERVER_SECURING] = "SvSecure", + [RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept", + [RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq", + [RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq", + [RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl", + [RXRPC_CALL_SERVER_AWAIT_ACK] = "SvAwtACK", + [RXRPC_CALL_COMPLETE] = "Complete", + [RXRPC_CALL_SERVER_BUSY] = "SvBusy ", + [RXRPC_CALL_REMOTELY_ABORTED] = "RmtAbort", + [RXRPC_CALL_LOCALLY_ABORTED] = "LocAbort", + [RXRPC_CALL_NETWORK_ERROR] = "NetError", + [RXRPC_CALL_DEAD] = "Dead ", +}; + +/* + * generate a list of extant and dead calls in /proc/net/rxrpc_calls + */ +static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos) +{ + struct list_head *_p; + loff_t pos = *_pos; + + read_lock(&rxrpc_call_lock); + if (!pos) + return SEQ_START_TOKEN; + pos--; + + list_for_each(_p, &rxrpc_calls) + if (!pos--) + break; + + return _p != &rxrpc_calls ? _p : NULL; +} + +static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct list_head *_p; + + (*pos)++; + + _p = v; + _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next; + + return _p != &rxrpc_calls ? _p : NULL; +} + +static void rxrpc_call_seq_stop(struct seq_file *seq, void *v) +{ + read_unlock(&rxrpc_call_lock); +} + +static int rxrpc_call_seq_show(struct seq_file *seq, void *v) +{ + struct rxrpc_transport *trans; + struct rxrpc_call *call; + char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; + + if (v == SEQ_START_TOKEN) { + seq_puts(seq, + "Proto Local Remote " + " SvID ConnID CallID End Use State Abort " + " UserID\n"); + return 0; + } + + call = list_entry(v, struct rxrpc_call, link); + trans = call->conn->trans; + + sprintf(lbuff, NIPQUAD_FMT":%u", + NIPQUAD(trans->local->srx.transport.sin.sin_addr), + ntohs(trans->local->srx.transport.sin.sin_port)); + + sprintf(rbuff, NIPQUAD_FMT":%u", + NIPQUAD(trans->peer->srx.transport.sin.sin_addr), + ntohs(trans->peer->srx.transport.sin.sin_port)); + + seq_printf(seq, + "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u" + " %-8.8s %08x %lx\n", + lbuff, + rbuff, + ntohs(call->conn->service_id), + ntohl(call->conn->cid), + ntohl(call->call_id), + call->conn->in_clientflag ? 
"Svc" : "Clt", + atomic_read(&call->usage), + rxrpc_call_states[call->state], + call->abort_code, + call->user_call_ID); + + return 0; +} + +static struct seq_operations rxrpc_call_seq_ops = { + .start = rxrpc_call_seq_start, + .next = rxrpc_call_seq_next, + .stop = rxrpc_call_seq_stop, + .show = rxrpc_call_seq_show, +}; + +static int rxrpc_call_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &rxrpc_call_seq_ops); +} + +struct file_operations rxrpc_call_seq_fops = { + .owner = THIS_MODULE, + .open = rxrpc_call_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +/* + * generate a list of extant virtual connections in /proc/net/rxrpc_conns + */ +static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos) +{ + struct list_head *_p; + loff_t pos = *_pos; + + read_lock(&rxrpc_connection_lock); + if (!pos) + return SEQ_START_TOKEN; + pos--; + + list_for_each(_p, &rxrpc_connections) + if (!pos--) + break; + + return _p != &rxrpc_connections ? _p : NULL; +} + +static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v, + loff_t *pos) +{ + struct list_head *_p; + + (*pos)++; + + _p = v; + _p = (v == SEQ_START_TOKEN) ? rxrpc_connections.next : _p->next; + + return _p != &rxrpc_connections ? _p : NULL; +} + +static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) +{ + read_unlock(&rxrpc_connection_lock); +} + +static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) +{ + struct rxrpc_connection *conn; + struct rxrpc_transport *trans; + char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; + + if (v == SEQ_START_TOKEN) { + seq_puts(seq, + "Proto Local Remote " + " SvID ConnID Calls End Use State Key " + " Serial ISerial\n" + ); + return 0; + } + + conn = list_entry(v, struct rxrpc_connection, link); + trans = conn->trans; + + sprintf(lbuff, NIPQUAD_FMT":%u", + NIPQUAD(trans->local->srx.transport.sin.sin_addr), + ntohs(trans->local->srx.transport.sin.sin_port)); + + sprintf(rbuff, NIPQUAD_FMT":%u", + NIPQUAD(trans->peer->srx.transport.sin.sin_addr), + ntohs(trans->peer->srx.transport.sin.sin_port)); + + seq_printf(seq, + "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u" + " %s %08x %08x %08x\n", + lbuff, + rbuff, + ntohs(conn->service_id), + ntohl(conn->cid), + conn->call_counter, + conn->in_clientflag ? "Svc" : "Clt", + atomic_read(&conn->usage), + rxrpc_conn_states[conn->state], + key_serial(conn->key), + atomic_read(&conn->serial), + atomic_read(&conn->hi_serial)); + + return 0; +} + +static struct seq_operations rxrpc_connection_seq_ops = { + .start = rxrpc_connection_seq_start, + .next = rxrpc_connection_seq_next, + .stop = rxrpc_connection_seq_stop, + .show = rxrpc_connection_seq_show, +}; + + +static int rxrpc_connection_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &rxrpc_connection_seq_ops); +} + +struct file_operations rxrpc_connection_seq_fops = { + .owner = THIS_MODULE, + .open = rxrpc_connection_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c new file mode 100644 index 000000000000..e947d5c15900 --- /dev/null +++ b/net/rxrpc/ar-recvmsg.c @@ -0,0 +1,366 @@ +/* RxRPC recvmsg() implementation + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include "ar-internal.h" + +/* + * removal a call's user ID from the socket tree to make the user ID available + * again and so that it won't be seen again in association with that call + */ +static void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call) +{ + _debug("RELEASE CALL %d", call->debug_id); + + if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { + write_lock_bh(&rx->call_lock); + rb_erase(&call->sock_node, &call->socket->calls); + clear_bit(RXRPC_CALL_HAS_USERID, &call->flags); + write_unlock_bh(&rx->call_lock); + } + + read_lock_bh(&call->state_lock); + if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && + !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) + schedule_work(&call->processor); + read_unlock_bh(&call->state_lock); +} + +/* + * receive a message from an RxRPC socket + * - we need to be careful about two or more threads calling recvmsg + * simultaneously + */ +int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len, int flags) +{ + struct rxrpc_skb_priv *sp; + struct rxrpc_call *call = NULL, *continue_call = NULL; + struct rxrpc_sock *rx = rxrpc_sk(sock->sk); + struct sk_buff *skb; + long timeo; + int copy, ret, ullen, offset, copied = 0; + u32 abort_code; + + DEFINE_WAIT(wait); + + _enter(",,,%zu,%d", len, flags); + + if (flags & (MSG_OOB | MSG_TRUNC)) + return -EOPNOTSUPP; + + ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 
4 : sizeof(unsigned long); + + timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT); + msg->msg_flags |= MSG_MORE; + + lock_sock(&rx->sk); + + for (;;) { + /* return immediately if a client socket has no outstanding + * calls */ + if (RB_EMPTY_ROOT(&rx->calls)) { + if (copied) + goto out; + if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) { + release_sock(&rx->sk); + if (continue_call) + rxrpc_put_call(continue_call); + return -ENODATA; + } + } + + /* get the next message on the Rx queue */ + skb = skb_peek(&rx->sk.sk_receive_queue); + if (!skb) { + /* nothing remains on the queue */ + if (copied && + (msg->msg_flags & MSG_PEEK || timeo == 0)) + goto out; + + /* wait for a message to turn up */ + release_sock(&rx->sk); + prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait, + TASK_INTERRUPTIBLE); + ret = sock_error(&rx->sk); + if (ret) + goto wait_error; + + if (skb_queue_empty(&rx->sk.sk_receive_queue)) { + if (signal_pending(current)) + goto wait_interrupted; + timeo = schedule_timeout(timeo); + } + finish_wait(rx->sk.sk_sleep, &wait); + lock_sock(&rx->sk); + continue; + } + + peek_next_packet: + sp = rxrpc_skb(skb); + call = sp->call; + ASSERT(call != NULL); + + _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]); + + /* make sure we wait for the state to be updated in this call */ + spin_lock_bh(&call->lock); + spin_unlock_bh(&call->lock); + + if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) { + _debug("packet from released call"); + if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) + BUG(); + rxrpc_free_skb(skb); + continue; + } + + /* determine whether to continue last data receive */ + if (continue_call) { + _debug("maybe cont"); + if (call != continue_call || + skb->mark != RXRPC_SKB_MARK_DATA) { + release_sock(&rx->sk); + rxrpc_put_call(continue_call); + _leave(" = %d [noncont]", copied); + return copied; + } + } + + rxrpc_get_call(call); + + /* copy the peer address and timestamp */ + if (!continue_call) { + if (msg->msg_name && msg->msg_namelen > 0) + memcpy(&msg->msg_name, &call->conn->trans->peer->srx, + sizeof(call->conn->trans->peer->srx)); + sock_recv_timestamp(msg, &rx->sk, skb); + } + + /* receive the message */ + if (skb->mark != RXRPC_SKB_MARK_DATA) + goto receive_non_data_message; + + _debug("recvmsg DATA #%u { %d, %d }", + ntohl(sp->hdr.seq), skb->len, sp->offset); + + if (!continue_call) { + /* only set the control data once per recvmsg() */ + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, + ullen, &call->user_call_ID); + if (ret < 0) + goto copy_error; + ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags)); + } + + ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv); + ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1); + call->rx_data_recv = ntohl(sp->hdr.seq); + + ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten); + + offset = sp->offset; + copy = skb->len - offset; + if (copy > len - copied) + copy = len - copied; + + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + ret = skb_copy_datagram_iovec(skb, offset, + msg->msg_iov, copy); + } else { + ret = skb_copy_and_csum_datagram_iovec(skb, offset, + msg->msg_iov); + if (ret == -EINVAL) + goto csum_copy_error; + } + + if (ret < 0) + goto copy_error; + + /* handle piecemeal consumption of data packets */ + _debug("copied %d+%d", copy, copied); + + offset += copy; + copied += copy; + + if (!(flags & MSG_PEEK)) + sp->offset = offset; + + if (sp->offset < skb->len) { + _debug("buffer full"); + ASSERTCMP(copied, ==, len); + break; + } + + /* we transferred the whole data packet */ + if (sp->hdr.flags & 
RXRPC_LAST_PACKET) { + _debug("last"); + if (call->conn->out_clientflag) { + /* last byte of reply received */ + ret = copied; + goto terminal_message; + } + + /* last bit of request received */ + if (!(flags & MSG_PEEK)) { + _debug("eat packet"); + if (skb_dequeue(&rx->sk.sk_receive_queue) != + skb) + BUG(); + rxrpc_free_skb(skb); + } + msg->msg_flags &= ~MSG_MORE; + break; + } + + /* move on to the next data message */ + _debug("next"); + if (!continue_call) + continue_call = sp->call; + else + rxrpc_put_call(call); + call = NULL; + + if (flags & MSG_PEEK) { + _debug("peek next"); + skb = skb->next; + if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue) + break; + goto peek_next_packet; + } + + _debug("eat packet"); + if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) + BUG(); + rxrpc_free_skb(skb); + } + + /* end of non-terminal data packet reception for the moment */ + _debug("end rcv data"); +out: + release_sock(&rx->sk); + if (call) + rxrpc_put_call(call); + if (continue_call) + rxrpc_put_call(continue_call); + _leave(" = %d [data]", copied); + return copied; + + /* handle non-DATA messages such as aborts, incoming connections and + * final ACKs */ +receive_non_data_message: + _debug("non-data"); + + if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) { + _debug("RECV NEW CALL"); + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code); + if (ret < 0) + goto copy_error; + if (!(flags & MSG_PEEK)) { + if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) + BUG(); + rxrpc_free_skb(skb); + } + goto out; + } + + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, + ullen, &call->user_call_ID); + if (ret < 0) + goto copy_error; + ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags)); + + switch (skb->mark) { + case RXRPC_SKB_MARK_DATA: + BUG(); + case RXRPC_SKB_MARK_FINAL_ACK: + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code); + break; + case RXRPC_SKB_MARK_BUSY: + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code); + break; + case RXRPC_SKB_MARK_REMOTE_ABORT: + abort_code = call->abort_code; + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code); + break; + case RXRPC_SKB_MARK_NET_ERROR: + _debug("RECV NET ERROR %d", sp->error); + abort_code = sp->error; + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code); + break; + case RXRPC_SKB_MARK_LOCAL_ERROR: + _debug("RECV LOCAL ERROR %d", sp->error); + abort_code = sp->error; + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, + &abort_code); + break; + default: + BUG(); + break; + } + + if (ret < 0) + goto copy_error; + +terminal_message: + _debug("terminal"); + msg->msg_flags &= ~MSG_MORE; + msg->msg_flags |= MSG_EOR; + + if (!(flags & MSG_PEEK)) { + _net("free terminal skb %p", skb); + if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) + BUG(); + rxrpc_free_skb(skb); + rxrpc_remove_user_ID(rx, call); + } + + release_sock(&rx->sk); + rxrpc_put_call(call); + if (continue_call) + rxrpc_put_call(continue_call); + _leave(" = %d", ret); + return ret; + +copy_error: + _debug("copy error"); + release_sock(&rx->sk); + rxrpc_put_call(call); + if (continue_call) + rxrpc_put_call(continue_call); + _leave(" = %d", ret); + return ret; + +csum_copy_error: + _debug("csum error"); + release_sock(&rx->sk); + if (continue_call) + rxrpc_put_call(continue_call); + rxrpc_kill_skb(skb); + skb_kill_datagram(&rx->sk, skb, flags); + rxrpc_put_call(call); + return -EAGAIN; + +wait_interrupted: + ret = sock_intr_errno(timeo); +wait_error: + finish_wait(rx->sk.sk_sleep, &wait); + if (continue_call) + rxrpc_put_call(continue_call); + if 
(copied) + copied = ret; + _leave(" = %d [waitfail %d]", copied, ret); + return copied; + +} diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c new file mode 100644 index 000000000000..60d1d364430a --- /dev/null +++ b/net/rxrpc/ar-security.c @@ -0,0 +1,258 @@ +/* RxRPC security handling + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +static LIST_HEAD(rxrpc_security_methods); +static DECLARE_RWSEM(rxrpc_security_sem); + +/* + * get an RxRPC security module + */ +static struct rxrpc_security *rxrpc_security_get(struct rxrpc_security *sec) +{ + return try_module_get(sec->owner) ? sec : NULL; +} + +/* + * release an RxRPC security module + */ +static void rxrpc_security_put(struct rxrpc_security *sec) +{ + module_put(sec->owner); +} + +/* + * look up an rxrpc security module + */ +struct rxrpc_security *rxrpc_security_lookup(u8 security_index) +{ + struct rxrpc_security *sec = NULL; + + _enter(""); + + down_read(&rxrpc_security_sem); + + list_for_each_entry(sec, &rxrpc_security_methods, link) { + if (sec->security_index == security_index) { + if (unlikely(!rxrpc_security_get(sec))) + break; + goto out; + } + } + + sec = NULL; +out: + up_read(&rxrpc_security_sem); + _leave(" = %p [%s]", sec, sec ? sec->name : ""); + return sec; +} + +/** + * rxrpc_register_security - register an RxRPC security handler + * @sec: security module + * + * register an RxRPC security handler for use by RxRPC + */ +int rxrpc_register_security(struct rxrpc_security *sec) +{ + struct rxrpc_security *psec; + int ret; + + _enter(""); + down_write(&rxrpc_security_sem); + + ret = -EEXIST; + list_for_each_entry(psec, &rxrpc_security_methods, link) { + if (psec->security_index == sec->security_index) + goto out; + } + + list_add(&sec->link, &rxrpc_security_methods); + + printk(KERN_NOTICE "RxRPC: Registered security type %d '%s'\n", + sec->security_index, sec->name); + ret = 0; + +out: + up_write(&rxrpc_security_sem); + _leave(" = %d", ret); + return ret; +} + +EXPORT_SYMBOL_GPL(rxrpc_register_security); + +/** + * rxrpc_unregister_security - unregister an RxRPC security handler + * @sec: security module + * + * unregister an RxRPC security handler + */ +void rxrpc_unregister_security(struct rxrpc_security *sec) +{ + + _enter(""); + down_write(&rxrpc_security_sem); + list_del_init(&sec->link); + up_write(&rxrpc_security_sem); + + printk(KERN_NOTICE "RxRPC: Unregistered security type %d '%s'\n", + sec->security_index, sec->name); +} + +EXPORT_SYMBOL_GPL(rxrpc_unregister_security); + +/* + * initialise the security on a client connection + */ +int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) +{ + struct rxrpc_security *sec; + struct key *key = conn->key; + int ret; + + _enter("{%d},{%x}", conn->debug_id, key_serial(key)); + + if (!key) + return 0; + + ret = key_validate(key); + if (ret < 0) + return ret; + + sec = rxrpc_security_lookup(key->type_data.x[0]); + if (!sec) + return -EKEYREJECTED; + conn->security = sec; + + ret = conn->security->init_connection_security(conn); + if (ret < 0) { + rxrpc_security_put(conn->security); + conn->security = NULL; + return ret; + } + + 
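+	/* the connection now owns a reference on the security module; it is
+	 * dropped again by rxrpc_clear_conn_security() */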
_leave(" = 0"); + return 0; +} + +/* + * initialise the security on a server connection + */ +int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) +{ + struct rxrpc_security *sec; + struct rxrpc_local *local = conn->trans->local; + struct rxrpc_sock *rx; + struct key *key; + key_ref_t kref; + char kdesc[5+1+3+1]; + + _enter(""); + + sprintf(kdesc, "%u:%u", ntohs(conn->service_id), conn->security_ix); + + sec = rxrpc_security_lookup(conn->security_ix); + if (!sec) { + _leave(" = -ENOKEY [lookup]"); + return -ENOKEY; + } + + /* find the service */ + read_lock_bh(&local->services_lock); + list_for_each_entry(rx, &local->services, listen_link) { + if (rx->service_id == conn->service_id) + goto found_service; + } + + /* the service appears to have died */ + read_unlock_bh(&local->services_lock); + rxrpc_security_put(sec); + _leave(" = -ENOENT"); + return -ENOENT; + +found_service: + if (!rx->securities) { + read_unlock_bh(&local->services_lock); + rxrpc_security_put(sec); + _leave(" = -ENOKEY"); + return -ENOKEY; + } + + /* look through the service's keyring */ + kref = keyring_search(make_key_ref(rx->securities, 1UL), + &key_type_rxrpc_s, kdesc); + if (IS_ERR(kref)) { + read_unlock_bh(&local->services_lock); + rxrpc_security_put(sec); + _leave(" = %ld [search]", PTR_ERR(kref)); + return PTR_ERR(kref); + } + + key = key_ref_to_ptr(kref); + read_unlock_bh(&local->services_lock); + + conn->server_key = key; + conn->security = sec; + + _leave(" = 0"); + return 0; +} + +/* + * secure a packet prior to transmission + */ +int rxrpc_secure_packet(const struct rxrpc_call *call, + struct sk_buff *skb, + size_t data_size, + void *sechdr) +{ + if (call->conn->security) + return call->conn->security->secure_packet( + call, skb, data_size, sechdr); + return 0; +} + +/* + * secure a packet prior to transmission + */ +int rxrpc_verify_packet(const struct rxrpc_call *call, struct sk_buff *skb, + u32 *_abort_code) +{ + if (call->conn->security) + return call->conn->security->verify_packet( + call, skb, _abort_code); + return 0; +} + +/* + * clear connection security + */ +void rxrpc_clear_conn_security(struct rxrpc_connection *conn) +{ + _enter("{%d}", conn->debug_id); + + if (conn->security) { + conn->security->clear(conn); + rxrpc_security_put(conn->security); + conn->security = NULL; + } + + key_put(conn->key); + key_put(conn->server_key); +} diff --git a/net/rxrpc/ar-skbuff.c b/net/rxrpc/ar-skbuff.c new file mode 100644 index 000000000000..d73f6fc76011 --- /dev/null +++ b/net/rxrpc/ar-skbuff.c @@ -0,0 +1,118 @@ +/* ar-skbuff.c: socket buffer destruction handling + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include "ar-internal.h" + +/* + * set up for the ACK at the end of the receive phase when we discard the final + * receive phase data packet + * - called with softirqs disabled + */ +static void rxrpc_request_final_ACK(struct rxrpc_call *call) +{ + /* the call may be aborted before we have a chance to ACK it */ + write_lock(&call->state_lock); + + switch (call->state) { + case RXRPC_CALL_CLIENT_RECV_REPLY: + call->state = RXRPC_CALL_CLIENT_FINAL_ACK; + _debug("request final ACK"); + + /* get an extra ref on the call for the final-ACK generator to + * release */ + rxrpc_get_call(call); + set_bit(RXRPC_CALL_ACK_FINAL, &call->events); + if (try_to_del_timer_sync(&call->ack_timer) >= 0) + schedule_work(&call->processor); + break; + + case RXRPC_CALL_SERVER_RECV_REQUEST: + call->state = RXRPC_CALL_SERVER_ACK_REQUEST; + default: + break; + } + + write_unlock(&call->state_lock); +} + +/* + * drop the bottom ACK off of the call ACK window and advance the window + */ +static void rxrpc_hard_ACK_data(struct rxrpc_call *call, + struct rxrpc_skb_priv *sp) +{ + int loop; + u32 seq; + + spin_lock_bh(&call->lock); + + _debug("hard ACK #%u", ntohl(sp->hdr.seq)); + + for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) { + call->ackr_window[loop] >>= 1; + call->ackr_window[loop] |= + call->ackr_window[loop + 1] << (BITS_PER_LONG - 1); + } + + seq = ntohl(sp->hdr.seq); + ASSERTCMP(seq, ==, call->rx_data_eaten + 1); + call->rx_data_eaten = seq; + + if (call->ackr_win_top < UINT_MAX) + call->ackr_win_top++; + + ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE, + call->rx_data_post, >=, call->rx_data_recv); + ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE, + call->rx_data_recv, >=, call->rx_data_eaten); + + if (sp->hdr.flags & RXRPC_LAST_PACKET) { + rxrpc_request_final_ACK(call); + } else if (atomic_dec_and_test(&call->ackr_not_idle) && + test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) { + _debug("send Rx idle ACK"); + __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial, + true); + } + + spin_unlock_bh(&call->lock); +} + +/* + * destroy a packet that has an RxRPC control buffer + * - advance the hard-ACK state of the parent call (done here in case something + * in the kernel bypasses recvmsg() and steals the packet directly off of the + * socket receive queue) + */ +void rxrpc_packet_destructor(struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + struct rxrpc_call *call = sp->call; + + _enter("%p{%p}", skb, call); + + if (call) { + /* send the final ACK on a client call */ + if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) + rxrpc_hard_ACK_data(call, sp); + rxrpc_put_call(call); + sp->call = NULL; + } + + if (skb->sk) + sock_rfree(skb); + _leave(""); +} diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c new file mode 100644 index 000000000000..9b4e5cb545d2 --- /dev/null +++ b/net/rxrpc/ar-transport.c @@ -0,0 +1,276 @@ +/* RxRPC point-to-point transport session management + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include "ar-internal.h" + +static void rxrpc_transport_reaper(struct work_struct *work); + +static LIST_HEAD(rxrpc_transports); +static DEFINE_RWLOCK(rxrpc_transport_lock); +static unsigned long rxrpc_transport_timeout = 3600 * 24; +static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper); + +/* + * allocate a new transport session manager + */ +static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local, + struct rxrpc_peer *peer, + gfp_t gfp) +{ + struct rxrpc_transport *trans; + + _enter(""); + + trans = kzalloc(sizeof(struct rxrpc_transport), gfp); + if (trans) { + trans->local = local; + trans->peer = peer; + INIT_LIST_HEAD(&trans->link); + trans->bundles = RB_ROOT; + trans->client_conns = RB_ROOT; + trans->server_conns = RB_ROOT; + skb_queue_head_init(&trans->error_queue); + spin_lock_init(&trans->client_lock); + rwlock_init(&trans->conn_lock); + atomic_set(&trans->usage, 1); + trans->debug_id = atomic_inc_return(&rxrpc_debug_id); + + if (peer->srx.transport.family == AF_INET) { + switch (peer->srx.transport_type) { + case SOCK_DGRAM: + INIT_WORK(&trans->error_handler, + rxrpc_UDP_error_handler); + break; + default: + BUG(); + break; + } + } else { + BUG(); + } + } + + _leave(" = %p", trans); + return trans; +} + +/* + * obtain a transport session for the nominated endpoints + */ +struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local, + struct rxrpc_peer *peer, + gfp_t gfp) +{ + struct rxrpc_transport *trans, *candidate; + const char *new = "old"; + int usage; + + _enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},", + NIPQUAD(local->srx.transport.sin.sin_addr), + ntohs(local->srx.transport.sin.sin_port), + NIPQUAD(peer->srx.transport.sin.sin_addr), + ntohs(peer->srx.transport.sin.sin_port)); + + /* search the transport list first */ + read_lock_bh(&rxrpc_transport_lock); + list_for_each_entry(trans, &rxrpc_transports, link) { + if (trans->local == local && trans->peer == peer) + goto found_extant_transport; + } + read_unlock_bh(&rxrpc_transport_lock); + + /* not yet present - create a candidate for a new record and then + * redo the search */ + candidate = rxrpc_alloc_transport(local, peer, gfp); + if (!candidate) { + _leave(" = -ENOMEM"); + return ERR_PTR(-ENOMEM); + } + + write_lock_bh(&rxrpc_transport_lock); + + list_for_each_entry(trans, &rxrpc_transports, link) { + if (trans->local == local && trans->peer == peer) + goto found_extant_second; + } + + /* we can now add the new candidate to the list */ + trans = candidate; + candidate = NULL; + + rxrpc_get_local(trans->local); + atomic_inc(&trans->peer->usage); + list_add_tail(&trans->link, &rxrpc_transports); + write_unlock_bh(&rxrpc_transport_lock); + new = "new"; + +success: + _net("TRANSPORT %s %d local %d -> peer %d", + new, + trans->debug_id, + trans->local->debug_id, + trans->peer->debug_id); + + _leave(" = %p {u=%d}", trans, atomic_read(&trans->usage)); + return trans; + + /* we found the transport in the list immediately */ +found_extant_transport: + usage = atomic_inc_return(&trans->usage); + read_unlock_bh(&rxrpc_transport_lock); + goto success; + + /* we found the transport on the second time through the list */ +found_extant_second: + usage = atomic_inc_return(&trans->usage); + write_unlock_bh(&rxrpc_transport_lock); + kfree(candidate); + goto success; +} + +/* + * find the transport connecting two endpoints + */ +struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local, + struct rxrpc_peer *peer) +{ + 
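+	/* unlike rxrpc_get_transport(), this doesn't create a new record if
+	 * none exists; it just takes a ref on a match or returns NULL */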
struct rxrpc_transport *trans; + + _enter("{%u.%u.%u.%u+%hu},{%u.%u.%u.%u+%hu},", + NIPQUAD(local->srx.transport.sin.sin_addr), + ntohs(local->srx.transport.sin.sin_port), + NIPQUAD(peer->srx.transport.sin.sin_addr), + ntohs(peer->srx.transport.sin.sin_port)); + + /* search the transport list */ + read_lock_bh(&rxrpc_transport_lock); + + list_for_each_entry(trans, &rxrpc_transports, link) { + if (trans->local == local && trans->peer == peer) + goto found_extant_transport; + } + + read_unlock_bh(&rxrpc_transport_lock); + _leave(" = NULL"); + return NULL; + +found_extant_transport: + atomic_inc(&trans->usage); + read_unlock_bh(&rxrpc_transport_lock); + _leave(" = %p", trans); + return trans; +} + +/* + * release a transport session + */ +void rxrpc_put_transport(struct rxrpc_transport *trans) +{ + _enter("%p{u=%d}", trans, atomic_read(&trans->usage)); + + ASSERTCMP(atomic_read(&trans->usage), >, 0); + + trans->put_time = xtime.tv_sec; + if (unlikely(atomic_dec_and_test(&trans->usage))) + _debug("zombie"); + /* let the reaper determine the timeout to avoid a race with + * overextending the timeout if the reaper is running at the + * same time */ + schedule_delayed_work(&rxrpc_transport_reap, 0); + _leave(""); +} + +/* + * clean up a transport session + */ +static void rxrpc_cleanup_transport(struct rxrpc_transport *trans) +{ + _net("DESTROY TRANS %d", trans->debug_id); + + rxrpc_purge_queue(&trans->error_queue); + + rxrpc_put_local(trans->local); + rxrpc_put_peer(trans->peer); + kfree(trans); +} + +/* + * reap dead transports that have passed their expiry date + */ +static void rxrpc_transport_reaper(struct work_struct *work) +{ + struct rxrpc_transport *trans, *_p; + unsigned long now, earliest, reap_time; + + LIST_HEAD(graveyard); + + _enter(""); + + now = xtime.tv_sec; + earliest = ULONG_MAX; + + /* extract all the transports that have been dead too long */ + write_lock_bh(&rxrpc_transport_lock); + list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) { + _debug("reap TRANS %d { u=%d t=%ld }", + trans->debug_id, atomic_read(&trans->usage), + (long) now - (long) trans->put_time); + + if (likely(atomic_read(&trans->usage) > 0)) + continue; + + reap_time = trans->put_time + rxrpc_transport_timeout; + if (reap_time <= now) + list_move_tail(&trans->link, &graveyard); + else if (reap_time < earliest) + earliest = reap_time; + } + write_unlock_bh(&rxrpc_transport_lock); + + if (earliest != ULONG_MAX) { + _debug("reschedule reaper %ld", (long) earliest - now); + ASSERTCMP(earliest, >, now); + schedule_delayed_work(&rxrpc_transport_reap, + (earliest - now) * HZ); + } + + /* then destroy all those pulled out */ + while (!list_empty(&graveyard)) { + trans = list_entry(graveyard.next, struct rxrpc_transport, + link); + list_del_init(&trans->link); + + ASSERTCMP(atomic_read(&trans->usage), ==, 0); + rxrpc_cleanup_transport(trans); + } + + _leave(""); +} + +/* + * preemptively destroy all the transport session records rather than waiting + * for them to time out + */ +void __exit rxrpc_destroy_all_transports(void) +{ + _enter(""); + + rxrpc_transport_timeout = 0; + cancel_delayed_work(&rxrpc_transport_reap); + schedule_delayed_work(&rxrpc_transport_reap, 0); + + _leave(""); +} diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c new file mode 100644 index 000000000000..1eaf529efac1 --- /dev/null +++ b/net/rxrpc/rxkad.c @@ -0,0 +1,1153 @@ +/* Kerberos-based RxRPC security + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ar-internal.h" + +#define RXKAD_VERSION 2 +#define MAXKRB5TICKETLEN 1024 +#define RXKAD_TKT_TYPE_KERBEROS_V5 256 +#define ANAME_SZ 40 /* size of authentication name */ +#define INST_SZ 40 /* size of principal's instance */ +#define REALM_SZ 40 /* size of principal's auth domain */ +#define SNAME_SZ 40 /* size of service name */ + +unsigned rxrpc_debug; +module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(rxrpc_debug, "rxkad debugging mask"); + +struct rxkad_level1_hdr { + __be32 data_size; /* true data size (excluding padding) */ +}; + +struct rxkad_level2_hdr { + __be32 data_size; /* true data size (excluding padding) */ + __be32 checksum; /* decrypted data checksum */ +}; + +MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos)"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); + +/* + * this holds a pinned cipher so that keventd doesn't get called by the cipher + * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE + * packets + */ +static struct crypto_blkcipher *rxkad_ci; +static DEFINE_MUTEX(rxkad_ci_mutex); + +/* + * initialise connection security + */ +static int rxkad_init_connection_security(struct rxrpc_connection *conn) +{ + struct rxrpc_key_payload *payload; + struct crypto_blkcipher *ci; + int ret; + + _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key)); + + payload = conn->key->payload.data; + conn->security_ix = payload->k.security_index; + + ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(ci)) { + _debug("no cipher"); + ret = PTR_ERR(ci); + goto error; + } + + if (crypto_blkcipher_setkey(ci, payload->k.session_key, + sizeof(payload->k.session_key)) < 0) + BUG(); + + switch (conn->security_level) { + case RXRPC_SECURITY_PLAIN: + break; + case RXRPC_SECURITY_AUTH: + conn->size_align = 8; + conn->security_size = sizeof(struct rxkad_level1_hdr); + conn->header_size += sizeof(struct rxkad_level1_hdr); + break; + case RXRPC_SECURITY_ENCRYPT: + conn->size_align = 8; + conn->security_size = sizeof(struct rxkad_level2_hdr); + conn->header_size += sizeof(struct rxkad_level2_hdr); + break; + default: + ret = -EKEYREJECTED; + goto error; + } + + conn->cipher = ci; + ret = 0; +error: + _leave(" = %d", ret); + return ret; +} + +/* + * prime the encryption state with the invariant parts of a connection's + * description + */ +static void rxkad_prime_packet_security(struct rxrpc_connection *conn) +{ + struct rxrpc_key_payload *payload; + struct blkcipher_desc desc; + struct scatterlist sg[2]; + struct rxrpc_crypt iv; + struct { + __be32 x[4]; + } tmpbuf __attribute__((aligned(16))); /* must all be in same page */ + + _enter(""); + + if (!conn->key) + return; + + payload = conn->key->payload.data; + memcpy(&iv, payload->k.session_key, sizeof(iv)); + + desc.tfm = conn->cipher; + desc.info = iv.x; + desc.flags = 0; + + tmpbuf.x[0] = conn->epoch; + tmpbuf.x[1] = conn->cid; + tmpbuf.x[2] = 0; + tmpbuf.x[3] = htonl(conn->security_ix); + + memset(sg, 0, sizeof(sg)); + sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); + sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); + 
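+	/* encrypt the connection description in place; the second half of
+	 * the result is kept as the seed for the per-packet header checksums */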
crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); + + memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv)); + ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]); + + _leave(""); +} + +/* + * partially encrypt a packet (level 1 security) + */ +static int rxkad_secure_packet_auth(const struct rxrpc_call *call, + struct sk_buff *skb, + u32 data_size, + void *sechdr) +{ + struct rxrpc_skb_priv *sp; + struct blkcipher_desc desc; + struct rxrpc_crypt iv; + struct scatterlist sg[2]; + struct { + struct rxkad_level1_hdr hdr; + __be32 first; /* first four bytes of data and padding */ + } tmpbuf __attribute__((aligned(8))); /* must all be in same page */ + u16 check; + + sp = rxrpc_skb(skb); + + _enter(""); + + check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber); + data_size |= (u32) check << 16; + + tmpbuf.hdr.data_size = htonl(data_size); + memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first)); + + /* start the encryption afresh */ + memset(&iv, 0, sizeof(iv)); + desc.tfm = call->conn->cipher; + desc.info = iv.x; + desc.flags = 0; + + memset(sg, 0, sizeof(sg)); + sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); + sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); + crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); + + memcpy(sechdr, &tmpbuf, sizeof(tmpbuf)); + + _leave(" = 0"); + return 0; +} + +/* + * wholly encrypt a packet (level 2 security) + */ +static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, + struct sk_buff *skb, + u32 data_size, + void *sechdr) +{ + const struct rxrpc_key_payload *payload; + struct rxkad_level2_hdr rxkhdr + __attribute__((aligned(8))); /* must be all on one page */ + struct rxrpc_skb_priv *sp; + struct blkcipher_desc desc; + struct rxrpc_crypt iv; + struct scatterlist sg[16]; + struct sk_buff *trailer; + unsigned len; + u16 check; + int nsg; + + sp = rxrpc_skb(skb); + + _enter(""); + + check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber); + + rxkhdr.data_size = htonl(data_size | (u32) check << 16); + rxkhdr.checksum = 0; + + /* encrypt from the session key */ + payload = call->conn->key->payload.data; + memcpy(&iv, payload->k.session_key, sizeof(iv)); + desc.tfm = call->conn->cipher; + desc.info = iv.x; + desc.flags = 0; + + memset(sg, 0, sizeof(sg[0]) * 2); + sg_set_buf(&sg[0], sechdr, sizeof(rxkhdr)); + sg_set_buf(&sg[1], &rxkhdr, sizeof(rxkhdr)); + crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr)); + + /* we want to encrypt the skbuff in-place */ + nsg = skb_cow_data(skb, 0, &trailer); + if (nsg < 0 || nsg > 16) + return -ENOMEM; + + len = data_size + call->conn->size_align - 1; + len &= ~(call->conn->size_align - 1); + + skb_to_sgvec(skb, sg, 0, len); + crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); + + _leave(" = 0"); + return 0; +} + +/* + * checksum an RxRPC packet header + */ +static int rxkad_secure_packet(const struct rxrpc_call *call, + struct sk_buff *skb, + size_t data_size, + void *sechdr) +{ + struct rxrpc_skb_priv *sp; + struct blkcipher_desc desc; + struct rxrpc_crypt iv; + struct scatterlist sg[2]; + struct { + __be32 x[2]; + } tmpbuf __attribute__((aligned(8))); /* must all be in same page */ + __be32 x; + int ret; + + sp = rxrpc_skb(skb); + + _enter("{%d{%x}},{#%u},%zu,", + call->debug_id, key_serial(call->conn->key), ntohl(sp->hdr.seq), + data_size); + + if (!call->conn->cipher) + return 0; + + ret = key_validate(call->conn->key); + if (ret < 0) + return ret; + + /* continue encrypting from where we left off */ + memcpy(&iv, call->conn->csum_iv.x, sizeof(iv)); + desc.tfm = 
call->conn->cipher; + desc.info = iv.x; + desc.flags = 0; + + /* calculate the security checksum */ + x = htonl(call->channel << (32 - RXRPC_CIDSHIFT)); + x |= sp->hdr.seq & __constant_cpu_to_be32(0x3fffffff); + tmpbuf.x[0] = sp->hdr.callNumber; + tmpbuf.x[1] = x; + + memset(&sg, 0, sizeof(sg)); + sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); + sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); + crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); + + x = ntohl(tmpbuf.x[1]); + x = (x >> 16) & 0xffff; + if (x == 0) + x = 1; /* zero checksums are not permitted */ + sp->hdr.cksum = htons(x); + + switch (call->conn->security_level) { + case RXRPC_SECURITY_PLAIN: + ret = 0; + break; + case RXRPC_SECURITY_AUTH: + ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr); + break; + case RXRPC_SECURITY_ENCRYPT: + ret = rxkad_secure_packet_encrypt(call, skb, data_size, + sechdr); + break; + default: + ret = -EPERM; + break; + } + + _leave(" = %d [set %hx]", ret, x); + return ret; +} + +/* + * decrypt partial encryption on a packet (level 1 security) + */ +static int rxkad_verify_packet_auth(const struct rxrpc_call *call, + struct sk_buff *skb, + u32 *_abort_code) +{ + struct rxkad_level1_hdr sechdr; + struct rxrpc_skb_priv *sp; + struct blkcipher_desc desc; + struct rxrpc_crypt iv; + struct scatterlist sg[2]; + struct sk_buff *trailer; + u32 data_size, buf; + u16 check; + + _enter(""); + + sp = rxrpc_skb(skb); + + /* we want to decrypt the skbuff in-place */ + if (skb_cow_data(skb, 0, &trailer) < 0) + goto nomem; + + skb_to_sgvec(skb, sg, 0, 8); + + /* start the decryption afresh */ + memset(&iv, 0, sizeof(iv)); + desc.tfm = call->conn->cipher; + desc.info = iv.x; + desc.flags = 0; + + crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8); + + /* remove the decrypted packet length */ + if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0) + goto datalen_error; + if (!skb_pull(skb, sizeof(sechdr))) + BUG(); + + buf = ntohl(sechdr.data_size); + data_size = buf & 0xffff; + + check = buf >> 16; + check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber); + check &= 0xffff; + if (check != 0) { + *_abort_code = RXKADSEALEDINCON; + goto protocol_error; + } + + /* shorten the packet to remove the padding */ + if (data_size > skb->len) + goto datalen_error; + else if (data_size < skb->len) + skb->len = data_size; + + _leave(" = 0 [dlen=%x]", data_size); + return 0; + +datalen_error: + *_abort_code = RXKADDATALEN; +protocol_error: + _leave(" = -EPROTO"); + return -EPROTO; + +nomem: + _leave(" = -ENOMEM"); + return -ENOMEM; +} + +/* + * wholly decrypt a packet (level 2 security) + */ +static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call, + struct sk_buff *skb, + u32 *_abort_code) +{ + const struct rxrpc_key_payload *payload; + struct rxkad_level2_hdr sechdr; + struct rxrpc_skb_priv *sp; + struct blkcipher_desc desc; + struct rxrpc_crypt iv; + struct scatterlist _sg[4], *sg; + struct sk_buff *trailer; + u32 data_size, buf; + u16 check; + int nsg; + + _enter(",{%d}", skb->len); + + sp = rxrpc_skb(skb); + + /* we want to decrypt the skbuff in-place */ + nsg = skb_cow_data(skb, 0, &trailer); + if (nsg < 0) + goto nomem; + + sg = _sg; + if (unlikely(nsg > 4)) { + sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO); + if (!sg) + goto nomem; + } + + skb_to_sgvec(skb, sg, 0, skb->len); + + /* decrypt from the session key */ + payload = call->conn->key->payload.data; + memcpy(&iv, payload->k.session_key, sizeof(iv)); + desc.tfm = call->conn->cipher; + desc.info = iv.x; + desc.flags = 0; + + 
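+	/* decrypt the whole packet payload in place, across however many
+	 * fragments the skb occupies */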
crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len); + if (sg != _sg) + kfree(sg); + + /* remove the decrypted packet length */ + if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0) + goto datalen_error; + if (!skb_pull(skb, sizeof(sechdr))) + BUG(); + + buf = ntohl(sechdr.data_size); + data_size = buf & 0xffff; + + check = buf >> 16; + check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber); + check &= 0xffff; + if (check != 0) { + *_abort_code = RXKADSEALEDINCON; + goto protocol_error; + } + + /* shorten the packet to remove the padding */ + if (data_size > skb->len) + goto datalen_error; + else if (data_size < skb->len) + skb->len = data_size; + + _leave(" = 0 [dlen=%x]", data_size); + return 0; + +datalen_error: + *_abort_code = RXKADDATALEN; +protocol_error: + _leave(" = -EPROTO"); + return -EPROTO; + +nomem: + _leave(" = -ENOMEM"); + return -ENOMEM; +} + +/* + * verify the security on a received packet + */ +static int rxkad_verify_packet(const struct rxrpc_call *call, + struct sk_buff *skb, + u32 *_abort_code) +{ + struct blkcipher_desc desc; + struct rxrpc_skb_priv *sp; + struct rxrpc_crypt iv; + struct scatterlist sg[2]; + struct { + __be32 x[2]; + } tmpbuf __attribute__((aligned(8))); /* must all be in same page */ + __be32 x; + __be16 cksum; + int ret; + + sp = rxrpc_skb(skb); + + _enter("{%d{%x}},{#%u}", + call->debug_id, key_serial(call->conn->key), + ntohl(sp->hdr.seq)); + + if (!call->conn->cipher) + return 0; + + if (sp->hdr.securityIndex != 2) { + *_abort_code = RXKADINCONSISTENCY; + _leave(" = -EPROTO [not rxkad]"); + return -EPROTO; + } + + /* continue encrypting from where we left off */ + memcpy(&iv, call->conn->csum_iv.x, sizeof(iv)); + desc.tfm = call->conn->cipher; + desc.info = iv.x; + desc.flags = 0; + + /* validate the security checksum */ + x = htonl(call->channel << (32 - RXRPC_CIDSHIFT)); + x |= sp->hdr.seq & __constant_cpu_to_be32(0x3fffffff); + tmpbuf.x[0] = call->call_id; + tmpbuf.x[1] = x; + + memset(&sg, 0, sizeof(sg)); + sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); + sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); + crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); + + x = ntohl(tmpbuf.x[1]); + x = (x >> 16) & 0xffff; + if (x == 0) + x = 1; /* zero checksums are not permitted */ + + cksum = htons(x); + if (sp->hdr.cksum != cksum) { + *_abort_code = RXKADSEALEDINCON; + _leave(" = -EPROTO [csum failed]"); + return -EPROTO; + } + + switch (call->conn->security_level) { + case RXRPC_SECURITY_PLAIN: + ret = 0; + break; + case RXRPC_SECURITY_AUTH: + ret = rxkad_verify_packet_auth(call, skb, _abort_code); + break; + case RXRPC_SECURITY_ENCRYPT: + ret = rxkad_verify_packet_encrypt(call, skb, _abort_code); + break; + default: + ret = -ENOANO; + break; + } + + _leave(" = %d", ret); + return ret; +} + +/* + * issue a challenge + */ +static int rxkad_issue_challenge(struct rxrpc_connection *conn) +{ + struct rxkad_challenge challenge; + struct rxrpc_header hdr; + struct msghdr msg; + struct kvec iov[2]; + size_t len; + int ret; + + _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); + + ret = key_validate(conn->key); + if (ret < 0) + return ret; + + get_random_bytes(&conn->security_nonce, sizeof(conn->security_nonce)); + + challenge.version = htonl(2); + challenge.nonce = htonl(conn->security_nonce); + challenge.min_level = htonl(0); + challenge.__padding = 0; + + msg.msg_name = &conn->trans->peer->srx.transport.sin; + msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); + msg.msg_control = NULL; + msg.msg_controllen = 0; + 
msg.msg_flags = 0; + + hdr.epoch = conn->epoch; + hdr.cid = conn->cid; + hdr.callNumber = 0; + hdr.seq = 0; + hdr.type = RXRPC_PACKET_TYPE_CHALLENGE; + hdr.flags = conn->out_clientflag; + hdr.userStatus = 0; + hdr.securityIndex = conn->security_ix; + hdr._rsvd = 0; + hdr.serviceId = conn->service_id; + + iov[0].iov_base = &hdr; + iov[0].iov_len = sizeof(hdr); + iov[1].iov_base = &challenge; + iov[1].iov_len = sizeof(challenge); + + len = iov[0].iov_len + iov[1].iov_len; + + hdr.serial = htonl(atomic_inc_return(&conn->serial)); + _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial)); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); + if (ret < 0) { + _debug("sendmsg failed: %d", ret); + return -EAGAIN; + } + + _leave(" = 0"); + return 0; +} + +/* + * send a Kerberos security response + */ +static int rxkad_send_response(struct rxrpc_connection *conn, + struct rxrpc_header *hdr, + struct rxkad_response *resp, + const struct rxkad_key *s2) +{ + struct msghdr msg; + struct kvec iov[3]; + size_t len; + int ret; + + _enter(""); + + msg.msg_name = &conn->trans->peer->srx.transport.sin; + msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + hdr->epoch = conn->epoch; + hdr->seq = 0; + hdr->type = RXRPC_PACKET_TYPE_RESPONSE; + hdr->flags = conn->out_clientflag; + hdr->userStatus = 0; + hdr->_rsvd = 0; + + iov[0].iov_base = hdr; + iov[0].iov_len = sizeof(*hdr); + iov[1].iov_base = resp; + iov[1].iov_len = sizeof(*resp); + iov[2].iov_base = (void *) s2->ticket; + iov[2].iov_len = s2->ticket_len; + + len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; + + hdr->serial = htonl(atomic_inc_return(&conn->serial)); + _proto("Tx RESPONSE %%%u", ntohl(hdr->serial)); + + ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); + if (ret < 0) { + _debug("sendmsg failed: %d", ret); + return -EAGAIN; + } + + _leave(" = 0"); + return 0; +} + +/* + * calculate the response checksum + */ +static void rxkad_calc_response_checksum(struct rxkad_response *response) +{ + u32 csum = 1000003; + int loop; + u8 *p = (u8 *) response; + + for (loop = sizeof(*response); loop > 0; loop--) + csum = csum * 0x10204081 + *p++; + + response->encrypted.checksum = htonl(csum); +} + +/* + * load a scatterlist with a potentially split-page buffer + */ +static void rxkad_sg_set_buf2(struct scatterlist sg[2], + void *buf, size_t buflen) +{ + + memset(sg, 0, sizeof(sg)); + + sg_set_buf(&sg[0], buf, buflen); + if (sg[0].offset + buflen > PAGE_SIZE) { + /* the buffer was split over two pages */ + sg[0].length = PAGE_SIZE - sg[0].offset; + sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length); + } + + ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); +} + +/* + * encrypt the response packet + */ +static void rxkad_encrypt_response(struct rxrpc_connection *conn, + struct rxkad_response *resp, + const struct rxkad_key *s2) +{ + struct blkcipher_desc desc; + struct rxrpc_crypt iv; + struct scatterlist ssg[2], dsg[2]; + + /* continue encrypting from where we left off */ + memcpy(&iv, s2->session_key, sizeof(iv)); + desc.tfm = conn->cipher; + desc.info = iv.x; + desc.flags = 0; + + rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted)); + memcpy(dsg, ssg, sizeof(dsg)); + crypto_blkcipher_encrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted)); +} + +/* + * respond to a challenge packet + */ +static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, + struct sk_buff *skb, + u32 *_abort_code) +{ + const 
struct rxrpc_key_payload *payload; + struct rxkad_challenge challenge; + struct rxkad_response resp + __attribute__((aligned(8))); /* must be aligned for crypto */ + struct rxrpc_skb_priv *sp; + u32 version, nonce, min_level, abort_code; + int ret; + + _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); + + if (!conn->key) { + _leave(" = -EPROTO [no key]"); + return -EPROTO; + } + + ret = key_validate(conn->key); + if (ret < 0) { + *_abort_code = RXKADEXPIRED; + return ret; + } + + abort_code = RXKADPACKETSHORT; + sp = rxrpc_skb(skb); + if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0) + goto protocol_error; + + version = ntohl(challenge.version); + nonce = ntohl(challenge.nonce); + min_level = ntohl(challenge.min_level); + + _proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }", + ntohl(sp->hdr.serial), version, nonce, min_level); + + abort_code = RXKADINCONSISTENCY; + if (version != RXKAD_VERSION) + goto protocol_error; + + abort_code = RXKADLEVELFAIL; + if (conn->security_level < min_level) + goto protocol_error; + + payload = conn->key->payload.data; + + /* build the response packet */ + memset(&resp, 0, sizeof(resp)); + + resp.version = RXKAD_VERSION; + resp.encrypted.epoch = conn->epoch; + resp.encrypted.cid = conn->cid; + resp.encrypted.securityIndex = htonl(conn->security_ix); + resp.encrypted.call_id[0] = + (conn->channels[0] ? conn->channels[0]->call_id : 0); + resp.encrypted.call_id[1] = + (conn->channels[1] ? conn->channels[1]->call_id : 0); + resp.encrypted.call_id[2] = + (conn->channels[2] ? conn->channels[2]->call_id : 0); + resp.encrypted.call_id[3] = + (conn->channels[3] ? conn->channels[3]->call_id : 0); + resp.encrypted.inc_nonce = htonl(nonce + 1); + resp.encrypted.level = htonl(conn->security_level); + resp.kvno = htonl(payload->k.kvno); + resp.ticket_len = htonl(payload->k.ticket_len); + + /* calculate the response checksum and then do the encryption */ + rxkad_calc_response_checksum(&resp); + rxkad_encrypt_response(conn, &resp, &payload->k); + return rxkad_send_response(conn, &sp->hdr, &resp, &payload->k); + +protocol_error: + *_abort_code = abort_code; + _leave(" = -EPROTO [%d]", abort_code); + return -EPROTO; +} + +/* + * decrypt the kerberos IV ticket in the response + */ +static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, + void *ticket, size_t ticket_len, + struct rxrpc_crypt *_session_key, + time_t *_expiry, + u32 *_abort_code) +{ + struct blkcipher_desc desc; + struct rxrpc_crypt iv, key; + struct scatterlist ssg[1], dsg[1]; + struct in_addr addr; + unsigned life; + time_t issue, now; + bool little_endian; + int ret; + u8 *p, *q, *name, *end; + + _enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key)); + + *_expiry = 0; + + ret = key_validate(conn->server_key); + if (ret < 0) { + switch (ret) { + case -EKEYEXPIRED: + *_abort_code = RXKADEXPIRED; + goto error; + default: + *_abort_code = RXKADNOAUTH; + goto error; + } + } + + ASSERT(conn->server_key->payload.data != NULL); + ASSERTCMP((unsigned long) ticket & 7UL, ==, 0); + + memcpy(&iv, &conn->server_key->type_data, sizeof(iv)); + + desc.tfm = conn->server_key->payload.data; + desc.info = iv.x; + desc.flags = 0; + + sg_init_one(&ssg[0], ticket, ticket_len); + memcpy(dsg, ssg, sizeof(dsg)); + crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, ticket_len); + + p = ticket; + end = p + ticket_len; + +#define Z(size) \ + ({ \ + u8 *__str = p; \ + q = memchr(p, 0, end - p); \ + if (!q || q - p > (size)) \ + goto bad_ticket; \ + for (; p < q; p++) \ + if (!isprint(*p)) \ + goto bad_ticket; 
\ + p++; \ + __str; \ + }) + + /* extract the ticket flags */ + _debug("KIV FLAGS: %x", *p); + little_endian = *p & 1; + p++; + + /* extract the authentication name */ + name = Z(ANAME_SZ); + _debug("KIV ANAME: %s", name); + + /* extract the principal's instance */ + name = Z(INST_SZ); + _debug("KIV INST : %s", name); + + /* extract the principal's authentication domain */ + name = Z(REALM_SZ); + _debug("KIV REALM: %s", name); + + if (end - p < 4 + 8 + 4 + 2) + goto bad_ticket; + + /* get the IPv4 address of the entity that requested the ticket */ + memcpy(&addr, p, sizeof(addr)); + p += 4; + _debug("KIV ADDR : "NIPQUAD_FMT, NIPQUAD(addr)); + + /* get the session key from the ticket */ + memcpy(&key, p, sizeof(key)); + p += 8; + _debug("KIV KEY : %08x %08x", ntohl(key.n[0]), ntohl(key.n[1])); + memcpy(_session_key, &key, sizeof(key)); + + /* get the ticket's lifetime */ + life = *p++ * 5 * 60; + _debug("KIV LIFE : %u", life); + + /* get the issue time of the ticket */ + if (little_endian) { + __le32 stamp; + memcpy(&stamp, p, 4); + issue = le32_to_cpu(stamp); + } else { + __be32 stamp; + memcpy(&stamp, p, 4); + issue = be32_to_cpu(stamp); + } + p += 4; + now = xtime.tv_sec; + _debug("KIV ISSUE: %lx [%lx]", issue, now); + + /* check the ticket is in date */ + if (issue > now) { + *_abort_code = RXKADNOAUTH; + ret = -EKEYREJECTED; + goto error; + } + + if (issue < now - life) { + *_abort_code = RXKADEXPIRED; + ret = -EKEYEXPIRED; + goto error; + } + + *_expiry = issue + life; + + /* get the service name */ + name = Z(SNAME_SZ); + _debug("KIV SNAME: %s", name); + + /* get the service instance name */ + name = Z(INST_SZ); + _debug("KIV SINST: %s", name); + + ret = 0; +error: + _leave(" = %d", ret); + return ret; + +bad_ticket: + *_abort_code = RXKADBADTICKET; + ret = -EBADMSG; + goto error; +} + +/* + * decrypt the response packet + */ +static void rxkad_decrypt_response(struct rxrpc_connection *conn, + struct rxkad_response *resp, + const struct rxrpc_crypt *session_key) +{ + struct blkcipher_desc desc; + struct scatterlist ssg[2], dsg[2]; + struct rxrpc_crypt iv; + + _enter(",,%08x%08x", + ntohl(session_key->n[0]), ntohl(session_key->n[1])); + + ASSERT(rxkad_ci != NULL); + + mutex_lock(&rxkad_ci_mutex); + if (crypto_blkcipher_setkey(rxkad_ci, session_key->x, + sizeof(*session_key)) < 0) + BUG(); + + memcpy(&iv, session_key, sizeof(iv)); + desc.tfm = rxkad_ci; + desc.info = iv.x; + desc.flags = 0; + + rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted)); + memcpy(dsg, ssg, sizeof(dsg)); + crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted)); + mutex_unlock(&rxkad_ci_mutex); + + _leave(""); +} + +/* + * verify a response + */ +static int rxkad_verify_response(struct rxrpc_connection *conn, + struct sk_buff *skb, + u32 *_abort_code) +{ + struct rxkad_response response + __attribute__((aligned(8))); /* must be aligned for crypto */ + struct rxrpc_skb_priv *sp; + struct rxrpc_crypt session_key; + time_t expiry; + void *ticket; + u32 abort_code, version, kvno, ticket_len, csum, level; + int ret; + + _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key)); + + abort_code = RXKADPACKETSHORT; + if (skb_copy_bits(skb, 0, &response, sizeof(response)) < 0) + goto protocol_error; + if (!pskb_pull(skb, sizeof(response))) + BUG(); + + version = ntohl(response.version); + ticket_len = ntohl(response.ticket_len); + kvno = ntohl(response.kvno); + sp = rxrpc_skb(skb); + _proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }", + ntohl(sp->hdr.serial), version, kvno, ticket_len); + + 
abort_code = RXKADINCONSISTENCY;
+	if (version != RXKAD_VERSION)
+		goto protocol_error;
+
+	abort_code = RXKADTICKETLEN;
+	if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
+		goto protocol_error;
+
+	abort_code = RXKADUNKNOWNKEY;
+	if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5)
+		goto protocol_error;
+
+	/* extract the kerberos ticket and decrypt and decode it */
+	ticket = kmalloc(ticket_len, GFP_NOFS);
+	if (!ticket)
+		return -ENOMEM;
+
+	abort_code = RXKADPACKETSHORT;
+	if (skb_copy_bits(skb, 0, ticket, ticket_len) < 0)
+		goto protocol_error_free;
+
+	ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
+				   &expiry, &abort_code);
+	if (ret < 0) {
+		*_abort_code = abort_code;
+		kfree(ticket);
+		return ret;
+	}
+
+	/* use the session key from inside the ticket to decrypt the
+	 * response */
+	rxkad_decrypt_response(conn, &response, &session_key);
+
+	abort_code = RXKADSEALEDINCON;
+	if (response.encrypted.epoch != conn->epoch)
+		goto protocol_error_free;
+	if (response.encrypted.cid != conn->cid)
+		goto protocol_error_free;
+	if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
+		goto protocol_error_free;
+	csum = response.encrypted.checksum;
+	response.encrypted.checksum = 0;
+	rxkad_calc_response_checksum(&response);
+	if (response.encrypted.checksum != csum)
+		goto protocol_error_free;
+
+	if (ntohl(response.encrypted.call_id[0]) > INT_MAX ||
+	    ntohl(response.encrypted.call_id[1]) > INT_MAX ||
+	    ntohl(response.encrypted.call_id[2]) > INT_MAX ||
+	    ntohl(response.encrypted.call_id[3]) > INT_MAX)
+		goto protocol_error_free;
+
+	abort_code = RXKADOUTOFSEQUENCE;
+	if (response.encrypted.inc_nonce != htonl(conn->security_nonce + 1))
+		goto protocol_error_free;
+
+	abort_code = RXKADLEVELFAIL;
+	level = ntohl(response.encrypted.level);
+	if (level > RXRPC_SECURITY_ENCRYPT)
+		goto protocol_error_free;
+	conn->security_level = level;
+
+	/* create a key to hold the security data and expiration time - after
+	 * this the connection security can be handled in exactly the same way
+	 * as for a client connection */
+	ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
+	if (ret < 0) {
+		kfree(ticket);
+		return ret;
+	}
+
+	kfree(ticket);
+	_leave(" = 0");
+	return 0;
+
+protocol_error_free:
+	kfree(ticket);
+protocol_error:
+	*_abort_code = abort_code;
+	_leave(" = -EPROTO [%d]", abort_code);
+	return -EPROTO;
+}
+
+/*
+ * clear the connection security
+ */
+static void rxkad_clear(struct rxrpc_connection *conn)
+{
+	_enter("");
+
+	if (conn->cipher)
+		crypto_free_blkcipher(conn->cipher);
+}
+
+/*
+ * RxRPC Kerberos-based security
+ */
+static struct rxrpc_security rxkad = {
+	.owner				= THIS_MODULE,
+	.name				= "rxkad",
+	.security_index			= RXKAD_VERSION,
+	.init_connection_security	= rxkad_init_connection_security,
+	.prime_packet_security		= rxkad_prime_packet_security,
+	.secure_packet			= rxkad_secure_packet,
+	.verify_packet			= rxkad_verify_packet,
+	.issue_challenge		= rxkad_issue_challenge,
+	.respond_to_challenge		= rxkad_respond_to_challenge,
+	.verify_response		= rxkad_verify_response,
+	.clear				= rxkad_clear,
+};
+
+static __init int rxkad_init(void)
+{
+	_enter("");
+
+	/* pin the cipher we need so that the crypto layer doesn't invoke
+	 * keventd to go get it */
+	rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(rxkad_ci))
+		return PTR_ERR(rxkad_ci);
+
+	return rxrpc_register_security(&rxkad);
+}
+
+module_init(rxkad_init);
+
+static __exit void rxkad_exit(void)
+{
+	_enter("");
+
+	rxrpc_unregister_security(&rxkad);
+	crypto_free_blkcipher(rxkad_ci);
+} + +module_exit(rxkad_exit); -- cgit v1.2.3-59-g8ed1b From 651350d10f93bed7003c9a66e24cf25e0f8eed3d Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 26 Apr 2007 15:50:17 -0700 Subject: [AF_RXRPC]: Add an interface to the AF_RXRPC module for the AFS filesystem to use Add an interface to the AF_RXRPC module so that the AFS filesystem module can more easily make use of the services available. AFS still opens a socket but then uses the action functions in lieu of sendmsg() and registers an intercept functions to grab messages before they're queued on the socket Rx queue. This permits AFS (or whatever) to: (1) Avoid the overhead of using the recvmsg() call. (2) Use different keys directly on individual client calls on one socket rather than having to open a whole slew of sockets, one for each key it might want to use. (3) Avoid calling request_key() at the point of issue of a call or opening of a socket. This is done instead by AFS at the point of open(), unlink() or other VFS operation and the key handed through. (4) Request the use of something other than GFP_KERNEL to allocate memory. Furthermore: (*) The socket buffer markings used by RxRPC are made available for AFS so that it can interpret the cooked RxRPC messages itself. (*) rxgen (un)marshalling abort codes are made available. The following documentation for the kernel interface is added to Documentation/networking/rxrpc.txt: ========================= AF_RXRPC KERNEL INTERFACE ========================= The AF_RXRPC module also provides an interface for use by in-kernel utilities such as the AFS filesystem. This permits such a utility to: (1) Use different keys directly on individual client calls on one socket rather than having to open a whole slew of sockets, one for each key it might want to use. (2) Avoid having RxRPC call request_key() at the point of issue of a call or opening of a socket. Instead the utility is responsible for requesting a key at the appropriate point. AFS, for instance, would do this during VFS operations such as open() or unlink(). The key is then handed through when the call is initiated. (3) Request the use of something other than GFP_KERNEL to allocate memory. (4) Avoid the overhead of using the recvmsg() call. RxRPC messages can be intercepted before they get put into the socket Rx queue and the socket buffers manipulated directly. To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket, bind an addess as appropriate and listen if it's to be a server socket, but then it passes this to the kernel interface functions. The kernel interface functions are as follows: (*) Begin a new client call. struct rxrpc_call * rxrpc_kernel_begin_call(struct socket *sock, struct sockaddr_rxrpc *srx, struct key *key, unsigned long user_call_ID, gfp_t gfp); This allocates the infrastructure to make a new RxRPC call and assigns call and connection numbers. The call will be made on the UDP port that the socket is bound to. The call will go to the destination address of a connected client socket unless an alternative is supplied (srx is non-NULL). If a key is supplied then this will be used to secure the call instead of the key bound to the socket with the RXRPC_SECURITY_KEY sockopt. Calls secured in this way will still share connections if at all possible. The user_call_ID is equivalent to that supplied to sendmsg() in the control data buffer. It is entirely feasible to use this to point to a kernel data structure. If this function is successful, an opaque reference to the RxRPC call is returned. 
The caller now holds a reference on this and it must be properly ended. (*) End a client call. void rxrpc_kernel_end_call(struct rxrpc_call *call); This is used to end a previously begun call. The user_call_ID is expunged from AF_RXRPC's knowledge and will not be seen again in association with the specified call. (*) Send data through a call. int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg, size_t len); This is used to supply either the request part of a client call or the reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the data buffers to be used. msg_iov may not be NULL and must point exclusively to in-kernel virtual addresses. msg.msg_flags may be given MSG_MORE if there will be subsequent data sends for this call. The msg must not specify a destination address, control data or any flags other than MSG_MORE. len is the total amount of data to transmit. (*) Abort a call. void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code); This is used to abort a call if it's still in an abortable state. The abort code specified will be placed in the ABORT message sent. (*) Intercept received RxRPC messages. typedef void (*rxrpc_interceptor_t)(struct sock *sk, unsigned long user_call_ID, struct sk_buff *skb); void rxrpc_kernel_intercept_rx_messages(struct socket *sock, rxrpc_interceptor_t interceptor); This installs an interceptor function on the specified AF_RXRPC socket. All messages that would otherwise wind up in the socket's Rx queue are then diverted to this function. Note that care must be taken to process the messages in the right order to maintain DATA message sequentiality. The interceptor function itself is provided with the address of the socket and handling the incoming message, the ID assigned by the kernel utility to the call and the socket buffer containing the message. The skb->mark field indicates the type of message: MARK MEANING =============================== ======================================= RXRPC_SKB_MARK_DATA Data message RXRPC_SKB_MARK_FINAL_ACK Final ACK received for an incoming call RXRPC_SKB_MARK_BUSY Client call rejected as server busy RXRPC_SKB_MARK_REMOTE_ABORT Call aborted by peer RXRPC_SKB_MARK_NET_ERROR Network error detected RXRPC_SKB_MARK_LOCAL_ERROR Local error encountered RXRPC_SKB_MARK_NEW_CALL New incoming call awaiting acceptance The remote abort message can be probed with rxrpc_kernel_get_abort_code(). The two error messages can be probed with rxrpc_kernel_get_error_number(). A new call can be accepted with rxrpc_kernel_accept_call(). Data messages can have their contents extracted with the usual bunch of socket buffer manipulation functions. A data message can be determined to be the last one in a sequence with rxrpc_kernel_is_data_last(). When a data message has been used up, rxrpc_kernel_data_delivered() should be called on it.. Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose of. It is possible to get extra refs on all types of message for later freeing, but this may pin the state of a call until the message is finally freed. (*) Accept an incoming call. struct rxrpc_call * rxrpc_kernel_accept_call(struct socket *sock, unsigned long user_call_ID); This is used to accept an incoming call and to assign it a call ID. This function is similar to rxrpc_kernel_begin_call() and calls accepted must be ended in the same way. If this function is successful, an opaque reference to the RxRPC call is returned. The caller now holds a reference on this and it must be properly ended. 
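As an illustration of how the calls described above fit together, the following is a minimal, hypothetical sketch (not part of this patch) of a kernel service issuing a one-shot request on an AF_RXRPC socket that has already been created, bound and given a security key. The helper name example_issue_request, the my_sock and my_key parameters and the request buffer are assumptions made purely for the example; the rxrpc_kernel_*() functions are the interface described here, and RX_USER_ABORT is assumed to be the pre-existing abort code from include/rxrpc/packet.h.

	/* Hypothetical example only -- not part of this patch. */
	#include <linux/err.h>
	#include <linux/net.h>
	#include <linux/uio.h>
	#include <linux/key.h>
	#include <net/af_rxrpc.h>
	#include <rxrpc/packet.h>

	static int example_issue_request(struct socket *my_sock,
					 struct key *my_key,
					 void *request, size_t reqlen)
	{
		struct rxrpc_call *call;
		struct msghdr msg;
		struct iovec iov[1];
		int ret;

		/* tag the call with an arbitrary user_call_ID; a pointer to
		 * the caller's own tracking structure is the usual choice */
		call = rxrpc_kernel_begin_call(my_sock, NULL, my_key,
					       (unsigned long) request,
					       GFP_KERNEL);
		if (IS_ERR(call))
			return PTR_ERR(call);

		/* the buffer must be a kernel virtual address */
		iov[0].iov_base = request;
		iov[0].iov_len = reqlen;

		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_iov = iov;
		msg.msg_iovlen = 1;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = 0;	/* no MSG_MORE: whole request in one go */

		ret = rxrpc_kernel_send_data(call, &msg, reqlen);
		if (ret < 0)
			rxrpc_kernel_abort_call(call, RX_USER_ABORT);

		/* the reply would be collected through recvmsg() or through an
		 * interceptor; once the call is complete it must be ended */
		rxrpc_kernel_end_call(call);
		return ret;
	}

The reply-handling side is deliberately omitted: a real service would either read the reply with recvmsg() or install an interceptor as described above, and would only end the call once the final message for it had been seen.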
(*) Reject an incoming call. int rxrpc_kernel_reject_call(struct socket *sock); This is used to reject the first incoming call on the socket's queue with a BUSY message. -ENODATA is returned if there were no incoming calls. Other errors may be returned if the call had been aborted (-ECONNABORTED) or had timed out (-ETIME). (*) Record the delivery of a data message and free it. void rxrpc_kernel_data_delivered(struct sk_buff *skb); This is used to record a data message as having been delivered and to update the ACK state for the call. The socket buffer will be freed. (*) Free a message. void rxrpc_kernel_free_skb(struct sk_buff *skb); This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC socket. (*) Determine if a data message is the last one on a call. bool rxrpc_kernel_is_data_last(struct sk_buff *skb); This is used to determine if a socket buffer holds the last data message to be received for a call (true will be returned if it does, false if not). The data message will be part of the reply on a client call and the request on an incoming call. In the latter case there will be more messages, but in the former case there will not. (*) Get the abort code from an abort message. u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb); This is used to extract the abort code from a remote abort message. (*) Get the error number from a local or network error message. int rxrpc_kernel_get_error_number(struct sk_buff *skb); This is used to extract the error number from a message indicating either a local error occurred or a network error occurred. Signed-off-by: David Howells Signed-off-by: David S. Miller --- Documentation/networking/rxrpc.txt | 196 +++++++++++++++++++++++++++++++++++++ include/net/af_rxrpc.h | 44 ++++++++- include/rxrpc/packet.h | 12 +++ net/rxrpc/af_rxrpc.c | 141 ++++++++++++++++++++++++-- net/rxrpc/ar-accept.c | 119 ++++++++++++++++++++-- net/rxrpc/ar-ack.c | 10 +- net/rxrpc/ar-call.c | 75 ++++++++------ net/rxrpc/ar-connection.c | 28 ++++-- net/rxrpc/ar-connevent.c | 20 +++- net/rxrpc/ar-error.c | 6 +- net/rxrpc/ar-input.c | 60 +++++++----- net/rxrpc/ar-internal.h | 44 ++++----- net/rxrpc/ar-local.c | 2 +- net/rxrpc/ar-output.c | 84 +++++++++++++++- net/rxrpc/ar-peer.c | 2 +- net/rxrpc/ar-recvmsg.c | 75 +++++++++++++- net/rxrpc/ar-skbuff.c | 16 ++- net/rxrpc/ar-transport.c | 8 +- 18 files changed, 813 insertions(+), 129 deletions(-) (limited to 'include') diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index fb809b738a0d..cae231b1c134 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt @@ -25,6 +25,8 @@ Contents of this document: (*) Example server usage. + (*) AF_RXRPC kernel interface. + ======== OVERVIEW @@ -661,3 +663,197 @@ A server would be set up to accept operations in the following manner: Note that all the communications for a particular service take place through the one server socket, using control messages on sendmsg() and recvmsg() to determine the call affected. + + +========================= +AF_RXRPC KERNEL INTERFACE +========================= + +The AF_RXRPC module also provides an interface for use by in-kernel utilities +such as the AFS filesystem. This permits such a utility to: + + (1) Use different keys directly on individual client calls on one socket + rather than having to open a whole slew of sockets, one for each key it + might want to use. + + (2) Avoid having RxRPC call request_key() at the point of issue of a call or + opening of a socket. 
Instead the utility is responsible for requesting a + key at the appropriate point. AFS, for instance, would do this during VFS + operations such as open() or unlink(). The key is then handed through + when the call is initiated. + + (3) Request the use of something other than GFP_KERNEL to allocate memory. + + (4) Avoid the overhead of using the recvmsg() call. RxRPC messages can be + intercepted before they get put into the socket Rx queue and the socket + buffers manipulated directly. + +To use the RxRPC facility, a kernel utility must still open an AF_RXRPC socket, +bind an addess as appropriate and listen if it's to be a server socket, but +then it passes this to the kernel interface functions. + +The kernel interface functions are as follows: + + (*) Begin a new client call. + + struct rxrpc_call * + rxrpc_kernel_begin_call(struct socket *sock, + struct sockaddr_rxrpc *srx, + struct key *key, + unsigned long user_call_ID, + gfp_t gfp); + + This allocates the infrastructure to make a new RxRPC call and assigns + call and connection numbers. The call will be made on the UDP port that + the socket is bound to. The call will go to the destination address of a + connected client socket unless an alternative is supplied (srx is + non-NULL). + + If a key is supplied then this will be used to secure the call instead of + the key bound to the socket with the RXRPC_SECURITY_KEY sockopt. Calls + secured in this way will still share connections if at all possible. + + The user_call_ID is equivalent to that supplied to sendmsg() in the + control data buffer. It is entirely feasible to use this to point to a + kernel data structure. + + If this function is successful, an opaque reference to the RxRPC call is + returned. The caller now holds a reference on this and it must be + properly ended. + + (*) End a client call. + + void rxrpc_kernel_end_call(struct rxrpc_call *call); + + This is used to end a previously begun call. The user_call_ID is expunged + from AF_RXRPC's knowledge and will not be seen again in association with + the specified call. + + (*) Send data through a call. + + int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg, + size_t len); + + This is used to supply either the request part of a client call or the + reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the + data buffers to be used. msg_iov may not be NULL and must point + exclusively to in-kernel virtual addresses. msg.msg_flags may be given + MSG_MORE if there will be subsequent data sends for this call. + + The msg must not specify a destination address, control data or any flags + other than MSG_MORE. len is the total amount of data to transmit. + + (*) Abort a call. + + void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code); + + This is used to abort a call if it's still in an abortable state. The + abort code specified will be placed in the ABORT message sent. + + (*) Intercept received RxRPC messages. + + typedef void (*rxrpc_interceptor_t)(struct sock *sk, + unsigned long user_call_ID, + struct sk_buff *skb); + + void + rxrpc_kernel_intercept_rx_messages(struct socket *sock, + rxrpc_interceptor_t interceptor); + + This installs an interceptor function on the specified AF_RXRPC socket. + All messages that would otherwise wind up in the socket's Rx queue are + then diverted to this function. Note that care must be taken to process + the messages in the right order to maintain DATA message sequentiality. 
+ + The interceptor function itself is provided with the address of the socket + and handling the incoming message, the ID assigned by the kernel utility + to the call and the socket buffer containing the message. + + The skb->mark field indicates the type of message: + + MARK MEANING + =============================== ======================================= + RXRPC_SKB_MARK_DATA Data message + RXRPC_SKB_MARK_FINAL_ACK Final ACK received for an incoming call + RXRPC_SKB_MARK_BUSY Client call rejected as server busy + RXRPC_SKB_MARK_REMOTE_ABORT Call aborted by peer + RXRPC_SKB_MARK_NET_ERROR Network error detected + RXRPC_SKB_MARK_LOCAL_ERROR Local error encountered + RXRPC_SKB_MARK_NEW_CALL New incoming call awaiting acceptance + + The remote abort message can be probed with rxrpc_kernel_get_abort_code(). + The two error messages can be probed with rxrpc_kernel_get_error_number(). + A new call can be accepted with rxrpc_kernel_accept_call(). + + Data messages can have their contents extracted with the usual bunch of + socket buffer manipulation functions. A data message can be determined to + be the last one in a sequence with rxrpc_kernel_is_data_last(). When a + data message has been used up, rxrpc_kernel_data_delivered() should be + called on it.. + + Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose + of. It is possible to get extra refs on all types of message for later + freeing, but this may pin the state of a call until the message is finally + freed. + + (*) Accept an incoming call. + + struct rxrpc_call * + rxrpc_kernel_accept_call(struct socket *sock, + unsigned long user_call_ID); + + This is used to accept an incoming call and to assign it a call ID. This + function is similar to rxrpc_kernel_begin_call() and calls accepted must + be ended in the same way. + + If this function is successful, an opaque reference to the RxRPC call is + returned. The caller now holds a reference on this and it must be + properly ended. + + (*) Reject an incoming call. + + int rxrpc_kernel_reject_call(struct socket *sock); + + This is used to reject the first incoming call on the socket's queue with + a BUSY message. -ENODATA is returned if there were no incoming calls. + Other errors may be returned if the call had been aborted (-ECONNABORTED) + or had timed out (-ETIME). + + (*) Record the delivery of a data message and free it. + + void rxrpc_kernel_data_delivered(struct sk_buff *skb); + + This is used to record a data message as having been delivered and to + update the ACK state for the call. The socket buffer will be freed. + + (*) Free a message. + + void rxrpc_kernel_free_skb(struct sk_buff *skb); + + This is used to free a non-DATA socket buffer intercepted from an AF_RXRPC + socket. + + (*) Determine if a data message is the last one on a call. + + bool rxrpc_kernel_is_data_last(struct sk_buff *skb); + + This is used to determine if a socket buffer holds the last data message + to be received for a call (true will be returned if it does, false + if not). + + The data message will be part of the reply on a client call and the + request on an incoming call. In the latter case there will be more + messages, but in the former case there will not. + + (*) Get the abort code from an abort message. + + u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb); + + This is used to extract the abort code from a remote abort message. + + (*) Get the error number from a local or network error message. 
+ + int rxrpc_kernel_get_error_number(struct sk_buff *skb); + + This is used to extract the error number from a message indicating either + a local error occurred or a network error occurred. diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index b01ca2589d69..00c2eaa07c25 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -1,6 +1,6 @@ -/* RxRPC definitions +/* RxRPC kernel service interface definitions * - * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -12,6 +12,46 @@ #ifndef _NET_RXRPC_H #define _NET_RXRPC_H +#ifdef __KERNEL__ + #include +struct rxrpc_call; + +/* + * the mark applied to socket buffers that may be intercepted + */ +enum { + RXRPC_SKB_MARK_DATA, /* data message */ + RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */ + RXRPC_SKB_MARK_BUSY, /* server busy message */ + RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */ + RXRPC_SKB_MARK_NET_ERROR, /* network error message */ + RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */ + RXRPC_SKB_MARK_NEW_CALL, /* local error message */ +}; + +typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long, + struct sk_buff *); +extern void rxrpc_kernel_intercept_rx_messages(struct socket *, + rxrpc_interceptor_t); +extern struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, + struct sockaddr_rxrpc *, + struct key *, + unsigned long, + gfp_t); +extern int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, + size_t); +extern void rxrpc_kernel_abort_call(struct rxrpc_call *, u32); +extern void rxrpc_kernel_end_call(struct rxrpc_call *); +extern bool rxrpc_kernel_is_data_last(struct sk_buff *); +extern u32 rxrpc_kernel_get_abort_code(struct sk_buff *); +extern int rxrpc_kernel_get_error_number(struct sk_buff *); +extern void rxrpc_kernel_data_delivered(struct sk_buff *); +extern void rxrpc_kernel_free_skb(struct sk_buff *); +extern struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, + unsigned long); +extern int rxrpc_kernel_reject_call(struct socket *); + +#endif /* __KERNEL__ */ #endif /* _NET_RXRPC_H */ diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index 452a9bb02d48..09b11a1e8d46 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -185,6 +185,18 @@ struct rxkad_response { #define RX_ADDRINUSE -7 /* UDP port in use */ #define RX_DEBUGI_BADTYPE -8 /* bad debugging packet type */ +/* + * (un)marshalling abort codes (rxgen) + */ +#define RXGEN_CC_MARSHAL -450 +#define RXGEN_CC_UNMARSHAL -451 +#define RXGEN_SS_MARSHAL -452 +#define RXGEN_SS_UNMARSHAL -453 +#define RXGEN_DECODE -454 +#define RXGEN_OPCODE -455 +#define RXGEN_SS_XDRFREE -456 +#define RXGEN_CC_XDRFREE -457 + /* * Rx kerberos security abort codes * - unfortunately we have no generalised security abort codes to say things diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index bfa8822e2286..2c57df9c131b 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -41,6 +41,8 @@ atomic_t rxrpc_debug_id; /* count of skbs currently in use */ atomic_t rxrpc_n_skbs; +struct workqueue_struct *rxrpc_workqueue; + static void rxrpc_sock_destructor(struct sock *); /* @@ -214,7 +216,8 @@ static int rxrpc_listen(struct socket *sock, int backlog) */ static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock, struct sockaddr *addr, - int addr_len, int flags) + int addr_len, int 
flags, + gfp_t gfp) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; struct rxrpc_transport *trans; @@ -232,17 +235,129 @@ static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock, return ERR_PTR(-EAFNOSUPPORT); /* find a remote transport endpoint from the local one */ - peer = rxrpc_get_peer(srx, GFP_KERNEL); + peer = rxrpc_get_peer(srx, gfp); if (IS_ERR(peer)) return ERR_PTR(PTR_ERR(peer)); /* find a transport */ - trans = rxrpc_get_transport(rx->local, peer, GFP_KERNEL); + trans = rxrpc_get_transport(rx->local, peer, gfp); rxrpc_put_peer(peer); _leave(" = %p", trans); return trans; } +/** + * rxrpc_kernel_begin_call - Allow a kernel service to begin a call + * @sock: The socket on which to make the call + * @srx: The address of the peer to contact (defaults to socket setting) + * @key: The security context to use (defaults to socket setting) + * @user_call_ID: The ID to use + * + * Allow a kernel service to begin a call on the nominated socket. This just + * sets up all the internal tracking structures and allocates connection and + * call IDs as appropriate. The call to be used is returned. + * + * The default socket destination address and security may be overridden by + * supplying @srx and @key. + */ +struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, + struct sockaddr_rxrpc *srx, + struct key *key, + unsigned long user_call_ID, + gfp_t gfp) +{ + struct rxrpc_conn_bundle *bundle; + struct rxrpc_transport *trans; + struct rxrpc_call *call; + struct rxrpc_sock *rx = rxrpc_sk(sock->sk); + __be16 service_id; + + _enter(",,%x,%lx", key_serial(key), user_call_ID); + + lock_sock(&rx->sk); + + if (srx) { + trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx, + sizeof(*srx), 0, gfp); + if (IS_ERR(trans)) { + call = ERR_PTR(PTR_ERR(trans)); + trans = NULL; + goto out; + } + } else { + trans = rx->trans; + if (!trans) { + call = ERR_PTR(-ENOTCONN); + goto out; + } + atomic_inc(&trans->usage); + } + + service_id = rx->service_id; + if (srx) + service_id = htons(srx->srx_service); + + if (!key) + key = rx->key; + if (key && !key->payload.data) + key = NULL; /* a no-security key */ + + bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp); + if (IS_ERR(bundle)) { + call = ERR_PTR(PTR_ERR(bundle)); + goto out; + } + + call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true, + gfp); + rxrpc_put_bundle(trans, bundle); +out: + rxrpc_put_transport(trans); + release_sock(&rx->sk); + _leave(" = %p", call); + return call; +} + +EXPORT_SYMBOL(rxrpc_kernel_begin_call); + +/** + * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using + * @call: The call to end + * + * Allow a kernel service to end a call it was using. The call must be + * complete before this is called (the call should be aborted if necessary). + */ +void rxrpc_kernel_end_call(struct rxrpc_call *call) +{ + _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); + rxrpc_remove_user_ID(call->socket, call); + rxrpc_put_call(call); +} + +EXPORT_SYMBOL(rxrpc_kernel_end_call); + +/** + * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages + * @sock: The socket to intercept received messages on + * @interceptor: The function to pass the messages to + * + * Allow a kernel service to intercept messages heading for the Rx queue on an + * RxRPC socket. They get passed to the specified function instead. + * @interceptor should free the socket buffers it is given. 
@interceptor is + * called with the socket receive queue spinlock held and softirqs disabled - + * this ensures that the messages will be delivered in the right order. + */ +void rxrpc_kernel_intercept_rx_messages(struct socket *sock, + rxrpc_interceptor_t interceptor) +{ + struct rxrpc_sock *rx = rxrpc_sk(sock->sk); + + _enter(""); + rx->interceptor = interceptor; +} + +EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages); + /* * connect an RxRPC socket * - this just targets it at a specific destination; no actual connection @@ -294,7 +409,8 @@ static int rxrpc_connect(struct socket *sock, struct sockaddr *addr, return -EBUSY; /* server sockets can't connect as well */ } - trans = rxrpc_name_to_transport(sock, addr, addr_len, flags); + trans = rxrpc_name_to_transport(sock, addr, addr_len, flags, + GFP_KERNEL); if (IS_ERR(trans)) { release_sock(&rx->sk); _leave(" = %ld", PTR_ERR(trans)); @@ -344,7 +460,7 @@ static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock, if (m->msg_name) { ret = -EISCONN; trans = rxrpc_name_to_transport(sock, m->msg_name, - m->msg_namelen, 0); + m->msg_namelen, 0, GFP_KERNEL); if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; @@ -576,7 +692,7 @@ static int rxrpc_release_sock(struct sock *sk) /* try to flush out this socket */ rxrpc_release_calls_on_socket(rx); - flush_scheduled_work(); + flush_workqueue(rxrpc_workqueue); rxrpc_purge_queue(&sk->sk_receive_queue); if (rx->conn) { @@ -673,15 +789,21 @@ static int __init af_rxrpc_init(void) rxrpc_epoch = htonl(xtime.tv_sec); + ret = -ENOMEM; rxrpc_call_jar = kmem_cache_create( "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, SLAB_HWCACHE_ALIGN, NULL, NULL); if (!rxrpc_call_jar) { printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n"); - ret = -ENOMEM; goto error_call_jar; } + rxrpc_workqueue = create_workqueue("krxrpcd"); + if (!rxrpc_workqueue) { + printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n"); + goto error_work_queue; + } + ret = proto_register(&rxrpc_proto, 1); if (ret < 0) { printk(KERN_CRIT "RxRPC: Cannot register protocol\n"); @@ -719,6 +841,8 @@ error_key_type: error_sock: proto_unregister(&rxrpc_proto); error_proto: + destroy_workqueue(rxrpc_workqueue); +error_work_queue: kmem_cache_destroy(rxrpc_call_jar); error_call_jar: return ret; @@ -743,9 +867,10 @@ static void __exit af_rxrpc_exit(void) ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); _debug("flush scheduled work"); - flush_scheduled_work(); + flush_workqueue(rxrpc_workqueue); proc_net_remove("rxrpc_conns"); proc_net_remove("rxrpc_calls"); + destroy_workqueue(rxrpc_workqueue); kmem_cache_destroy(rxrpc_call_jar); _leave(""); } diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c index e7af780cd6f9..92a87fde8bfe 100644 --- a/net/rxrpc/ar-accept.c +++ b/net/rxrpc/ar-accept.c @@ -139,7 +139,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, call->conn->state = RXRPC_CONN_SERVER_CHALLENGING; atomic_inc(&call->conn->usage); set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events); - schedule_work(&call->conn->processor); + rxrpc_queue_conn(call->conn); } else { _debug("conn ready"); call->state = RXRPC_CALL_SERVER_ACCEPTING; @@ -183,7 +183,7 @@ invalid_service: if (!test_bit(RXRPC_CALL_RELEASE, &call->flags) && !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) { rxrpc_get_call(call); - schedule_work(&call->processor); + rxrpc_queue_call(call); } read_unlock_bh(&call->state_lock); rxrpc_put_call(call); @@ -310,7 +310,8 @@ security_mismatch: * handle acceptance of a call by userspace * - assign the user call ID 
to the call at the front of the queue */ -int rxrpc_accept_call(struct rxrpc_sock *rx, unsigned long user_call_ID) +struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, + unsigned long user_call_ID) { struct rxrpc_call *call; struct rb_node *parent, **pp; @@ -374,12 +375,76 @@ int rxrpc_accept_call(struct rxrpc_sock *rx, unsigned long user_call_ID) BUG(); if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events)) BUG(); - schedule_work(&call->processor); + rxrpc_queue_call(call); + rxrpc_get_call(call); write_unlock_bh(&call->state_lock); write_unlock(&rx->call_lock); - _leave(" = 0"); - return 0; + _leave(" = %p{%d}", call, call->debug_id); + return call; + + /* if the call is already dying or dead, then we leave the socket's ref + * on it to be released by rxrpc_dead_call_expired() as induced by + * rxrpc_release_call() */ +out_release: + _debug("release %p", call); + if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && + !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) + rxrpc_queue_call(call); +out_discard: + write_unlock_bh(&call->state_lock); + _debug("discard %p", call); +out: + write_unlock(&rx->call_lock); + _leave(" = %d", ret); + return ERR_PTR(ret); +} + +/* + * handle rejectance of a call by userspace + * - reject the call at the front of the queue + */ +int rxrpc_reject_call(struct rxrpc_sock *rx) +{ + struct rxrpc_call *call; + int ret; + + _enter(""); + + ASSERT(!irqs_disabled()); + + write_lock(&rx->call_lock); + + ret = -ENODATA; + if (list_empty(&rx->acceptq)) + goto out; + + /* dequeue the first call and check it's still valid */ + call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link); + list_del_init(&call->accept_link); + sk_acceptq_removed(&rx->sk); + + write_lock_bh(&call->state_lock); + switch (call->state) { + case RXRPC_CALL_SERVER_ACCEPTING: + call->state = RXRPC_CALL_SERVER_BUSY; + if (test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) + rxrpc_queue_call(call); + ret = 0; + goto out_release; + case RXRPC_CALL_REMOTELY_ABORTED: + case RXRPC_CALL_LOCALLY_ABORTED: + ret = -ECONNABORTED; + goto out_release; + case RXRPC_CALL_NETWORK_ERROR: + ret = call->conn->error; + goto out_release; + case RXRPC_CALL_DEAD: + ret = -ETIME; + goto out_discard; + default: + BUG(); + } /* if the call is already dying or dead, then we leave the socket's ref * on it to be released by rxrpc_dead_call_expired() as induced by @@ -388,7 +453,7 @@ out_release: _debug("release %p", call); if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); out_discard: write_unlock_bh(&call->state_lock); _debug("discard %p", call); @@ -397,3 +462,43 @@ out: _leave(" = %d", ret); return ret; } + +/** + * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call + * @sock: The socket on which the impending call is waiting + * @user_call_ID: The tag to attach to the call + * + * Allow a kernel service to accept an incoming call, assuming the incoming + * call is still valid. 
+ */ +struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock, + unsigned long user_call_ID) +{ + struct rxrpc_call *call; + + _enter(",%lx", user_call_ID); + call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID); + _leave(" = %p", call); + return call; +} + +EXPORT_SYMBOL(rxrpc_kernel_accept_call); + +/** + * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call + * @sock: The socket on which the impending call is waiting + * + * Allow a kernel service to reject an incoming call with a BUSY message, + * assuming the incoming call is still valid. + */ +int rxrpc_kernel_reject_call(struct socket *sock) +{ + int ret; + + _enter(""); + ret = rxrpc_reject_call(rxrpc_sk(sock->sk)); + _leave(" = %d", ret); + return ret; +} + +EXPORT_SYMBOL(rxrpc_kernel_reject_call); diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index 8f7764eca96c..fc07a926df56 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -113,7 +113,7 @@ cancel_timer: read_lock_bh(&call->state_lock); if (call->state <= RXRPC_CALL_COMPLETE && !test_and_set_bit(RXRPC_CALL_ACK, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock_bh(&call->state_lock); } @@ -1166,7 +1166,7 @@ send_message_2: _debug("sendmsg failed: %d", ret); read_lock_bh(&call->state_lock); if (call->state < RXRPC_CALL_DEAD) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock_bh(&call->state_lock); goto error; } @@ -1210,7 +1210,7 @@ maybe_reschedule: if (call->events || !skb_queue_empty(&call->rx_queue)) { read_lock_bh(&call->state_lock); if (call->state < RXRPC_CALL_DEAD) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock_bh(&call->state_lock); } @@ -1224,7 +1224,7 @@ maybe_reschedule: read_lock_bh(&call->state_lock); if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock_bh(&call->state_lock); } @@ -1238,7 +1238,7 @@ error: * work pending bit and the work item being processed again */ if (call->events && !work_pending(&call->processor)) { _debug("jumpstart %x", ntohl(call->conn->cid)); - schedule_work(&call->processor); + rxrpc_queue_call(call); } _leave(""); diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c index ac31cceda2f1..4d92d88ff1fc 100644 --- a/net/rxrpc/ar-call.c +++ b/net/rxrpc/ar-call.c @@ -19,7 +19,7 @@ struct kmem_cache *rxrpc_call_jar; LIST_HEAD(rxrpc_calls); DEFINE_RWLOCK(rxrpc_call_lock); static unsigned rxrpc_call_max_lifetime = 60; -static unsigned rxrpc_dead_call_timeout = 10; +static unsigned rxrpc_dead_call_timeout = 2; static void rxrpc_destroy_call(struct work_struct *work); static void rxrpc_call_life_expired(unsigned long _call); @@ -264,7 +264,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, switch (call->state) { case RXRPC_CALL_LOCALLY_ABORTED: if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); case RXRPC_CALL_REMOTELY_ABORTED: read_unlock(&call->state_lock); goto aborted_call; @@ -398,6 +398,7 @@ found_extant_call: */ void rxrpc_release_call(struct rxrpc_call *call) { + struct rxrpc_connection *conn = call->conn; struct rxrpc_sock *rx = call->socket; _enter("{%d,%d,%d,%d}", @@ -413,8 +414,7 @@ void rxrpc_release_call(struct rxrpc_call *call) /* dissociate from the socket * - the socket's ref on the call is passed to the death timer */ - _debug("RELEASE CALL %p (%d CONN %p)", - call, call->debug_id, 
call->conn); + _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn); write_lock_bh(&rx->call_lock); if (!list_empty(&call->accept_link)) { @@ -430,24 +430,42 @@ void rxrpc_release_call(struct rxrpc_call *call) } write_unlock_bh(&rx->call_lock); - if (call->conn->out_clientflag) - spin_lock(&call->conn->trans->client_lock); - write_lock_bh(&call->conn->lock); - /* free up the channel for reuse */ - if (call->conn->out_clientflag) { - call->conn->avail_calls++; - if (call->conn->avail_calls == RXRPC_MAXCALLS) - list_move_tail(&call->conn->bundle_link, - &call->conn->bundle->unused_conns); - else if (call->conn->avail_calls == 1) - list_move_tail(&call->conn->bundle_link, - &call->conn->bundle->avail_conns); + spin_lock(&conn->trans->client_lock); + write_lock_bh(&conn->lock); + write_lock(&call->state_lock); + + if (conn->channels[call->channel] == call) + conn->channels[call->channel] = NULL; + + if (conn->out_clientflag && conn->bundle) { + conn->avail_calls++; + switch (conn->avail_calls) { + case 1: + list_move_tail(&conn->bundle_link, + &conn->bundle->avail_conns); + case 2 ... RXRPC_MAXCALLS - 1: + ASSERT(conn->channels[0] == NULL || + conn->channels[1] == NULL || + conn->channels[2] == NULL || + conn->channels[3] == NULL); + break; + case RXRPC_MAXCALLS: + list_move_tail(&conn->bundle_link, + &conn->bundle->unused_conns); + ASSERT(conn->channels[0] == NULL && + conn->channels[1] == NULL && + conn->channels[2] == NULL && + conn->channels[3] == NULL); + break; + default: + printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n", + conn->avail_calls); + BUG(); + } } - write_lock(&call->state_lock); - if (call->conn->channels[call->channel] == call) - call->conn->channels[call->channel] = NULL; + spin_unlock(&conn->trans->client_lock); if (call->state < RXRPC_CALL_COMPLETE && call->state != RXRPC_CALL_CLIENT_FINAL_ACK) { @@ -455,13 +473,12 @@ void rxrpc_release_call(struct rxrpc_call *call) call->state = RXRPC_CALL_LOCALLY_ABORTED; call->abort_code = RX_CALL_DEAD; set_bit(RXRPC_CALL_ABORT, &call->events); - schedule_work(&call->processor); + rxrpc_queue_call(call); } write_unlock(&call->state_lock); - write_unlock_bh(&call->conn->lock); - if (call->conn->out_clientflag) - spin_unlock(&call->conn->trans->client_lock); + write_unlock_bh(&conn->lock); + /* clean up the Rx queue */ if (!skb_queue_empty(&call->rx_queue) || !skb_queue_empty(&call->rx_oos_queue)) { struct rxrpc_skb_priv *sp; @@ -538,7 +555,7 @@ static void rxrpc_mark_call_released(struct rxrpc_call *call) if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) sched = true; if (sched) - schedule_work(&call->processor); + rxrpc_queue_call(call); } write_unlock(&call->state_lock); } @@ -588,7 +605,7 @@ void __rxrpc_put_call(struct rxrpc_call *call) if (atomic_dec_and_test(&call->usage)) { _debug("call %d dead", call->debug_id); ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); - schedule_work(&call->destroyer); + rxrpc_queue_work(&call->destroyer); } _leave(""); } @@ -613,7 +630,7 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call) ASSERTCMP(call->events, ==, 0); if (work_pending(&call->processor)) { _debug("defer destroy"); - schedule_work(&call->destroyer); + rxrpc_queue_work(&call->destroyer); return; } @@ -742,7 +759,7 @@ static void rxrpc_call_life_expired(unsigned long _call) read_lock_bh(&call->state_lock); if (call->state < RXRPC_CALL_COMPLETE) { set_bit(RXRPC_CALL_LIFE_TIMER, &call->events); - schedule_work(&call->processor); + rxrpc_queue_call(call); } read_unlock_bh(&call->state_lock); } @@ -763,7 +780,7 @@ 
static void rxrpc_resend_time_expired(unsigned long _call) clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); if (call->state < RXRPC_CALL_COMPLETE && !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock_bh(&call->state_lock); } @@ -782,6 +799,6 @@ static void rxrpc_ack_time_expired(unsigned long _call) read_lock_bh(&call->state_lock); if (call->state < RXRPC_CALL_COMPLETE && !test_and_set_bit(RXRPC_CALL_ACK, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock_bh(&call->state_lock); } diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c index 01eb33c30571..43cb3e051ece 100644 --- a/net/rxrpc/ar-connection.c +++ b/net/rxrpc/ar-connection.c @@ -356,7 +356,7 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, conn->out_clientflag = RXRPC_CLIENT_INITIATED; conn->cid = 0; conn->state = RXRPC_CONN_CLIENT; - conn->avail_calls = RXRPC_MAXCALLS; + conn->avail_calls = RXRPC_MAXCALLS - 1; conn->security_level = rx->min_sec_level; conn->key = key_get(rx->key); @@ -447,6 +447,11 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, if (--conn->avail_calls == 0) list_move(&conn->bundle_link, &bundle->busy_conns); + ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS); + ASSERT(conn->channels[0] == NULL || + conn->channels[1] == NULL || + conn->channels[2] == NULL || + conn->channels[3] == NULL); atomic_inc(&conn->usage); break; } @@ -456,6 +461,12 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, conn = list_entry(bundle->unused_conns.next, struct rxrpc_connection, bundle_link); + ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS); + conn->avail_calls = RXRPC_MAXCALLS - 1; + ASSERT(conn->channels[0] == NULL && + conn->channels[1] == NULL && + conn->channels[2] == NULL && + conn->channels[3] == NULL); atomic_inc(&conn->usage); list_move(&conn->bundle_link, &bundle->avail_conns); break; @@ -512,7 +523,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, candidate->state = RXRPC_CONN_CLIENT; candidate->avail_calls = RXRPC_MAXCALLS; candidate->security_level = rx->min_sec_level; - candidate->key = key_get(rx->key); + candidate->key = key_get(bundle->key); ret = rxrpc_init_client_conn_security(candidate); if (ret < 0) { @@ -555,6 +566,10 @@ int rxrpc_connect_call(struct rxrpc_sock *rx, for (chan = 0; chan < RXRPC_MAXCALLS; chan++) if (!conn->channels[chan]) goto found_channel; + ASSERT(conn->channels[0] == NULL || + conn->channels[1] == NULL || + conn->channels[2] == NULL || + conn->channels[3] == NULL); BUG(); found_channel: @@ -567,6 +582,7 @@ found_channel: _net("CONNECT client on conn %d chan %d as call %x", conn->debug_id, chan, ntohl(call->call_id)); + ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS); spin_unlock(&trans->client_lock); rxrpc_add_call_ID_to_conn(conn, call); @@ -778,7 +794,7 @@ void rxrpc_put_connection(struct rxrpc_connection *conn) conn->put_time = xtime.tv_sec; if (atomic_dec_and_test(&conn->usage)) { _debug("zombie"); - schedule_delayed_work(&rxrpc_connection_reap, 0); + rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0); } _leave(""); @@ -862,8 +878,8 @@ void rxrpc_connection_reaper(struct work_struct *work) if (earliest != ULONG_MAX) { _debug("reschedule reaper %ld", (long) earliest - now); ASSERTCMP(earliest, >, now); - schedule_delayed_work(&rxrpc_connection_reap, - (earliest - now) * HZ); + rxrpc_queue_delayed_work(&rxrpc_connection_reap, + (earliest - now) * HZ); } /* then destroy all those pulled out */ @@ -889,7 +905,7 @@ void __exit 
rxrpc_destroy_all_connections(void) rxrpc_connection_timeout = 0; cancel_delayed_work(&rxrpc_connection_reap); - schedule_delayed_work(&rxrpc_connection_reap, 0); + rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0); _leave(""); } diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c index 4b02815c1ded..1ada43d51165 100644 --- a/net/rxrpc/ar-connevent.c +++ b/net/rxrpc/ar-connevent.c @@ -45,7 +45,7 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state, set_bit(RXRPC_CALL_CONN_ABORT, &call->events); else set_bit(RXRPC_CALL_RCVD_ABORT, &call->events); - schedule_work(&call->processor); + rxrpc_queue_call(call); } write_unlock(&call->state_lock); } @@ -133,7 +133,7 @@ void rxrpc_call_is_secure(struct rxrpc_call *call) read_lock(&call->state_lock); if (call->state < RXRPC_CALL_COMPLETE && !test_and_set_bit(RXRPC_CALL_SECURED, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock(&call->state_lock); } } @@ -307,6 +307,22 @@ protocol_error: goto out; } +/* + * put a packet up for transport-level abort + */ +void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) +{ + CHECK_SLAB_OKAY(&local->usage); + + if (!atomic_inc_not_zero(&local->usage)) { + printk("resurrected on reject\n"); + BUG(); + } + + skb_queue_tail(&local->reject_queue, skb); + rxrpc_queue_work(&local->rejecter); +} + /* * reject packets through the local endpoint */ diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c index f5539e2f7b58..2c27df1ffa17 100644 --- a/net/rxrpc/ar-error.c +++ b/net/rxrpc/ar-error.c @@ -111,7 +111,7 @@ void rxrpc_UDP_error_report(struct sock *sk) /* pass the transport ref to error_handler to release */ skb_queue_tail(&trans->error_queue, skb); - schedule_work(&trans->error_handler); + rxrpc_queue_work(&trans->error_handler); /* reset and regenerate socket error */ spin_lock_bh(&sk->sk_error_queue.lock); @@ -235,7 +235,7 @@ void rxrpc_UDP_error_handler(struct work_struct *work) call->state < RXRPC_CALL_NETWORK_ERROR) { call->state = RXRPC_CALL_NETWORK_ERROR; set_bit(RXRPC_CALL_RCVD_ERROR, &call->events); - schedule_work(&call->processor); + rxrpc_queue_call(call); } write_unlock(&call->state_lock); list_del_init(&call->error_link); @@ -245,7 +245,7 @@ void rxrpc_UDP_error_handler(struct work_struct *work) } if (!skb_queue_empty(&trans->error_queue)) - schedule_work(&trans->error_handler); + rxrpc_queue_work(&trans->error_handler); rxrpc_free_skb(skb); rxrpc_put_transport(trans); diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 323c3454561c..ceb5d619a1d4 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -42,6 +42,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, bool force, bool terminal) { struct rxrpc_skb_priv *sp; + struct rxrpc_sock *rx = call->socket; struct sock *sk; int skb_len, ret; @@ -64,7 +65,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, return 0; } - sk = &call->socket->sk; + sk = &rx->sk; if (!force) { /* cast skb->rcvbuf to unsigned... It's pointless, but @@ -89,25 +90,30 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, skb->sk = sk; atomic_add(skb->truesize, &sk->sk_rmem_alloc); - /* Cache the SKB length before we tack it onto the receive - * queue. Once it is added it no longer belongs to us and - * may be freed by other threads of control pulling packets - * from the queue. 
- */ - skb_len = skb->len; - - _net("post skb %p", skb); - __skb_queue_tail(&sk->sk_receive_queue, skb); - spin_unlock_bh(&sk->sk_receive_queue.lock); - - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_data_ready(sk, skb_len); - if (terminal) { _debug("<<<< TERMINAL MESSAGE >>>>"); set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags); } + /* allow interception by a kernel service */ + if (rx->interceptor) { + rx->interceptor(sk, call->user_call_ID, skb); + spin_unlock_bh(&sk->sk_receive_queue.lock); + } else { + + /* Cache the SKB length before we tack it onto the + * receive queue. Once it is added it no longer + * belongs to us and may be freed by other threads of + * control pulling packets from the queue */ + skb_len = skb->len; + + _net("post skb %p", skb); + __skb_queue_tail(&sk->sk_receive_queue, skb); + spin_unlock_bh(&sk->sk_receive_queue.lock); + + if (!sock_flag(sk, SOCK_DEAD)) + sk->sk_data_ready(sk, skb_len); + } skb = NULL; } else { spin_unlock_bh(&sk->sk_receive_queue.lock); @@ -232,7 +238,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, read_lock(&call->state_lock); if (call->state < RXRPC_CALL_COMPLETE && !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock(&call->state_lock); } @@ -267,7 +273,7 @@ enqueue_packet: atomic_inc(&call->ackr_not_idle); read_lock(&call->state_lock); if (call->state < RXRPC_CALL_DEAD) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock(&call->state_lock); _leave(" = 0 [queued]"); return 0; @@ -360,7 +366,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) call->state = RXRPC_CALL_REMOTELY_ABORTED; call->abort_code = abort_code; set_bit(RXRPC_CALL_RCVD_ABORT, &call->events); - schedule_work(&call->processor); + rxrpc_queue_call(call); } goto free_packet_unlock; @@ -375,7 +381,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) case RXRPC_CALL_CLIENT_SEND_REQUEST: call->state = RXRPC_CALL_SERVER_BUSY; set_bit(RXRPC_CALL_RCVD_BUSY, &call->events); - schedule_work(&call->processor); + rxrpc_queue_call(call); case RXRPC_CALL_SERVER_BUSY: goto free_packet_unlock; default: @@ -419,7 +425,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) read_lock_bh(&call->state_lock); if (call->state < RXRPC_CALL_DEAD) { skb_queue_tail(&call->rx_queue, skb); - schedule_work(&call->processor); + rxrpc_queue_call(call); skb = NULL; } read_unlock_bh(&call->state_lock); @@ -434,7 +440,7 @@ protocol_error_locked: call->state = RXRPC_CALL_LOCALLY_ABORTED; call->abort_code = RX_PROTOCOL_ERROR; set_bit(RXRPC_CALL_ABORT, &call->events); - schedule_work(&call->processor); + rxrpc_queue_call(call); } free_packet_unlock: write_unlock_bh(&call->state_lock); @@ -506,7 +512,7 @@ protocol_error: call->state = RXRPC_CALL_LOCALLY_ABORTED; call->abort_code = RX_PROTOCOL_ERROR; set_bit(RXRPC_CALL_ABORT, &call->events); - schedule_work(&call->processor); + rxrpc_queue_call(call); } write_unlock_bh(&call->state_lock); _leave(""); @@ -542,7 +548,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn, switch (call->state) { case RXRPC_CALL_LOCALLY_ABORTED: if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); case RXRPC_CALL_REMOTELY_ABORTED: case RXRPC_CALL_NETWORK_ERROR: case RXRPC_CALL_DEAD: @@ -591,7 +597,7 @@ dead_call: sp->hdr.seq == __constant_cpu_to_be32(1)) { _debug("incoming call"); 
skb_queue_tail(&conn->trans->local->accept_queue, skb); - schedule_work(&conn->trans->local->acceptor); + rxrpc_queue_work(&conn->trans->local->acceptor); goto done; } @@ -630,7 +636,7 @@ found_completed_call: _debug("final ack again"); rxrpc_get_call(call); set_bit(RXRPC_CALL_ACK_FINAL, &call->events); - schedule_work(&call->processor); + rxrpc_queue_call(call); free_unlock: read_unlock(&call->state_lock); @@ -651,7 +657,7 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, atomic_inc(&conn->usage); skb_queue_tail(&conn->rx_queue, skb); - schedule_work(&conn->processor); + rxrpc_queue_conn(conn); } /* @@ -767,7 +773,7 @@ cant_route_call: if (sp->hdr.seq == __constant_cpu_to_be32(1)) { _debug("first packet"); skb_queue_tail(&local->accept_queue, skb); - schedule_work(&local->acceptor); + rxrpc_queue_work(&local->acceptor); rxrpc_put_local(local); _leave(" [incoming]"); return; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 7bfbf471c81e..cb1eb492ee48 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -19,8 +19,6 @@ #define CHECK_SLAB_OKAY(X) do {} while(0) #endif -extern atomic_t rxrpc_n_skbs; - #define FCRYPT_BSIZE 8 struct rxrpc_crypt { union { @@ -29,8 +27,12 @@ struct rxrpc_crypt { }; } __attribute__((aligned(8))); -extern __be32 rxrpc_epoch; /* local epoch for detecting local-end reset */ -extern atomic_t rxrpc_debug_id; /* current debugging ID */ +#define rxrpc_queue_work(WS) queue_work(rxrpc_workqueue, (WS)) +#define rxrpc_queue_delayed_work(WS,D) \ + queue_delayed_work(rxrpc_workqueue, (WS), (D)) + +#define rxrpc_queue_call(CALL) rxrpc_queue_work(&(CALL)->processor) +#define rxrpc_queue_conn(CONN) rxrpc_queue_work(&(CONN)->processor) /* * sk_state for RxRPC sockets @@ -50,6 +52,7 @@ enum { struct rxrpc_sock { /* WARNING: sk has to be the first member */ struct sock sk; + rxrpc_interceptor_t interceptor; /* kernel service Rx interceptor function */ struct rxrpc_local *local; /* local endpoint */ struct rxrpc_transport *trans; /* transport handler */ struct rxrpc_conn_bundle *bundle; /* virtual connection bundle */ @@ -91,16 +94,6 @@ struct rxrpc_skb_priv { #define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb) -enum { - RXRPC_SKB_MARK_DATA, /* data message */ - RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */ - RXRPC_SKB_MARK_BUSY, /* server busy message */ - RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */ - RXRPC_SKB_MARK_NET_ERROR, /* network error message */ - RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */ - RXRPC_SKB_MARK_NEW_CALL, /* local error message */ -}; - enum rxrpc_command { RXRPC_CMD_SEND_DATA, /* send data message */ RXRPC_CMD_SEND_ABORT, /* request abort generation */ @@ -439,25 +432,20 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code) } /* - * put a packet up for transport-level abort + * af_rxrpc.c */ -static inline -void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) -{ - CHECK_SLAB_OKAY(&local->usage); - if (!atomic_inc_not_zero(&local->usage)) { - printk("resurrected on reject\n"); - BUG(); - } - skb_queue_tail(&local->reject_queue, skb); - schedule_work(&local->rejecter); -} +extern atomic_t rxrpc_n_skbs; +extern __be32 rxrpc_epoch; +extern atomic_t rxrpc_debug_id; +extern struct workqueue_struct *rxrpc_workqueue; /* * ar-accept.c */ extern void rxrpc_accept_incoming_calls(struct work_struct *); -extern int rxrpc_accept_call(struct rxrpc_sock *, unsigned long); +extern struct rxrpc_call *rxrpc_accept_call(struct 
rxrpc_sock *, + unsigned long); +extern int rxrpc_reject_call(struct rxrpc_sock *); /* * ar-ack.c @@ -514,6 +502,7 @@ rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *, * ar-connevent.c */ extern void rxrpc_process_connection(struct work_struct *); +extern void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *); extern void rxrpc_reject_packets(struct work_struct *); /* @@ -583,6 +572,7 @@ extern struct file_operations rxrpc_connection_seq_fops; /* * ar-recvmsg.c */ +extern void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *); extern int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t, int); diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c index a20a2c0fe105..fe03f71f17da 100644 --- a/net/rxrpc/ar-local.c +++ b/net/rxrpc/ar-local.c @@ -228,7 +228,7 @@ void rxrpc_put_local(struct rxrpc_local *local) write_lock_bh(&rxrpc_local_lock); if (unlikely(atomic_dec_and_test(&local->usage))) { _debug("destroy local"); - schedule_work(&local->destroyer); + rxrpc_queue_work(&local->destroyer); } write_unlock_bh(&rxrpc_local_lock); _leave(""); diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index 67aa9510f09b..5cdde4a48ed1 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c @@ -113,7 +113,7 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code) clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); clear_bit(RXRPC_CALL_ACK, &call->events); clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); - schedule_work(&call->processor); + rxrpc_queue_call(call); } write_unlock_bh(&call->state_lock); @@ -194,6 +194,77 @@ int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, return ret; } +/** + * rxrpc_kernel_send_data - Allow a kernel service to send data on a call + * @call: The call to send data through + * @msg: The data to send + * @len: The amount of data to send + * + * Allow a kernel service to send data on a call. The call must be in an state + * appropriate to sending data. No control data should be supplied in @msg, + * nor should an address be supplied. MSG_MORE should be flagged if there's + * more data to come, otherwise this data will end the transmission phase. + */ +int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg, + size_t len) +{ + int ret; + + _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]); + + ASSERTCMP(msg->msg_name, ==, NULL); + ASSERTCMP(msg->msg_control, ==, NULL); + + lock_sock(&call->socket->sk); + + _debug("CALL %d USR %lx ST %d on CONN %p", + call->debug_id, call->user_call_ID, call->state, call->conn); + + if (call->state >= RXRPC_CALL_COMPLETE) { + ret = -ESHUTDOWN; /* it's too late for this call */ + } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && + call->state != RXRPC_CALL_SERVER_ACK_REQUEST && + call->state != RXRPC_CALL_SERVER_SEND_REPLY) { + ret = -EPROTO; /* request phase complete for this client call */ + } else { + mm_segment_t oldfs = get_fs(); + set_fs(KERNEL_DS); + ret = rxrpc_send_data(NULL, call->socket, call, msg, len); + set_fs(oldfs); + } + + release_sock(&call->socket->sk); + _leave(" = %d", ret); + return ret; +} + +EXPORT_SYMBOL(rxrpc_kernel_send_data); + +/* + * rxrpc_kernel_abort_call - Allow a kernel service to abort a call + * @call: The call to be aborted + * @abort_code: The abort code to stick into the ABORT packet + * + * Allow a kernel service to abort a call, if it's still in an abortable state. 
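The hunk above exports rxrpc_kernel_send_data() so that an in-kernel service can push request or reply data into a call without a userspace sendmsg(). The following is only an illustrative sketch of a caller, not part of the patch: the helper name, the flat buffer and the 2.6.21-era msghdr/iovec field usage are assumptions; what is taken from the kernel-doc above is that no address or control data may be supplied and that MSG_MORE means more data will follow.

/*
 * Hypothetical kernel-service transmit helper built on the API added above.
 * Assumes <net/af_rxrpc.h>/<linux/net.h> are available and that 'call' was
 * obtained elsewhere (call setup is not shown in this hunk).
 */
static int example_send_request(struct rxrpc_call *call,
				const void *buf, size_t len, bool more)
{
	struct msghdr msg;
	struct kvec iov;

	iov.iov_base = (void *) buf;
	iov.iov_len = len;

	memset(&msg, 0, sizeof(msg));		/* no name, no control data */
	msg.msg_iov = (struct iovec *) &iov;	/* single data segment */
	msg.msg_iovlen = 1;
	msg.msg_flags = more ? MSG_MORE : 0;	/* flag further data to come */

	return rxrpc_kernel_send_data(call, &msg, len);
}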
+ */ +void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code) +{ + _enter("{%d},%d", call->debug_id, abort_code); + + lock_sock(&call->socket->sk); + + _debug("CALL %d USR %lx ST %d on CONN %p", + call->debug_id, call->user_call_ID, call->state, call->conn); + + if (call->state < RXRPC_CALL_COMPLETE) + rxrpc_send_abort(call, abort_code); + + release_sock(&call->socket->sk); + _leave(""); +} + +EXPORT_SYMBOL(rxrpc_kernel_abort_call); + /* * send a message through a server socket * - caller holds the socket locked @@ -214,8 +285,13 @@ int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, if (ret < 0) return ret; - if (cmd == RXRPC_CMD_ACCEPT) - return rxrpc_accept_call(rx, user_call_ID); + if (cmd == RXRPC_CMD_ACCEPT) { + call = rxrpc_accept_call(rx, user_call_ID); + if (IS_ERR(call)) + return PTR_ERR(call); + rxrpc_put_call(call); + return 0; + } call = rxrpc_find_server_call(rx, user_call_ID); if (!call) @@ -363,7 +439,7 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call) clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); if (call->state < RXRPC_CALL_COMPLETE && !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); } read_unlock_bh(&call->state_lock); } diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c index 69ac355546ae..d399de4a7fe2 100644 --- a/net/rxrpc/ar-peer.c +++ b/net/rxrpc/ar-peer.c @@ -219,7 +219,7 @@ void rxrpc_put_peer(struct rxrpc_peer *peer) return; } - schedule_work(&peer->destroyer); + rxrpc_queue_work(&peer->destroyer); _leave(""); } diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index e947d5c15900..f19121d4795b 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -19,7 +19,7 @@ * removal a call's user ID from the socket tree to make the user ID available * again and so that it won't be seen again in association with that call */ -static void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call) +void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call) { _debug("RELEASE CALL %d", call->debug_id); @@ -33,7 +33,7 @@ static void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call) read_lock_bh(&call->state_lock); if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) - schedule_work(&call->processor); + rxrpc_queue_call(call); read_unlock_bh(&call->state_lock); } @@ -364,3 +364,74 @@ wait_error: return copied; } + +/** + * rxrpc_kernel_data_delivered - Record delivery of data message + * @skb: Message holding data + * + * Record the delivery of a data message. This permits RxRPC to keep its + * tracking correct. The socket buffer will be deleted. + */ +void rxrpc_kernel_data_delivered(struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + struct rxrpc_call *call = sp->call; + + ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv); + ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1); + call->rx_data_recv = ntohl(sp->hdr.seq); + + ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten); + rxrpc_free_skb(skb); +} + +EXPORT_SYMBOL(rxrpc_kernel_data_delivered); + +/** + * rxrpc_kernel_is_data_last - Determine if data message is last one + * @skb: Message holding data + * + * Determine if data message is last one for the parent call. 
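To show how the receive side fits together, here is an illustrative sketch (again not part of the patch) of a routine that disposes of messages handed over by the rx->interceptor hook added in ar-input.c above, using the rxrpc_kernel_*() helpers introduced in this series. The skb mark constants are the ones checked by the helpers' assertions and are assumed to be exposed to kernel services by the public af_rxrpc header; the routine name and the idea of a worker draining a private queue are assumptions for illustration.

/*
 * Hypothetical worker-side consumer: the interceptor itself should only
 * queue the skb and wake this worker, since it runs under the receive
 * queue lock.
 */
static void example_process_rx_skb(struct sk_buff *skb,
				   unsigned long user_call_ID)
{
	switch (skb->mark) {
	case RXRPC_SKB_MARK_DATA:
		/* ... copy the payload out for the service here ... */
		if (rxrpc_kernel_is_data_last(skb))
			printk(KERN_DEBUG "last data packet on call %lx\n",
			       user_call_ID);
		/* records delivery and frees the skb, per the kernel-doc */
		rxrpc_kernel_data_delivered(skb);
		break;

	case RXRPC_SKB_MARK_REMOTE_ABORT:
		printk(KERN_DEBUG "call %lx aborted, code %u\n",
		       user_call_ID, rxrpc_kernel_get_abort_code(skb));
		rxrpc_kernel_free_skb(skb);
		break;

	case RXRPC_SKB_MARK_NET_ERROR:
	case RXRPC_SKB_MARK_LOCAL_ERROR:
		printk(KERN_DEBUG "call %lx error %d\n",
		       user_call_ID, rxrpc_kernel_get_error_number(skb));
		rxrpc_kernel_free_skb(skb);
		break;

	default:
		rxrpc_kernel_free_skb(skb);
		break;
	}
}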
+ */ +bool rxrpc_kernel_is_data_last(struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + + ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_DATA); + + return sp->hdr.flags & RXRPC_LAST_PACKET; +} + +EXPORT_SYMBOL(rxrpc_kernel_is_data_last); + +/** + * rxrpc_kernel_get_abort_code - Get the abort code from an RxRPC abort message + * @skb: Message indicating an abort + * + * Get the abort code from an RxRPC abort message. + */ +u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + + ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT); + + return sp->call->abort_code; +} + +EXPORT_SYMBOL(rxrpc_kernel_get_abort_code); + +/** + * rxrpc_kernel_get_error - Get the error number from an RxRPC error message + * @skb: Message indicating an error + * + * Get the error number from an RxRPC error message. + */ +int rxrpc_kernel_get_error_number(struct sk_buff *skb) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + + return sp->error; +} + +EXPORT_SYMBOL(rxrpc_kernel_get_error_number); diff --git a/net/rxrpc/ar-skbuff.c b/net/rxrpc/ar-skbuff.c index d73f6fc76011..de755e04d29c 100644 --- a/net/rxrpc/ar-skbuff.c +++ b/net/rxrpc/ar-skbuff.c @@ -36,7 +36,7 @@ static void rxrpc_request_final_ACK(struct rxrpc_call *call) rxrpc_get_call(call); set_bit(RXRPC_CALL_ACK_FINAL, &call->events); if (try_to_del_timer_sync(&call->ack_timer) >= 0) - schedule_work(&call->processor); + rxrpc_queue_call(call); break; case RXRPC_CALL_SERVER_RECV_REQUEST: @@ -116,3 +116,17 @@ void rxrpc_packet_destructor(struct sk_buff *skb) sock_rfree(skb); _leave(""); } + +/** + * rxrpc_kernel_free_skb - Free an RxRPC socket buffer + * @skb: The socket buffer to be freed + * + * Let RxRPC free its own socket buffer, permitting it to maintain debug + * accounting. + */ +void rxrpc_kernel_free_skb(struct sk_buff *skb) +{ + rxrpc_free_skb(skb); +} + +EXPORT_SYMBOL(rxrpc_kernel_free_skb); diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c index 9b4e5cb545d2..d43d78f19302 100644 --- a/net/rxrpc/ar-transport.c +++ b/net/rxrpc/ar-transport.c @@ -189,7 +189,7 @@ void rxrpc_put_transport(struct rxrpc_transport *trans) /* let the reaper determine the timeout to avoid a race with * overextending the timeout if the reaper is running at the * same time */ - schedule_delayed_work(&rxrpc_transport_reap, 0); + rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0); _leave(""); } @@ -243,8 +243,8 @@ static void rxrpc_transport_reaper(struct work_struct *work) if (earliest != ULONG_MAX) { _debug("reschedule reaper %ld", (long) earliest - now); ASSERTCMP(earliest, >, now); - schedule_delayed_work(&rxrpc_transport_reap, - (earliest - now) * HZ); + rxrpc_queue_delayed_work(&rxrpc_transport_reap, + (earliest - now) * HZ); } /* then destroy all those pulled out */ @@ -270,7 +270,7 @@ void __exit rxrpc_destroy_all_transports(void) rxrpc_transport_timeout = 0; cancel_delayed_work(&rxrpc_transport_reap); - schedule_delayed_work(&rxrpc_transport_reap, 0); + rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0); _leave(""); } -- cgit v1.2.3-59-g8ed1b From 63b6be55e8b51cb718468794d343058e96c7462c Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 26 Apr 2007 15:55:48 -0700 Subject: [AF_RXRPC]: Delete the old RxRPC code. Delete the old RxRPC code as it's now no longer used. Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- fs/Kconfig | 4 - include/rxrpc/call.h | 212 ----- include/rxrpc/connection.h | 83 -- include/rxrpc/krxiod.h | 27 - include/rxrpc/krxsecd.h | 22 - include/rxrpc/krxtimod.h | 45 - include/rxrpc/message.h | 71 -- include/rxrpc/packet.h | 22 +- include/rxrpc/peer.h | 82 -- include/rxrpc/rxrpc.h | 36 - include/rxrpc/transport.h | 106 --- net/rxrpc/Makefile | 27 +- net/rxrpc/call.c | 2277 -------------------------------------------- net/rxrpc/connection.c | 777 --------------- net/rxrpc/internal.h | 106 --- net/rxrpc/krxiod.c | 262 ----- net/rxrpc/krxsecd.c | 270 ------ net/rxrpc/krxtimod.c | 204 ---- net/rxrpc/main.c | 180 ---- net/rxrpc/peer.c | 398 -------- net/rxrpc/proc.c | 617 ------------ net/rxrpc/rxrpc_syms.c | 34 - net/rxrpc/sysctl.c | 121 --- net/rxrpc/transport.c | 846 ---------------- 24 files changed, 9 insertions(+), 6820 deletions(-) delete mode 100644 include/rxrpc/call.h delete mode 100644 include/rxrpc/connection.h delete mode 100644 include/rxrpc/krxiod.h delete mode 100644 include/rxrpc/krxsecd.h delete mode 100644 include/rxrpc/krxtimod.h delete mode 100644 include/rxrpc/message.h delete mode 100644 include/rxrpc/peer.h delete mode 100644 include/rxrpc/rxrpc.h delete mode 100644 include/rxrpc/transport.h delete mode 100644 net/rxrpc/call.c delete mode 100644 net/rxrpc/connection.c delete mode 100644 net/rxrpc/internal.h delete mode 100644 net/rxrpc/krxiod.c delete mode 100644 net/rxrpc/krxsecd.c delete mode 100644 net/rxrpc/krxtimod.c delete mode 100644 net/rxrpc/main.c delete mode 100644 net/rxrpc/peer.c delete mode 100644 net/rxrpc/proc.c delete mode 100644 net/rxrpc/rxrpc_syms.c delete mode 100644 net/rxrpc/sysctl.c delete mode 100644 net/rxrpc/transport.c (limited to 'include') diff --git a/fs/Kconfig b/fs/Kconfig index 075c9997ddc5..e33c08924572 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -2038,10 +2038,6 @@ config AFS_DEBUG If unsure, say N. - -config RXRPC - tristate - config 9P_FS tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)" depends on INET && EXPERIMENTAL diff --git a/include/rxrpc/call.h b/include/rxrpc/call.h deleted file mode 100644 index b86f83743510..000000000000 --- a/include/rxrpc/call.h +++ /dev/null @@ -1,212 +0,0 @@ -/* call.h: Rx call record - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _LINUX_RXRPC_CALL_H -#define _LINUX_RXRPC_CALL_H - -#include -#include -#include -#include - -#define RXRPC_CALL_ACK_WINDOW_SIZE 16 - -extern unsigned rxrpc_call_rcv_timeout; /* receive activity timeout (secs) */ - -/* application call state - * - only state 0 and ffff are reserved, the state is set to 1 after an opid is received - */ -enum rxrpc_app_cstate { - RXRPC_CSTATE_COMPLETE = 0, /* operation complete */ - RXRPC_CSTATE_ERROR, /* operation ICMP error or aborted */ - RXRPC_CSTATE_SRVR_RCV_OPID, /* [SERVER] receiving operation ID */ - RXRPC_CSTATE_SRVR_RCV_ARGS, /* [SERVER] receiving operation data */ - RXRPC_CSTATE_SRVR_GOT_ARGS, /* [SERVER] completely received operation data */ - RXRPC_CSTATE_SRVR_SND_REPLY, /* [SERVER] sending operation reply */ - RXRPC_CSTATE_SRVR_RCV_FINAL_ACK, /* [SERVER] receiving final ACK */ - RXRPC_CSTATE_CLNT_SND_ARGS, /* [CLIENT] sending operation args */ - RXRPC_CSTATE_CLNT_RCV_REPLY, /* [CLIENT] receiving operation reply */ - RXRPC_CSTATE_CLNT_GOT_REPLY, /* [CLIENT] completely received operation reply */ -} __attribute__((packed)); - -extern const char *rxrpc_call_states[]; - -enum rxrpc_app_estate { - RXRPC_ESTATE_NO_ERROR = 0, /* no error */ - RXRPC_ESTATE_LOCAL_ABORT, /* aborted locally by application layer */ - RXRPC_ESTATE_PEER_ABORT, /* aborted remotely by peer */ - RXRPC_ESTATE_LOCAL_ERROR, /* local ICMP network error */ - RXRPC_ESTATE_REMOTE_ERROR, /* remote ICMP network error */ -} __attribute__((packed)); - -extern const char *rxrpc_call_error_states[]; - -/*****************************************************************************/ -/* - * Rx call record and application scratch buffer - * - the call record occupies the bottom of a complete page - * - the application scratch buffer occupies the rest - */ -struct rxrpc_call -{ - atomic_t usage; - struct rxrpc_connection *conn; /* connection upon which active */ - spinlock_t lock; /* access lock */ - struct module *owner; /* owner module */ - wait_queue_head_t waitq; /* wait queue for events to happen */ - struct list_head link; /* general internal list link */ - struct list_head call_link; /* master call list link */ - __be32 chan_ix; /* connection channel index */ - __be32 call_id; /* call ID on connection */ - unsigned long cjif; /* jiffies at call creation */ - unsigned long flags; /* control flags */ -#define RXRPC_CALL_ACKS_TIMO 0x00000001 /* ACKS timeout reached */ -#define RXRPC_CALL_ACKR_TIMO 0x00000002 /* ACKR timeout reached */ -#define RXRPC_CALL_RCV_TIMO 0x00000004 /* RCV timeout reached */ -#define RXRPC_CALL_RCV_PKT 0x00000008 /* received packet */ - - /* transmission */ - rxrpc_seq_t snd_seq_count; /* outgoing packet sequence number counter */ - struct rxrpc_message *snd_nextmsg; /* next message being constructed for sending */ - struct rxrpc_message *snd_ping; /* last ping message sent */ - unsigned short snd_resend_cnt; /* count of resends since last ACK */ - - /* transmission ACK tracking */ - struct list_head acks_pendq; /* messages pending ACK (ordered by seq) */ - unsigned acks_pend_cnt; /* number of un-ACK'd packets */ - rxrpc_seq_t acks_dftv_seq; /* highest definitively ACK'd msg seq */ - struct timer_list acks_timeout; /* timeout on expected ACK */ - - /* reception */ - struct list_head rcv_receiveq; /* messages pending reception (ordered by seq) */ - struct list_head rcv_krxiodq_lk; /* krxiod queue for new inbound packets */ - struct timer_list rcv_timeout; /* call receive activity timeout */ - - /* reception ACK'ing */ - rxrpc_seq_t ackr_win_bot; /* 
bottom of ACK window */ - rxrpc_seq_t ackr_win_top; /* top of ACK window */ - rxrpc_seq_t ackr_high_seq; /* highest seqno yet received */ - rxrpc_seq_net_t ackr_prev_seq; /* previous seqno received */ - unsigned ackr_pend_cnt; /* number of pending ACKs */ - struct timer_list ackr_dfr_timo; /* timeout on deferred ACK */ - char ackr_dfr_perm; /* request for deferred ACKs permitted */ - rxrpc_seq_t ackr_dfr_seq; /* seqno for deferred ACK */ - struct rxrpc_ackpacket ackr; /* pending normal ACK packet */ - uint8_t ackr_array[RXRPC_CALL_ACK_WINDOW_SIZE]; /* ACK records */ - - /* presentation layer */ - char app_last_rcv; /* T if received last packet from remote end */ - enum rxrpc_app_cstate app_call_state; /* call state */ - enum rxrpc_app_estate app_err_state; /* abort/error state */ - struct list_head app_readyq; /* ordered ready received packet queue */ - struct list_head app_unreadyq; /* ordered post-hole recv'd packet queue */ - rxrpc_seq_t app_ready_seq; /* last seq number dropped into readyq */ - size_t app_ready_qty; /* amount of data ready in readyq */ - unsigned app_opcode; /* operation ID */ - unsigned app_abort_code; /* abort code (when aborted) */ - int app_errno; /* error number (when ICMP error received) */ - - /* statisics */ - unsigned pkt_rcv_count; /* count of received packets on this call */ - unsigned pkt_snd_count; /* count of sent packets on this call */ - unsigned app_read_count; /* number of reads issued */ - - /* bits for the application to use */ - rxrpc_call_attn_func_t app_attn_func; /* callback when attention required */ - rxrpc_call_error_func_t app_error_func; /* callback when abort sent (cleanup and put) */ - rxrpc_call_aemap_func_t app_aemap_func; /* callback to map abort code to/from errno */ - void *app_user; /* application data */ - struct list_head app_link; /* application list linkage */ - struct list_head app_attn_link; /* application attention list linkage */ - size_t app_mark; /* trigger callback when app_ready_qty>=app_mark */ - char app_async_read; /* T if in async-read mode */ - uint8_t *app_read_buf; /* application async read buffer (app_mark size) */ - uint8_t *app_scr_alloc; /* application scratch allocation pointer */ - void *app_scr_ptr; /* application pointer into scratch buffer */ - -#define RXRPC_APP_MARK_EOF 0xFFFFFFFFU /* mark at end of input */ - - /* application scratch buffer */ - uint8_t app_scratch[0] __attribute__((aligned(sizeof(long)))); -}; - -#define RXRPC_CALL_SCRATCH_SIZE (PAGE_SIZE - sizeof(struct rxrpc_call)) - -#define rxrpc_call_reset_scratch(CALL) \ -do { (CALL)->app_scr_alloc = (CALL)->app_scratch; } while(0) - -#define rxrpc_call_alloc_scratch(CALL,SIZE) \ -({ \ - void *ptr; \ - ptr = (CALL)->app_scr_alloc; \ - (CALL)->app_scr_alloc += (SIZE); \ - if ((SIZE)>RXRPC_CALL_SCRATCH_SIZE || \ - (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \ - printk("rxrpc_call_alloc_scratch(%p,%Zu)\n",(CALL),(size_t)(SIZE)); \ - BUG(); \ - } \ - ptr; \ -}) - -#define rxrpc_call_alloc_scratch_s(CALL,TYPE) \ -({ \ - size_t size = sizeof(TYPE); \ - TYPE *ptr; \ - ptr = (TYPE*)(CALL)->app_scr_alloc; \ - (CALL)->app_scr_alloc += size; \ - if (size>RXRPC_CALL_SCRATCH_SIZE || \ - (size_t)((CALL)->app_scr_alloc - (u8*)(CALL)) > RXRPC_CALL_SCRATCH_SIZE) { \ - printk("rxrpc_call_alloc_scratch(%p,%Zu)\n",(CALL),size); \ - BUG(); \ - } \ - ptr; \ -}) - -#define rxrpc_call_is_ack_pending(CALL) ((CALL)->ackr.reason != 0) - -extern int rxrpc_create_call(struct rxrpc_connection *conn, - rxrpc_call_attn_func_t attn, - 
rxrpc_call_error_func_t error, - rxrpc_call_aemap_func_t aemap, - struct rxrpc_call **_call); - -extern int rxrpc_incoming_call(struct rxrpc_connection *conn, - struct rxrpc_message *msg, - struct rxrpc_call **_call); - -static inline void rxrpc_get_call(struct rxrpc_call *call) -{ - BUG_ON(atomic_read(&call->usage)<=0); - atomic_inc(&call->usage); - /*printk("rxrpc_get_call(%p{u=%d})\n",(C),atomic_read(&(C)->usage));*/ -} - -extern void rxrpc_put_call(struct rxrpc_call *call); - -extern void rxrpc_call_do_stuff(struct rxrpc_call *call); - -extern int rxrpc_call_abort(struct rxrpc_call *call, int error); - -#define RXRPC_CALL_READ_BLOCK 0x0001 /* block if not enough data and not yet EOF */ -#define RXRPC_CALL_READ_ALL 0x0002 /* error if insufficient data received */ -extern int rxrpc_call_read_data(struct rxrpc_call *call, void *buffer, size_t size, int flags); - -extern int rxrpc_call_write_data(struct rxrpc_call *call, - size_t sioc, - struct kvec *siov, - uint8_t rxhdr_flags, - gfp_t alloc_flags, - int dup_data, - size_t *size_sent); - -extern void rxrpc_call_handle_error(struct rxrpc_call *conn, int local, int errno); - -#endif /* _LINUX_RXRPC_CALL_H */ diff --git a/include/rxrpc/connection.h b/include/rxrpc/connection.h deleted file mode 100644 index 41e6781ad067..000000000000 --- a/include/rxrpc/connection.h +++ /dev/null @@ -1,83 +0,0 @@ -/* connection.h: Rx connection record - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _LINUX_RXRPC_CONNECTION_H -#define _LINUX_RXRPC_CONNECTION_H - -#include -#include - -struct sk_buff; - -/*****************************************************************************/ -/* - * Rx connection - * - connections are matched by (rmt_port,rmt_addr,service_id,conn_id,clientflag) - * - connections only retain a refcount on the peer when they are active - * - connections with refcount==0 are inactive and reside in the peer's graveyard - */ -struct rxrpc_connection -{ - atomic_t usage; - struct rxrpc_transport *trans; /* transport endpoint */ - struct rxrpc_peer *peer; /* peer from/to which connected */ - struct rxrpc_service *service; /* responsible service (inbound conns) */ - struct rxrpc_timer timeout; /* decaching timer */ - struct list_head link; /* link in peer's list */ - struct list_head proc_link; /* link in proc list */ - struct list_head err_link; /* link in ICMP error processing list */ - struct list_head id_link; /* link in ID grant list */ - struct sockaddr_in addr; /* remote address */ - struct rxrpc_call *channels[4]; /* channels (active calls) */ - wait_queue_head_t chanwait; /* wait for channel to become available */ - spinlock_t lock; /* access lock */ - struct timeval atime; /* last access time */ - size_t mtu_size; /* MTU size for outbound messages */ - unsigned call_counter; /* call ID counter */ - rxrpc_serial_t serial_counter; /* packet serial number counter */ - - /* the following should all be in net order */ - __be32 in_epoch; /* peer's epoch */ - __be32 out_epoch; /* my epoch */ - __be32 conn_id; /* connection ID, appropriately shifted */ - __be16 service_id; /* service ID */ - uint8_t security_ix; /* security ID */ - uint8_t in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */ - uint8_t out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ -}; - -extern int rxrpc_create_connection(struct rxrpc_transport *trans, - __be16 port, - __be32 addr, - uint16_t service_id, - void *security, - struct rxrpc_connection **_conn); - -extern int rxrpc_connection_lookup(struct rxrpc_peer *peer, - struct rxrpc_message *msg, - struct rxrpc_connection **_conn); - -static inline void rxrpc_get_connection(struct rxrpc_connection *conn) -{ - BUG_ON(atomic_read(&conn->usage)<0); - atomic_inc(&conn->usage); - //printk("rxrpc_get_conn(%p{u=%d})\n",conn,atomic_read(&conn->usage)); -} - -extern void rxrpc_put_connection(struct rxrpc_connection *conn); - -extern int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn, - struct rxrpc_call *call, - struct rxrpc_message *msg); - -extern void rxrpc_conn_handle_error(struct rxrpc_connection *conn, int local, int errno); - -#endif /* _LINUX_RXRPC_CONNECTION_H */ diff --git a/include/rxrpc/krxiod.h b/include/rxrpc/krxiod.h deleted file mode 100644 index c0e0e82e4df2..000000000000 --- a/include/rxrpc/krxiod.h +++ /dev/null @@ -1,27 +0,0 @@ -/* krxiod.h: Rx RPC I/O kernel thread interface - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _LINUX_RXRPC_KRXIOD_H -#define _LINUX_RXRPC_KRXIOD_H - -#include - -extern int rxrpc_krxiod_init(void); -extern void rxrpc_krxiod_kill(void); -extern void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans); -extern void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans); -extern void rxrpc_krxiod_queue_peer(struct rxrpc_peer *peer); -extern void rxrpc_krxiod_dequeue_peer(struct rxrpc_peer *peer); -extern void rxrpc_krxiod_clear_peers(struct rxrpc_transport *trans); -extern void rxrpc_krxiod_queue_call(struct rxrpc_call *call); -extern void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call); - -#endif /* _LINUX_RXRPC_KRXIOD_H */ diff --git a/include/rxrpc/krxsecd.h b/include/rxrpc/krxsecd.h deleted file mode 100644 index 55ce43a25b38..000000000000 --- a/include/rxrpc/krxsecd.h +++ /dev/null @@ -1,22 +0,0 @@ -/* krxsecd.h: Rx RPC security kernel thread interface - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _LINUX_RXRPC_KRXSECD_H -#define _LINUX_RXRPC_KRXSECD_H - -#include - -extern int rxrpc_krxsecd_init(void); -extern void rxrpc_krxsecd_kill(void); -extern void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans); -extern void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg); - -#endif /* _LINUX_RXRPC_KRXSECD_H */ diff --git a/include/rxrpc/krxtimod.h b/include/rxrpc/krxtimod.h deleted file mode 100644 index b3d298b612f2..000000000000 --- a/include/rxrpc/krxtimod.h +++ /dev/null @@ -1,45 +0,0 @@ -/* krxtimod.h: RxRPC timeout daemon - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _LINUX_RXRPC_KRXTIMOD_H -#define _LINUX_RXRPC_KRXTIMOD_H - -#include - -struct rxrpc_timer_ops { - /* called when the front of the timer queue has timed out */ - void (*timed_out)(struct rxrpc_timer *timer); -}; - -/*****************************************************************************/ -/* - * RXRPC timer/timeout record - */ -struct rxrpc_timer -{ - struct list_head link; /* link in timer queue */ - unsigned long timo_jif; /* timeout time */ - const struct rxrpc_timer_ops *ops; /* timeout expiry function */ -}; - -static inline void rxrpc_timer_init(rxrpc_timer_t *timer, const struct rxrpc_timer_ops *ops) -{ - INIT_LIST_HEAD(&timer->link); - timer->ops = ops; -} - -extern int rxrpc_krxtimod_start(void); -extern void rxrpc_krxtimod_kill(void); - -extern void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout); -extern int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer); - -#endif /* _LINUX_RXRPC_KRXTIMOD_H */ diff --git a/include/rxrpc/message.h b/include/rxrpc/message.h deleted file mode 100644 index b318f273d4f2..000000000000 --- a/include/rxrpc/message.h +++ /dev/null @@ -1,71 +0,0 @@ -/* message.h: Rx message caching - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 
- * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _LINUX_RXRPC_MESSAGE_H -#define _LINUX_RXRPC_MESSAGE_H - -#include - -/*****************************************************************************/ -/* - * Rx message record - */ -struct rxrpc_message -{ - atomic_t usage; - struct list_head link; /* list link */ - struct timeval stamp; /* time received or last sent */ - rxrpc_seq_t seq; /* message sequence number */ - - int state; /* the state the message is currently in */ -#define RXRPC_MSG_PREPARED 0 -#define RXRPC_MSG_SENT 1 -#define RXRPC_MSG_ACKED 2 /* provisionally ACK'd */ -#define RXRPC_MSG_DONE 3 /* definitively ACK'd (msg->sequsage); } while(0) - -extern void __rxrpc_put_message(struct rxrpc_message *msg); -static inline void rxrpc_put_message(struct rxrpc_message *msg) -{ - BUG_ON(atomic_read(&msg->usage)<=0); - if (atomic_dec_and_test(&msg->usage)) - __rxrpc_put_message(msg); -} - -extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn, - struct rxrpc_call *call, - uint8_t type, - int count, - struct kvec *diov, - gfp_t alloc_flags, - struct rxrpc_message **_msg); - -extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg); - -#endif /* _LINUX_RXRPC_MESSAGE_H */ diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index 09b11a1e8d46..b69e6e173ea1 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -1,6 +1,6 @@ /* packet.h: Rx packet layout and definitions * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -12,21 +12,17 @@ #ifndef _LINUX_RXRPC_PACKET_H #define _LINUX_RXRPC_PACKET_H -#include - -#define RXRPC_IPUDP_SIZE 28 -extern size_t RXRPC_MAX_PACKET_SIZE; -#define RXRPC_MAX_PACKET_DATA_SIZE (RXRPC_MAX_PACKET_SIZE - sizeof(struct rxrpc_header)) -#define RXRPC_LOCAL_PACKET_SIZE RXRPC_MAX_PACKET_SIZE -#define RXRPC_REMOTE_PACKET_SIZE (576 - RXRPC_IPUDP_SIZE) +typedef u32 rxrpc_seq_t; /* Rx message sequence number */ +typedef u32 rxrpc_serial_t; /* Rx message serial number */ +typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */ +typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */ /*****************************************************************************/ /* * on-the-wire Rx packet header * - all multibyte fields should be in network byte order */ -struct rxrpc_header -{ +struct rxrpc_header { __be32 epoch; /* client boot timestamp */ __be32 cid; /* connection and channel ID */ @@ -85,8 +81,7 @@ extern const char *rxrpc_pkts[]; * - new__rsvd = j__rsvd * - duplicating all other fields */ -struct rxrpc_jumbo_header -{ +struct rxrpc_jumbo_header { uint8_t flags; /* packet flags (as per rxrpc_header) */ uint8_t pad; __be16 _rsvd; /* reserved (used by kerberos security as cksum) */ @@ -99,8 +94,7 @@ struct rxrpc_jumbo_header * on-the-wire Rx ACK packet data payload * - all multibyte fields should be in network byte order */ -struct rxrpc_ackpacket -{ +struct rxrpc_ackpacket { __be16 bufferSpace; /* number of packet buffers available */ __be16 maxSkew; /* diff between serno being ACK'd and highest serial no * received */ diff --git a/include/rxrpc/peer.h b/include/rxrpc/peer.h deleted file mode 100644 index 8b8fe97cbbcc..000000000000 --- a/include/rxrpc/peer.h +++ /dev/null @@ -1,82 +0,0 @@ -/* peer.h: Rx RPC per-transport peer record - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _LINUX_RXRPC_PEER_H -#define _LINUX_RXRPC_PEER_H - -#include -#include -#include - -struct rxrpc_peer_ops -{ - /* peer record being added */ - int (*adding)(struct rxrpc_peer *peer); - - /* peer record being discarded from graveyard */ - void (*discarding)(struct rxrpc_peer *peer); - - /* change of epoch detected on connection */ - void (*change_of_epoch)(struct rxrpc_connection *conn); -}; - -/*****************************************************************************/ -/* - * Rx RPC per-transport peer record - * - peers only retain a refcount on the transport when they are active - * - peers with refcount==0 are inactive and reside in the transport's graveyard - */ -struct rxrpc_peer -{ - atomic_t usage; - struct rxrpc_peer_ops *ops; /* operations on this peer */ - struct rxrpc_transport *trans; /* owner transport */ - struct rxrpc_timer timeout; /* timeout for grave destruction */ - struct list_head link; /* link in transport's peer list */ - struct list_head proc_link; /* link in /proc list */ - rwlock_t conn_idlock; /* lock for connection IDs */ - struct list_head conn_idlist; /* list of connections granted IDs */ - uint32_t conn_idcounter; /* connection ID counter */ - rwlock_t conn_lock; /* lock for active/dead connections */ - struct list_head conn_active; /* active connections to/from this peer */ - struct list_head conn_graveyard; /* graveyard for inactive connections */ - spinlock_t conn_gylock; /* lock for conn_graveyard */ - wait_queue_head_t conn_gy_waitq; /* wait queue hit when graveyard is empty */ - atomic_t conn_count; /* number of attached connections */ - struct in_addr addr; /* remote address */ - size_t if_mtu; /* interface MTU for this peer */ - spinlock_t lock; /* access lock */ - - void *user; /* application layer data */ - - /* calculated RTT cache */ -#define RXRPC_RTT_CACHE_SIZE 32 - suseconds_t rtt; /* current RTT estimate (in uS) */ - unsigned rtt_point; /* next entry at which to insert */ - unsigned rtt_usage; /* amount of cache actually used */ - suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */ -}; - - -extern int rxrpc_peer_lookup(struct rxrpc_transport *trans, - __be32 addr, - struct rxrpc_peer **_peer); - -static inline void rxrpc_get_peer(struct rxrpc_peer *peer) -{ - BUG_ON(atomic_read(&peer->usage)<0); - atomic_inc(&peer->usage); - //printk("rxrpc_get_peer(%p{u=%d})\n",peer,atomic_read(&peer->usage)); -} - -extern void rxrpc_put_peer(struct rxrpc_peer *peer); - -#endif /* _LINUX_RXRPC_PEER_H */ diff --git a/include/rxrpc/rxrpc.h b/include/rxrpc/rxrpc.h deleted file mode 100644 index 8d9874cef991..000000000000 --- a/include/rxrpc/rxrpc.h +++ /dev/null @@ -1,36 +0,0 @@ -/* rx.h: Rx RPC interface - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _LINUX_RXRPC_RXRPC_H -#define _LINUX_RXRPC_RXRPC_H - -#ifdef __KERNEL__ - -extern __be32 rxrpc_epoch; - -#ifdef CONFIG_SYSCTL -extern int rxrpc_ktrace; -extern int rxrpc_kdebug; -extern int rxrpc_kproto; -extern int rxrpc_knet; -#else -#define rxrpc_ktrace 0 -#define rxrpc_kdebug 0 -#define rxrpc_kproto 0 -#define rxrpc_knet 0 -#endif - -extern int rxrpc_sysctl_init(void); -extern void rxrpc_sysctl_cleanup(void); - -#endif /* __KERNEL__ */ - -#endif /* _LINUX_RXRPC_RXRPC_H */ diff --git a/include/rxrpc/transport.h b/include/rxrpc/transport.h deleted file mode 100644 index 7c7b9683fa39..000000000000 --- a/include/rxrpc/transport.h +++ /dev/null @@ -1,106 +0,0 @@ -/* transport.h: Rx transport management - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _LINUX_RXRPC_TRANSPORT_H -#define _LINUX_RXRPC_TRANSPORT_H - -#include -#include -#include -#include -#include - -typedef int (*rxrpc_newcall_fnx_t)(struct rxrpc_call *call); - -extern wait_queue_head_t rxrpc_krxiod_wq; - -/*****************************************************************************/ -/* - * Rx operation specification - * - tables of these must be sorted by op ID so that they can be binary-chop searched - */ -struct rxrpc_operation -{ - unsigned id; /* operation ID */ - size_t asize; /* minimum size of argument block */ - const char *name; /* name of operation */ - void *user; /* initial user data */ -}; - -/*****************************************************************************/ -/* - * Rx transport service record - */ -struct rxrpc_service -{ - struct list_head link; /* link in services list on transport */ - struct module *owner; /* owner module */ - rxrpc_newcall_fnx_t new_call; /* new call handler function */ - const char *name; /* name of service */ - unsigned short service_id; /* Rx service ID */ - rxrpc_call_attn_func_t attn_func; /* call requires attention callback */ - rxrpc_call_error_func_t error_func; /* call error callback */ - rxrpc_call_aemap_func_t aemap_func; /* abort -> errno mapping callback */ - - const struct rxrpc_operation *ops_begin; /* beginning of operations table */ - const struct rxrpc_operation *ops_end; /* end of operations table */ -}; - -/*****************************************************************************/ -/* - * Rx transport endpoint record - */ -struct rxrpc_transport -{ - atomic_t usage; - struct socket *socket; /* my UDP socket */ - struct list_head services; /* services listening on this socket */ - struct list_head link; /* link in transport list */ - struct list_head proc_link; /* link in transport proc list */ - struct list_head krxiodq_link; /* krxiod attention queue link */ - spinlock_t lock; /* access lock */ - struct list_head peer_active; /* active peers connected to over this socket */ - struct list_head peer_graveyard; /* inactive peer list */ - spinlock_t peer_gylock; /* peer graveyard lock */ - wait_queue_head_t peer_gy_waitq; /* wait queue hit when peer graveyard is empty */ - rwlock_t peer_lock; /* peer list access lock */ - atomic_t peer_count; /* number of peers */ - struct rxrpc_peer_ops *peer_ops; /* default peer operations */ - unsigned short port; /* port upon which listening */ - volatile char 
error_rcvd; /* T if received ICMP error outstanding */ -}; - -extern int rxrpc_create_transport(unsigned short port, - struct rxrpc_transport **_trans); - -static inline void rxrpc_get_transport(struct rxrpc_transport *trans) -{ - BUG_ON(atomic_read(&trans->usage) <= 0); - atomic_inc(&trans->usage); - //printk("rxrpc_get_transport(%p{u=%d})\n", - // trans, atomic_read(&trans->usage)); -} - -extern void rxrpc_put_transport(struct rxrpc_transport *trans); - -extern int rxrpc_add_service(struct rxrpc_transport *trans, - struct rxrpc_service *srv); - -extern void rxrpc_del_service(struct rxrpc_transport *trans, - struct rxrpc_service *srv); - -extern void rxrpc_trans_receive_packet(struct rxrpc_transport *trans); - -extern int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans, - struct rxrpc_message *msg, - int error); - -#endif /* _LINUX_RXRPC_TRANSPORT_H */ diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index 07bf82ffec6a..c46867c61c98 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile @@ -1,9 +1,7 @@ # -# Makefile for Linux kernel Rx RPC +# Makefile for Linux kernel RxRPC # -#CFLAGS += -finstrument-functions - af-rxrpc-objs := \ af_rxrpc.o \ ar-accept.o \ @@ -29,26 +27,3 @@ endif obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o obj-$(CONFIG_RXKAD) += rxkad.o - -# -# obsolete RxRPC interface, still used by fs/afs/ -# -rxrpc-objs := \ - call.o \ - connection.o \ - krxiod.o \ - krxsecd.o \ - krxtimod.o \ - main.o \ - peer.o \ - rxrpc_syms.o \ - transport.o - -ifeq ($(CONFIG_PROC_FS),y) -rxrpc-objs += proc.o -endif -ifeq ($(CONFIG_SYSCTL),y) -rxrpc-objs += sysctl.o -endif - -obj-$(CONFIG_RXRPC) += rxrpc.o diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c deleted file mode 100644 index d07122b57e0d..000000000000 --- a/net/rxrpc/call.c +++ /dev/null @@ -1,2277 +0,0 @@ -/* call.c: Rx call routines - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -__RXACCT_DECL(atomic_t rxrpc_call_count); -__RXACCT_DECL(atomic_t rxrpc_message_count); - -LIST_HEAD(rxrpc_calls); -DECLARE_RWSEM(rxrpc_calls_sem); - -unsigned rxrpc_call_rcv_timeout = HZ/3; -static unsigned rxrpc_call_acks_timeout = HZ/3; -static unsigned rxrpc_call_dfr_ack_timeout = HZ/20; -static unsigned short rxrpc_call_max_resend = HZ/10; - -const char *rxrpc_call_states[] = { - "COMPLETE", - "ERROR", - "SRVR_RCV_OPID", - "SRVR_RCV_ARGS", - "SRVR_GOT_ARGS", - "SRVR_SND_REPLY", - "SRVR_RCV_FINAL_ACK", - "CLNT_SND_ARGS", - "CLNT_RCV_REPLY", - "CLNT_GOT_REPLY" -}; - -const char *rxrpc_call_error_states[] = { - "NO_ERROR", - "LOCAL_ABORT", - "PEER_ABORT", - "LOCAL_ERROR", - "REMOTE_ERROR" -}; - -const char *rxrpc_pkts[] = { - "?00", - "data", "ack", "busy", "abort", "ackall", "chall", "resp", "debug", - "?09", "?10", "?11", "?12", "?13", "?14", "?15" -}; - -static const char *rxrpc_acks[] = { - "---", "REQ", "DUP", "SEQ", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", - "-?-" -}; - -static const char _acktype[] = "NA-"; - -static void rxrpc_call_receive_packet(struct rxrpc_call *call); -static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, - struct rxrpc_message *msg); -static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, - struct rxrpc_message *msg); -static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, - rxrpc_seq_t higest); -static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest); -static int __rxrpc_call_read_data(struct rxrpc_call *call); - -static int rxrpc_call_record_ACK(struct rxrpc_call *call, - struct rxrpc_message *msg, - rxrpc_seq_t seq, - size_t count); - -static int rxrpc_call_flush(struct rxrpc_call *call); - -#define _state(call) \ - _debug("[[[ state %s ]]]", rxrpc_call_states[call->app_call_state]); - -static void rxrpc_call_default_attn_func(struct rxrpc_call *call) -{ - wake_up(&call->waitq); -} - -static void rxrpc_call_default_error_func(struct rxrpc_call *call) -{ - wake_up(&call->waitq); -} - -static void rxrpc_call_default_aemap_func(struct rxrpc_call *call) -{ - switch (call->app_err_state) { - case RXRPC_ESTATE_LOCAL_ABORT: - call->app_abort_code = -call->app_errno; - case RXRPC_ESTATE_PEER_ABORT: - call->app_errno = -ECONNABORTED; - default: - break; - } -} - -static void __rxrpc_call_acks_timeout(unsigned long _call) -{ - struct rxrpc_call *call = (struct rxrpc_call *) _call; - - _debug("ACKS TIMEOUT %05lu", jiffies - call->cjif); - - call->flags |= RXRPC_CALL_ACKS_TIMO; - rxrpc_krxiod_queue_call(call); -} - -static void __rxrpc_call_rcv_timeout(unsigned long _call) -{ - struct rxrpc_call *call = (struct rxrpc_call *) _call; - - _debug("RCV TIMEOUT %05lu", jiffies - call->cjif); - - call->flags |= RXRPC_CALL_RCV_TIMO; - rxrpc_krxiod_queue_call(call); -} - -static void __rxrpc_call_ackr_timeout(unsigned long _call) -{ - struct rxrpc_call *call = (struct rxrpc_call *) _call; - - _debug("ACKR TIMEOUT %05lu",jiffies - call->cjif); - - call->flags |= RXRPC_CALL_ACKR_TIMO; - rxrpc_krxiod_queue_call(call); -} - -/*****************************************************************************/ -/* - * calculate a timeout based on an RTT value - */ -static inline unsigned long __rxrpc_rtt_based_timeout(struct rxrpc_call *call, - unsigned long val) -{ - unsigned long expiry = call->conn->peer->rtt / (1000000 / HZ); - - expiry += 10; - if (expiry < HZ / 25) - expiry = HZ / 25; - if (expiry > HZ) 
- expiry = HZ; - - _leave(" = %lu jiffies", expiry); - return jiffies + expiry; -} /* end __rxrpc_rtt_based_timeout() */ - -/*****************************************************************************/ -/* - * create a new call record - */ -static inline int __rxrpc_create_call(struct rxrpc_connection *conn, - struct rxrpc_call **_call) -{ - struct rxrpc_call *call; - - _enter("%p", conn); - - /* allocate and initialise a call record */ - call = (struct rxrpc_call *) get_zeroed_page(GFP_KERNEL); - if (!call) { - _leave(" ENOMEM"); - return -ENOMEM; - } - - atomic_set(&call->usage, 1); - - init_waitqueue_head(&call->waitq); - spin_lock_init(&call->lock); - INIT_LIST_HEAD(&call->link); - INIT_LIST_HEAD(&call->acks_pendq); - INIT_LIST_HEAD(&call->rcv_receiveq); - INIT_LIST_HEAD(&call->rcv_krxiodq_lk); - INIT_LIST_HEAD(&call->app_readyq); - INIT_LIST_HEAD(&call->app_unreadyq); - INIT_LIST_HEAD(&call->app_link); - INIT_LIST_HEAD(&call->app_attn_link); - - init_timer(&call->acks_timeout); - call->acks_timeout.data = (unsigned long) call; - call->acks_timeout.function = __rxrpc_call_acks_timeout; - - init_timer(&call->rcv_timeout); - call->rcv_timeout.data = (unsigned long) call; - call->rcv_timeout.function = __rxrpc_call_rcv_timeout; - - init_timer(&call->ackr_dfr_timo); - call->ackr_dfr_timo.data = (unsigned long) call; - call->ackr_dfr_timo.function = __rxrpc_call_ackr_timeout; - - call->conn = conn; - call->ackr_win_bot = 1; - call->ackr_win_top = call->ackr_win_bot + RXRPC_CALL_ACK_WINDOW_SIZE - 1; - call->ackr_prev_seq = 0; - call->app_mark = RXRPC_APP_MARK_EOF; - call->app_attn_func = rxrpc_call_default_attn_func; - call->app_error_func = rxrpc_call_default_error_func; - call->app_aemap_func = rxrpc_call_default_aemap_func; - call->app_scr_alloc = call->app_scratch; - - call->cjif = jiffies; - - _leave(" = 0 (%p)", call); - - *_call = call; - - return 0; -} /* end __rxrpc_create_call() */ - -/*****************************************************************************/ -/* - * create a new call record for outgoing calls - */ -int rxrpc_create_call(struct rxrpc_connection *conn, - rxrpc_call_attn_func_t attn, - rxrpc_call_error_func_t error, - rxrpc_call_aemap_func_t aemap, - struct rxrpc_call **_call) -{ - DECLARE_WAITQUEUE(myself, current); - - struct rxrpc_call *call; - int ret, cix, loop; - - _enter("%p", conn); - - /* allocate and initialise a call record */ - ret = __rxrpc_create_call(conn, &call); - if (ret < 0) { - _leave(" = %d", ret); - return ret; - } - - call->app_call_state = RXRPC_CSTATE_CLNT_SND_ARGS; - if (attn) - call->app_attn_func = attn; - if (error) - call->app_error_func = error; - if (aemap) - call->app_aemap_func = aemap; - - _state(call); - - spin_lock(&conn->lock); - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&conn->chanwait, &myself); - - try_again: - /* try to find an unused channel */ - for (cix = 0; cix < 4; cix++) - if (!conn->channels[cix]) - goto obtained_chan; - - /* no free channels - wait for one to become available */ - ret = -EINTR; - if (signal_pending(current)) - goto error_unwait; - - spin_unlock(&conn->lock); - - schedule(); - set_current_state(TASK_INTERRUPTIBLE); - - spin_lock(&conn->lock); - goto try_again; - - /* got a channel - now attach to the connection */ - obtained_chan: - remove_wait_queue(&conn->chanwait, &myself); - set_current_state(TASK_RUNNING); - - /* concoct a unique call number */ - next_callid: - call->call_id = htonl(++conn->call_counter); - for (loop = 0; loop < 4; loop++) - if (conn->channels[loop] && - 
conn->channels[loop]->call_id == call->call_id) - goto next_callid; - - rxrpc_get_connection(conn); - conn->channels[cix] = call; /* assign _after_ done callid check loop */ - do_gettimeofday(&conn->atime); - call->chan_ix = htonl(cix); - - spin_unlock(&conn->lock); - - down_write(&rxrpc_calls_sem); - list_add_tail(&call->call_link, &rxrpc_calls); - up_write(&rxrpc_calls_sem); - - __RXACCT(atomic_inc(&rxrpc_call_count)); - *_call = call; - - _leave(" = 0 (call=%p cix=%u)", call, cix); - return 0; - - error_unwait: - remove_wait_queue(&conn->chanwait, &myself); - set_current_state(TASK_RUNNING); - spin_unlock(&conn->lock); - - free_page((unsigned long) call); - _leave(" = %d", ret); - return ret; -} /* end rxrpc_create_call() */ - -/*****************************************************************************/ -/* - * create a new call record for incoming calls - */ -int rxrpc_incoming_call(struct rxrpc_connection *conn, - struct rxrpc_message *msg, - struct rxrpc_call **_call) -{ - struct rxrpc_call *call; - unsigned cix; - int ret; - - cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK; - - _enter("%p,%u,%u", conn, ntohl(msg->hdr.callNumber), cix); - - /* allocate and initialise a call record */ - ret = __rxrpc_create_call(conn, &call); - if (ret < 0) { - _leave(" = %d", ret); - return ret; - } - - call->pkt_rcv_count = 1; - call->app_call_state = RXRPC_CSTATE_SRVR_RCV_OPID; - call->app_mark = sizeof(uint32_t); - - _state(call); - - /* attach to the connection */ - ret = -EBUSY; - call->chan_ix = htonl(cix); - call->call_id = msg->hdr.callNumber; - - spin_lock(&conn->lock); - - if (!conn->channels[cix] || - conn->channels[cix]->app_call_state == RXRPC_CSTATE_COMPLETE || - conn->channels[cix]->app_call_state == RXRPC_CSTATE_ERROR - ) { - conn->channels[cix] = call; - rxrpc_get_connection(conn); - ret = 0; - } - - spin_unlock(&conn->lock); - - if (ret < 0) { - free_page((unsigned long) call); - call = NULL; - } - - if (ret == 0) { - down_write(&rxrpc_calls_sem); - list_add_tail(&call->call_link, &rxrpc_calls); - up_write(&rxrpc_calls_sem); - __RXACCT(atomic_inc(&rxrpc_call_count)); - *_call = call; - } - - _leave(" = %d [%p]", ret, call); - return ret; -} /* end rxrpc_incoming_call() */ - -/*****************************************************************************/ -/* - * free a call record - */ -void rxrpc_put_call(struct rxrpc_call *call) -{ - struct rxrpc_connection *conn = call->conn; - struct rxrpc_message *msg; - - _enter("%p{u=%d}",call,atomic_read(&call->usage)); - - /* sanity check */ - if (atomic_read(&call->usage) <= 0) - BUG(); - - /* to prevent a race, the decrement and the de-list must be effectively - * atomic */ - spin_lock(&conn->lock); - if (likely(!atomic_dec_and_test(&call->usage))) { - spin_unlock(&conn->lock); - _leave(""); - return; - } - - if (conn->channels[ntohl(call->chan_ix)] == call) - conn->channels[ntohl(call->chan_ix)] = NULL; - - spin_unlock(&conn->lock); - - wake_up(&conn->chanwait); - - rxrpc_put_connection(conn); - - /* clear the timers and dequeue from krxiod */ - del_timer_sync(&call->acks_timeout); - del_timer_sync(&call->rcv_timeout); - del_timer_sync(&call->ackr_dfr_timo); - - rxrpc_krxiod_dequeue_call(call); - - /* clean up the contents of the struct */ - if (call->snd_nextmsg) - rxrpc_put_message(call->snd_nextmsg); - - if (call->snd_ping) - rxrpc_put_message(call->snd_ping); - - while (!list_empty(&call->acks_pendq)) { - msg = list_entry(call->acks_pendq.next, - struct rxrpc_message, link); - list_del(&msg->link); - rxrpc_put_message(msg); - } - - 
while (!list_empty(&call->rcv_receiveq)) { - msg = list_entry(call->rcv_receiveq.next, - struct rxrpc_message, link); - list_del(&msg->link); - rxrpc_put_message(msg); - } - - while (!list_empty(&call->app_readyq)) { - msg = list_entry(call->app_readyq.next, - struct rxrpc_message, link); - list_del(&msg->link); - rxrpc_put_message(msg); - } - - while (!list_empty(&call->app_unreadyq)) { - msg = list_entry(call->app_unreadyq.next, - struct rxrpc_message, link); - list_del(&msg->link); - rxrpc_put_message(msg); - } - - module_put(call->owner); - - down_write(&rxrpc_calls_sem); - list_del(&call->call_link); - up_write(&rxrpc_calls_sem); - - __RXACCT(atomic_dec(&rxrpc_call_count)); - free_page((unsigned long) call); - - _leave(" [destroyed]"); -} /* end rxrpc_put_call() */ - -/*****************************************************************************/ -/* - * actually generate a normal ACK - */ -static inline int __rxrpc_call_gen_normal_ACK(struct rxrpc_call *call, - rxrpc_seq_t seq) -{ - struct rxrpc_message *msg; - struct kvec diov[3]; - __be32 aux[4]; - int delta, ret; - - /* ACKs default to DELAY */ - if (!call->ackr.reason) - call->ackr.reason = RXRPC_ACK_DELAY; - - _proto("Rx %05lu Sending ACK { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", - jiffies - call->cjif, - ntohs(call->ackr.maxSkew), - ntohl(call->ackr.firstPacket), - ntohl(call->ackr.previousPacket), - ntohl(call->ackr.serial), - rxrpc_acks[call->ackr.reason], - call->ackr.nAcks); - - aux[0] = htonl(call->conn->peer->if_mtu); /* interface MTU */ - aux[1] = htonl(1444); /* max MTU */ - aux[2] = htonl(16); /* rwind */ - aux[3] = htonl(4); /* max packets */ - - diov[0].iov_len = sizeof(struct rxrpc_ackpacket); - diov[0].iov_base = &call->ackr; - diov[1].iov_len = call->ackr_pend_cnt + 3; - diov[1].iov_base = call->ackr_array; - diov[2].iov_len = sizeof(aux); - diov[2].iov_base = &aux; - - /* build and send the message */ - ret = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK, - 3, diov, GFP_KERNEL, &msg); - if (ret < 0) - goto out; - - msg->seq = seq; - msg->hdr.seq = htonl(seq); - msg->hdr.flags |= RXRPC_SLOW_START_OK; - - ret = rxrpc_conn_sendmsg(call->conn, msg); - rxrpc_put_message(msg); - if (ret < 0) - goto out; - call->pkt_snd_count++; - - /* count how many actual ACKs there were at the front */ - for (delta = 0; delta < call->ackr_pend_cnt; delta++) - if (call->ackr_array[delta] != RXRPC_ACK_TYPE_ACK) - break; - - call->ackr_pend_cnt -= delta; /* all ACK'd to this point */ - - /* crank the ACK window around */ - if (delta == 0) { - /* un-ACK'd window */ - } - else if (delta < RXRPC_CALL_ACK_WINDOW_SIZE) { - /* partially ACK'd window - * - shuffle down to avoid losing out-of-sequence packets - */ - call->ackr_win_bot += delta; - call->ackr_win_top += delta; - - memmove(&call->ackr_array[0], - &call->ackr_array[delta], - call->ackr_pend_cnt); - - memset(&call->ackr_array[call->ackr_pend_cnt], - RXRPC_ACK_TYPE_NACK, - sizeof(call->ackr_array) - call->ackr_pend_cnt); - } - else { - /* fully ACK'd window - * - just clear the whole thing - */ - memset(&call->ackr_array, - RXRPC_ACK_TYPE_NACK, - sizeof(call->ackr_array)); - } - - /* clear this ACK */ - memset(&call->ackr, 0, sizeof(call->ackr)); - - out: - if (!call->app_call_state) - printk("___ STATE 0 ___\n"); - return ret; -} /* end __rxrpc_call_gen_normal_ACK() */ - -/*****************************************************************************/ -/* - * note the reception of a packet in the call's ACK records and generate an - * appropriate ACK packet if necessary - * 
- returns 0 if packet should be processed, 1 if packet should be ignored - * and -ve on an error - */ -static int rxrpc_call_generate_ACK(struct rxrpc_call *call, - struct rxrpc_header *hdr, - struct rxrpc_ackpacket *ack) -{ - struct rxrpc_message *msg; - rxrpc_seq_t seq; - unsigned offset; - int ret = 0, err; - u8 special_ACK, do_ACK, force; - - _enter("%p,%p { seq=%d tp=%d fl=%02x }", - call, hdr, ntohl(hdr->seq), hdr->type, hdr->flags); - - seq = ntohl(hdr->seq); - offset = seq - call->ackr_win_bot; - do_ACK = RXRPC_ACK_DELAY; - special_ACK = 0; - force = (seq == 1); - - if (call->ackr_high_seq < seq) - call->ackr_high_seq = seq; - - /* deal with generation of obvious special ACKs first */ - if (ack && ack->reason == RXRPC_ACK_PING) { - special_ACK = RXRPC_ACK_PING_RESPONSE; - ret = 1; - goto gen_ACK; - } - - if (seq < call->ackr_win_bot) { - special_ACK = RXRPC_ACK_DUPLICATE; - ret = 1; - goto gen_ACK; - } - - if (seq >= call->ackr_win_top) { - special_ACK = RXRPC_ACK_EXCEEDS_WINDOW; - ret = 1; - goto gen_ACK; - } - - if (call->ackr_array[offset] != RXRPC_ACK_TYPE_NACK) { - special_ACK = RXRPC_ACK_DUPLICATE; - ret = 1; - goto gen_ACK; - } - - /* okay... it's a normal data packet inside the ACK window */ - call->ackr_array[offset] = RXRPC_ACK_TYPE_ACK; - - if (offset < call->ackr_pend_cnt) { - } - else if (offset > call->ackr_pend_cnt) { - do_ACK = RXRPC_ACK_OUT_OF_SEQUENCE; - call->ackr_pend_cnt = offset; - goto gen_ACK; - } - - if (hdr->flags & RXRPC_REQUEST_ACK) { - do_ACK = RXRPC_ACK_REQUESTED; - } - - /* generate an ACK on the final packet of a reply just received */ - if (hdr->flags & RXRPC_LAST_PACKET) { - if (call->conn->out_clientflag) - force = 1; - } - else if (!(hdr->flags & RXRPC_MORE_PACKETS)) { - do_ACK = RXRPC_ACK_REQUESTED; - } - - /* re-ACK packets previously received out-of-order */ - for (offset++; offset < RXRPC_CALL_ACK_WINDOW_SIZE; offset++) - if (call->ackr_array[offset] != RXRPC_ACK_TYPE_ACK) - break; - - call->ackr_pend_cnt = offset; - - /* generate an ACK if we fill up the window */ - if (call->ackr_pend_cnt >= RXRPC_CALL_ACK_WINDOW_SIZE) - force = 1; - - gen_ACK: - _debug("%05lu ACKs pend=%u norm=%s special=%s%s", - jiffies - call->cjif, - call->ackr_pend_cnt, - rxrpc_acks[do_ACK], - rxrpc_acks[special_ACK], - force ? " immediate" : - do_ACK == RXRPC_ACK_REQUESTED ? " merge-req" : - hdr->flags & RXRPC_LAST_PACKET ? " finalise" : - " defer" - ); - - /* send any pending normal ACKs if need be */ - if (call->ackr_pend_cnt > 0) { - /* fill out the appropriate form */ - call->ackr.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE); - call->ackr.maxSkew = htons(min(call->ackr_high_seq - seq, - 65535U)); - call->ackr.firstPacket = htonl(call->ackr_win_bot); - call->ackr.previousPacket = call->ackr_prev_seq; - call->ackr.serial = hdr->serial; - call->ackr.nAcks = call->ackr_pend_cnt; - - if (do_ACK == RXRPC_ACK_REQUESTED) - call->ackr.reason = do_ACK; - - /* generate the ACK immediately if necessary */ - if (special_ACK || force) { - err = __rxrpc_call_gen_normal_ACK( - call, do_ACK == RXRPC_ACK_DELAY ? 
0 : seq); - if (err < 0) { - ret = err; - goto out; - } - } - } - - if (call->ackr.reason == RXRPC_ACK_REQUESTED) - call->ackr_dfr_seq = seq; - - /* start the ACK timer if not running if there are any pending deferred - * ACKs */ - if (call->ackr_pend_cnt > 0 && - call->ackr.reason != RXRPC_ACK_REQUESTED && - !timer_pending(&call->ackr_dfr_timo) - ) { - unsigned long timo; - - timo = rxrpc_call_dfr_ack_timeout + jiffies; - - _debug("START ACKR TIMER for cj=%lu", timo - call->cjif); - - spin_lock(&call->lock); - mod_timer(&call->ackr_dfr_timo, timo); - spin_unlock(&call->lock); - } - else if ((call->ackr_pend_cnt == 0 || - call->ackr.reason == RXRPC_ACK_REQUESTED) && - timer_pending(&call->ackr_dfr_timo) - ) { - /* stop timer if no pending ACKs */ - _debug("CLEAR ACKR TIMER"); - del_timer_sync(&call->ackr_dfr_timo); - } - - /* send a special ACK if one is required */ - if (special_ACK) { - struct rxrpc_ackpacket ack; - struct kvec diov[2]; - uint8_t acks[1] = { RXRPC_ACK_TYPE_ACK }; - - /* fill out the appropriate form */ - ack.bufferSpace = htons(RXRPC_CALL_ACK_WINDOW_SIZE); - ack.maxSkew = htons(min(call->ackr_high_seq - seq, - 65535U)); - ack.firstPacket = htonl(call->ackr_win_bot); - ack.previousPacket = call->ackr_prev_seq; - ack.serial = hdr->serial; - ack.reason = special_ACK; - ack.nAcks = 0; - - _proto("Rx Sending s-ACK" - " { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", - ntohs(ack.maxSkew), - ntohl(ack.firstPacket), - ntohl(ack.previousPacket), - ntohl(ack.serial), - rxrpc_acks[ack.reason], - ack.nAcks); - - diov[0].iov_len = sizeof(struct rxrpc_ackpacket); - diov[0].iov_base = &ack; - diov[1].iov_len = sizeof(acks); - diov[1].iov_base = acks; - - /* build and send the message */ - err = rxrpc_conn_newmsg(call->conn,call, RXRPC_PACKET_TYPE_ACK, - hdr->seq ? 
2 : 1, diov, - GFP_KERNEL, - &msg); - if (err < 0) { - ret = err; - goto out; - } - - msg->seq = seq; - msg->hdr.seq = htonl(seq); - msg->hdr.flags |= RXRPC_SLOW_START_OK; - - err = rxrpc_conn_sendmsg(call->conn, msg); - rxrpc_put_message(msg); - if (err < 0) { - ret = err; - goto out; - } - call->pkt_snd_count++; - } - - out: - if (hdr->seq) - call->ackr_prev_seq = hdr->seq; - - _leave(" = %d", ret); - return ret; -} /* end rxrpc_call_generate_ACK() */ - -/*****************************************************************************/ -/* - * handle work to be done on a call - * - includes packet reception and timeout processing - */ -void rxrpc_call_do_stuff(struct rxrpc_call *call) -{ - _enter("%p{flags=%lx}", call, call->flags); - - /* handle packet reception */ - if (call->flags & RXRPC_CALL_RCV_PKT) { - _debug("- receive packet"); - call->flags &= ~RXRPC_CALL_RCV_PKT; - rxrpc_call_receive_packet(call); - } - - /* handle overdue ACKs */ - if (call->flags & RXRPC_CALL_ACKS_TIMO) { - _debug("- overdue ACK timeout"); - call->flags &= ~RXRPC_CALL_ACKS_TIMO; - rxrpc_call_resend(call, call->snd_seq_count); - } - - /* handle lack of reception */ - if (call->flags & RXRPC_CALL_RCV_TIMO) { - _debug("- reception timeout"); - call->flags &= ~RXRPC_CALL_RCV_TIMO; - rxrpc_call_abort(call, -EIO); - } - - /* handle deferred ACKs */ - if (call->flags & RXRPC_CALL_ACKR_TIMO || - (call->ackr.nAcks > 0 && call->ackr.reason == RXRPC_ACK_REQUESTED) - ) { - _debug("- deferred ACK timeout: cj=%05lu r=%s n=%u", - jiffies - call->cjif, - rxrpc_acks[call->ackr.reason], - call->ackr.nAcks); - - call->flags &= ~RXRPC_CALL_ACKR_TIMO; - - if (call->ackr.nAcks > 0 && - call->app_call_state != RXRPC_CSTATE_ERROR) { - /* generate ACK */ - __rxrpc_call_gen_normal_ACK(call, call->ackr_dfr_seq); - call->ackr_dfr_seq = 0; - } - } - - _leave(""); - -} /* end rxrpc_call_do_stuff() */ - -/*****************************************************************************/ -/* - * send an abort message at call or connection level - * - must be called with call->lock held - * - the supplied error code is sent as the packet data - */ -static int __rxrpc_call_abort(struct rxrpc_call *call, int errno) -{ - struct rxrpc_connection *conn = call->conn; - struct rxrpc_message *msg; - struct kvec diov[1]; - int ret; - __be32 _error; - - _enter("%p{%08x},%p{%d},%d", - conn, ntohl(conn->conn_id), call, ntohl(call->call_id), errno); - - /* if this call is already aborted, then just wake up any waiters */ - if (call->app_call_state == RXRPC_CSTATE_ERROR) { - spin_unlock(&call->lock); - call->app_error_func(call); - _leave(" = 0"); - return 0; - } - - rxrpc_get_call(call); - - /* change the state _with_ the lock still held */ - call->app_call_state = RXRPC_CSTATE_ERROR; - call->app_err_state = RXRPC_ESTATE_LOCAL_ABORT; - call->app_errno = errno; - call->app_mark = RXRPC_APP_MARK_EOF; - call->app_read_buf = NULL; - call->app_async_read = 0; - - _state(call); - - /* ask the app to translate the error code */ - call->app_aemap_func(call); - - spin_unlock(&call->lock); - - /* flush any outstanding ACKs */ - del_timer_sync(&call->acks_timeout); - del_timer_sync(&call->rcv_timeout); - del_timer_sync(&call->ackr_dfr_timo); - - if (rxrpc_call_is_ack_pending(call)) - __rxrpc_call_gen_normal_ACK(call, 0); - - /* send the abort packet only if we actually traded some other - * packets */ - ret = 0; - if (call->pkt_snd_count || call->pkt_rcv_count) { - /* actually send the abort */ - _proto("Rx Sending Call ABORT { data=%d }", - call->app_abort_code); - - 
_error = htonl(call->app_abort_code); - - diov[0].iov_len = sizeof(_error); - diov[0].iov_base = &_error; - - ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_ABORT, - 1, diov, GFP_KERNEL, &msg); - if (ret == 0) { - ret = rxrpc_conn_sendmsg(conn, msg); - rxrpc_put_message(msg); - } - } - - /* tell the app layer to let go */ - call->app_error_func(call); - - rxrpc_put_call(call); - - _leave(" = %d", ret); - return ret; -} /* end __rxrpc_call_abort() */ - -/*****************************************************************************/ -/* - * send an abort message at call or connection level - * - the supplied error code is sent as the packet data - */ -int rxrpc_call_abort(struct rxrpc_call *call, int error) -{ - spin_lock(&call->lock); - - return __rxrpc_call_abort(call, error); - -} /* end rxrpc_call_abort() */ - -/*****************************************************************************/ -/* - * process packets waiting for this call - */ -static void rxrpc_call_receive_packet(struct rxrpc_call *call) -{ - struct rxrpc_message *msg; - struct list_head *_p; - - _enter("%p", call); - - rxrpc_get_call(call); /* must not go away too soon if aborted by - * app-layer */ - - while (!list_empty(&call->rcv_receiveq)) { - /* try to get next packet */ - _p = NULL; - spin_lock(&call->lock); - if (!list_empty(&call->rcv_receiveq)) { - _p = call->rcv_receiveq.next; - list_del_init(_p); - } - spin_unlock(&call->lock); - - if (!_p) - break; - - msg = list_entry(_p, struct rxrpc_message, link); - - _proto("Rx %05lu Received %s packet (%%%u,#%u,%c%c%c%c%c)", - jiffies - call->cjif, - rxrpc_pkts[msg->hdr.type], - ntohl(msg->hdr.serial), - msg->seq, - msg->hdr.flags & RXRPC_JUMBO_PACKET ? 'j' : '-', - msg->hdr.flags & RXRPC_MORE_PACKETS ? 'm' : '-', - msg->hdr.flags & RXRPC_LAST_PACKET ? 'l' : '-', - msg->hdr.flags & RXRPC_REQUEST_ACK ? 'r' : '-', - msg->hdr.flags & RXRPC_CLIENT_INITIATED ? 'C' : 'S' - ); - - switch (msg->hdr.type) { - /* deal with data packets */ - case RXRPC_PACKET_TYPE_DATA: - /* ACK the packet if necessary */ - switch (rxrpc_call_generate_ACK(call, &msg->hdr, - NULL)) { - case 0: /* useful packet */ - rxrpc_call_receive_data_packet(call, msg); - break; - case 1: /* duplicate or out-of-window packet */ - break; - default: - rxrpc_put_message(msg); - goto out; - } - break; - - /* deal with ACK packets */ - case RXRPC_PACKET_TYPE_ACK: - rxrpc_call_receive_ack_packet(call, msg); - break; - - /* deal with abort packets */ - case RXRPC_PACKET_TYPE_ABORT: { - __be32 _dbuf, *dp; - - dp = skb_header_pointer(msg->pkt, msg->offset, - sizeof(_dbuf), &_dbuf); - if (dp == NULL) - printk("Rx Received short ABORT packet\n"); - - _proto("Rx Received Call ABORT { data=%d }", - (dp ? ntohl(*dp) : 0)); - - spin_lock(&call->lock); - call->app_call_state = RXRPC_CSTATE_ERROR; - call->app_err_state = RXRPC_ESTATE_PEER_ABORT; - call->app_abort_code = (dp ? 
ntohl(*dp) : 0); - call->app_errno = -ECONNABORTED; - call->app_mark = RXRPC_APP_MARK_EOF; - call->app_read_buf = NULL; - call->app_async_read = 0; - - /* ask the app to translate the error code */ - call->app_aemap_func(call); - _state(call); - spin_unlock(&call->lock); - call->app_error_func(call); - break; - } - default: - /* deal with other packet types */ - _proto("Rx Unsupported packet type %u (#%u)", - msg->hdr.type, msg->seq); - break; - } - - rxrpc_put_message(msg); - } - - out: - rxrpc_put_call(call); - _leave(""); -} /* end rxrpc_call_receive_packet() */ - -/*****************************************************************************/ -/* - * process next data packet - * - as the next data packet arrives: - * - it is queued on app_readyq _if_ it is the next one expected - * (app_ready_seq+1) - * - it is queued on app_unreadyq _if_ it is not the next one expected - * - if a packet placed on app_readyq completely fills a hole leading up to - * the first packet on app_unreadyq, then packets now in sequence are - * tranferred to app_readyq - * - the application layer can only see packets on app_readyq - * (app_ready_qty bytes) - * - the application layer is prodded every time a new packet arrives - */ -static void rxrpc_call_receive_data_packet(struct rxrpc_call *call, - struct rxrpc_message *msg) -{ - const struct rxrpc_operation *optbl, *op; - struct rxrpc_message *pmsg; - struct list_head *_p; - int ret, lo, hi, rmtimo; - __be32 opid; - - _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq); - - rxrpc_get_message(msg); - - /* add to the unready queue if we'd have to create a hole in the ready - * queue otherwise */ - if (msg->seq != call->app_ready_seq + 1) { - _debug("Call add packet %d to unreadyq", msg->seq); - - /* insert in seq order */ - list_for_each(_p, &call->app_unreadyq) { - pmsg = list_entry(_p, struct rxrpc_message, link); - if (pmsg->seq > msg->seq) - break; - } - - list_add_tail(&msg->link, _p); - - _leave(" [unreadyq]"); - return; - } - - /* next in sequence - simply append into the call's ready queue */ - _debug("Call add packet %d to readyq (+%Zd => %Zd bytes)", - msg->seq, msg->dsize, call->app_ready_qty); - - spin_lock(&call->lock); - call->app_ready_seq = msg->seq; - call->app_ready_qty += msg->dsize; - list_add_tail(&msg->link, &call->app_readyq); - - /* move unready packets to the readyq if we got rid of a hole */ - while (!list_empty(&call->app_unreadyq)) { - pmsg = list_entry(call->app_unreadyq.next, - struct rxrpc_message, link); - - if (pmsg->seq != call->app_ready_seq + 1) - break; - - /* next in sequence - just move list-to-list */ - _debug("Call transfer packet %d to readyq (+%Zd => %Zd bytes)", - pmsg->seq, pmsg->dsize, call->app_ready_qty); - - call->app_ready_seq = pmsg->seq; - call->app_ready_qty += pmsg->dsize; - list_move_tail(&pmsg->link, &call->app_readyq); - } - - /* see if we've got the last packet yet */ - if (!list_empty(&call->app_readyq)) { - pmsg = list_entry(call->app_readyq.prev, - struct rxrpc_message, link); - if (pmsg->hdr.flags & RXRPC_LAST_PACKET) { - call->app_last_rcv = 1; - _debug("Last packet on readyq"); - } - } - - switch (call->app_call_state) { - /* do nothing if call already aborted */ - case RXRPC_CSTATE_ERROR: - spin_unlock(&call->lock); - _leave(" [error]"); - return; - - /* extract the operation ID from an incoming call if that's not - * yet been done */ - case RXRPC_CSTATE_SRVR_RCV_OPID: - spin_unlock(&call->lock); - - /* handle as yet insufficient data for the operation ID */ - if 
(call->app_ready_qty < 4) { - if (call->app_last_rcv) - /* trouble - last packet seen */ - rxrpc_call_abort(call, -EINVAL); - - _leave(""); - return; - } - - /* pull the operation ID out of the buffer */ - ret = rxrpc_call_read_data(call, &opid, sizeof(opid), 0); - if (ret < 0) { - printk("Unexpected error from read-data: %d\n", ret); - if (call->app_call_state != RXRPC_CSTATE_ERROR) - rxrpc_call_abort(call, ret); - _leave(""); - return; - } - call->app_opcode = ntohl(opid); - - /* locate the operation in the available ops table */ - optbl = call->conn->service->ops_begin; - lo = 0; - hi = call->conn->service->ops_end - optbl; - - while (lo < hi) { - int mid = (hi + lo) / 2; - op = &optbl[mid]; - if (call->app_opcode == op->id) - goto found_op; - if (call->app_opcode > op->id) - lo = mid + 1; - else - hi = mid; - } - - /* search failed */ - kproto("Rx Client requested operation %d from %s service", - call->app_opcode, call->conn->service->name); - rxrpc_call_abort(call, -EINVAL); - _leave(" [inval]"); - return; - - found_op: - _proto("Rx Client requested operation %s from %s service", - op->name, call->conn->service->name); - - /* we're now waiting for the argument block (unless the call - * was aborted) */ - spin_lock(&call->lock); - if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_OPID || - call->app_call_state == RXRPC_CSTATE_SRVR_SND_REPLY) { - if (!call->app_last_rcv) - call->app_call_state = - RXRPC_CSTATE_SRVR_RCV_ARGS; - else if (call->app_ready_qty > 0) - call->app_call_state = - RXRPC_CSTATE_SRVR_GOT_ARGS; - else - call->app_call_state = - RXRPC_CSTATE_SRVR_SND_REPLY; - call->app_mark = op->asize; - call->app_user = op->user; - } - spin_unlock(&call->lock); - - _state(call); - break; - - case RXRPC_CSTATE_SRVR_RCV_ARGS: - /* change state if just received last packet of arg block */ - if (call->app_last_rcv) - call->app_call_state = RXRPC_CSTATE_SRVR_GOT_ARGS; - spin_unlock(&call->lock); - - _state(call); - break; - - case RXRPC_CSTATE_CLNT_RCV_REPLY: - /* change state if just received last packet of reply block */ - rmtimo = 0; - if (call->app_last_rcv) { - call->app_call_state = RXRPC_CSTATE_CLNT_GOT_REPLY; - rmtimo = 1; - } - spin_unlock(&call->lock); - - if (rmtimo) { - del_timer_sync(&call->acks_timeout); - del_timer_sync(&call->rcv_timeout); - del_timer_sync(&call->ackr_dfr_timo); - } - - _state(call); - break; - - default: - /* deal with data reception in an unexpected state */ - printk("Unexpected state [[[ %u ]]]\n", call->app_call_state); - __rxrpc_call_abort(call, -EBADMSG); - _leave(""); - return; - } - - if (call->app_call_state == RXRPC_CSTATE_CLNT_RCV_REPLY && - call->app_last_rcv) - BUG(); - - /* otherwise just invoke the data function whenever we can satisfy its desire for more - * data - */ - _proto("Rx Received Op Data: st=%u qty=%Zu mk=%Zu%s", - call->app_call_state, call->app_ready_qty, call->app_mark, - call->app_last_rcv ? 
" last-rcvd" : ""); - - spin_lock(&call->lock); - - ret = __rxrpc_call_read_data(call); - switch (ret) { - case 0: - spin_unlock(&call->lock); - call->app_attn_func(call); - break; - case -EAGAIN: - spin_unlock(&call->lock); - break; - case -ECONNABORTED: - spin_unlock(&call->lock); - break; - default: - __rxrpc_call_abort(call, ret); - break; - } - - _state(call); - - _leave(""); - -} /* end rxrpc_call_receive_data_packet() */ - -/*****************************************************************************/ -/* - * received an ACK packet - */ -static void rxrpc_call_receive_ack_packet(struct rxrpc_call *call, - struct rxrpc_message *msg) -{ - struct rxrpc_ackpacket _ack, *ap; - rxrpc_serial_net_t serial; - rxrpc_seq_t seq; - int ret; - - _enter("%p{%u},%p{%u}", call, ntohl(call->call_id), msg, msg->seq); - - /* extract the basic ACK record */ - ap = skb_header_pointer(msg->pkt, msg->offset, sizeof(_ack), &_ack); - if (ap == NULL) { - printk("Rx Received short ACK packet\n"); - return; - } - msg->offset += sizeof(_ack); - - serial = ap->serial; - seq = ntohl(ap->firstPacket); - - _proto("Rx Received ACK %%%d { b=%hu m=%hu f=%u p=%u s=%u r=%s n=%u }", - ntohl(msg->hdr.serial), - ntohs(ap->bufferSpace), - ntohs(ap->maxSkew), - seq, - ntohl(ap->previousPacket), - ntohl(serial), - rxrpc_acks[ap->reason], - call->ackr.nAcks - ); - - /* check the other side isn't ACK'ing a sequence number I haven't sent - * yet */ - if (ap->nAcks > 0 && - (seq > call->snd_seq_count || - seq + ap->nAcks - 1 > call->snd_seq_count)) { - printk("Received ACK (#%u-#%u) for unsent packet\n", - seq, seq + ap->nAcks - 1); - rxrpc_call_abort(call, -EINVAL); - _leave(""); - return; - } - - /* deal with RTT calculation */ - if (serial) { - struct rxrpc_message *rttmsg; - - /* find the prompting packet */ - spin_lock(&call->lock); - if (call->snd_ping && call->snd_ping->hdr.serial == serial) { - /* it was a ping packet */ - rttmsg = call->snd_ping; - call->snd_ping = NULL; - spin_unlock(&call->lock); - - if (rttmsg) { - rttmsg->rttdone = 1; - rxrpc_peer_calculate_rtt(call->conn->peer, - rttmsg, msg); - rxrpc_put_message(rttmsg); - } - } - else { - struct list_head *_p; - - /* it ought to be a data packet - look in the pending - * ACK list */ - list_for_each(_p, &call->acks_pendq) { - rttmsg = list_entry(_p, struct rxrpc_message, - link); - if (rttmsg->hdr.serial == serial) { - if (rttmsg->rttdone) - /* never do RTT twice without - * resending */ - break; - - rttmsg->rttdone = 1; - rxrpc_peer_calculate_rtt( - call->conn->peer, rttmsg, msg); - break; - } - } - spin_unlock(&call->lock); - } - } - - switch (ap->reason) { - /* deal with negative/positive acknowledgement of data - * packets */ - case RXRPC_ACK_REQUESTED: - case RXRPC_ACK_DELAY: - case RXRPC_ACK_IDLE: - rxrpc_call_definitively_ACK(call, seq - 1); - - case RXRPC_ACK_DUPLICATE: - case RXRPC_ACK_OUT_OF_SEQUENCE: - case RXRPC_ACK_EXCEEDS_WINDOW: - call->snd_resend_cnt = 0; - ret = rxrpc_call_record_ACK(call, msg, seq, ap->nAcks); - if (ret < 0) - rxrpc_call_abort(call, ret); - break; - - /* respond to ping packets immediately */ - case RXRPC_ACK_PING: - rxrpc_call_generate_ACK(call, &msg->hdr, ap); - break; - - /* only record RTT on ping response packets */ - case RXRPC_ACK_PING_RESPONSE: - if (call->snd_ping) { - struct rxrpc_message *rttmsg; - - /* only do RTT stuff if the response matches the - * retained ping */ - rttmsg = NULL; - spin_lock(&call->lock); - if (call->snd_ping && - call->snd_ping->hdr.serial == ap->serial) { - rttmsg = call->snd_ping; - 
call->snd_ping = NULL; - } - spin_unlock(&call->lock); - - if (rttmsg) { - rttmsg->rttdone = 1; - rxrpc_peer_calculate_rtt(call->conn->peer, - rttmsg, msg); - rxrpc_put_message(rttmsg); - } - } - break; - - default: - printk("Unsupported ACK reason %u\n", ap->reason); - break; - } - - _leave(""); -} /* end rxrpc_call_receive_ack_packet() */ - -/*****************************************************************************/ -/* - * record definitive ACKs for all messages up to and including the one with the - * 'highest' seq - */ -static void rxrpc_call_definitively_ACK(struct rxrpc_call *call, - rxrpc_seq_t highest) -{ - struct rxrpc_message *msg; - int now_complete; - - _enter("%p{ads=%u},%u", call, call->acks_dftv_seq, highest); - - while (call->acks_dftv_seq < highest) { - call->acks_dftv_seq++; - - _proto("Definitive ACK on packet #%u", call->acks_dftv_seq); - - /* discard those at front of queue until message with highest - * ACK is found */ - spin_lock(&call->lock); - msg = NULL; - if (!list_empty(&call->acks_pendq)) { - msg = list_entry(call->acks_pendq.next, - struct rxrpc_message, link); - list_del_init(&msg->link); /* dequeue */ - if (msg->state == RXRPC_MSG_SENT) - call->acks_pend_cnt--; - } - spin_unlock(&call->lock); - - /* insanity check */ - if (!msg) - panic("%s(): acks_pendq unexpectedly empty\n", - __FUNCTION__); - - if (msg->seq != call->acks_dftv_seq) - panic("%s(): Packet #%u expected at front of acks_pendq" - " (#%u found)\n", - __FUNCTION__, call->acks_dftv_seq, msg->seq); - - /* discard the message */ - msg->state = RXRPC_MSG_DONE; - rxrpc_put_message(msg); - } - - /* if all sent packets are definitively ACK'd then prod any sleepers just in case */ - now_complete = 0; - spin_lock(&call->lock); - if (call->acks_dftv_seq == call->snd_seq_count) { - if (call->app_call_state != RXRPC_CSTATE_COMPLETE) { - call->app_call_state = RXRPC_CSTATE_COMPLETE; - _state(call); - now_complete = 1; - } - } - spin_unlock(&call->lock); - - if (now_complete) { - del_timer_sync(&call->acks_timeout); - del_timer_sync(&call->rcv_timeout); - del_timer_sync(&call->ackr_dfr_timo); - call->app_attn_func(call); - } - - _leave(""); -} /* end rxrpc_call_definitively_ACK() */ - -/*****************************************************************************/ -/* - * record the specified amount of ACKs/NAKs - */ -static int rxrpc_call_record_ACK(struct rxrpc_call *call, - struct rxrpc_message *msg, - rxrpc_seq_t seq, - size_t count) -{ - struct rxrpc_message *dmsg; - struct list_head *_p; - rxrpc_seq_t highest; - unsigned ix; - size_t chunk; - char resend, now_complete; - u8 acks[16]; - - _enter("%p{apc=%u ads=%u},%p,%u,%Zu", - call, call->acks_pend_cnt, call->acks_dftv_seq, - msg, seq, count); - - /* handle re-ACK'ing of definitively ACK'd packets (may be out-of-order - * ACKs) */ - if (seq <= call->acks_dftv_seq) { - unsigned delta = call->acks_dftv_seq - seq; - - if (count <= delta) { - _leave(" = 0 [all definitively ACK'd]"); - return 0; - } - - seq += delta; - count -= delta; - msg->offset += delta; - } - - highest = seq + count - 1; - resend = 0; - while (count > 0) { - /* extract up to 16 ACK slots at a time */ - chunk = min(count, sizeof(acks)); - count -= chunk; - - memset(acks, 2, sizeof(acks)); - - if (skb_copy_bits(msg->pkt, msg->offset, &acks, chunk) < 0) { - printk("Rx Received short ACK packet\n"); - _leave(" = -EINVAL"); - return -EINVAL; - } - msg->offset += chunk; - - /* check that the ACK set is valid */ - for (ix = 0; ix < chunk; ix++) { - switch (acks[ix]) { - case 
RXRPC_ACK_TYPE_ACK: - break; - case RXRPC_ACK_TYPE_NACK: - resend = 1; - break; - default: - printk("Rx Received unsupported ACK state" - " %u\n", acks[ix]); - _leave(" = -EINVAL"); - return -EINVAL; - } - } - - _proto("Rx ACK of packets #%u-#%u " - "[%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c] (pend=%u)", - seq, (unsigned) (seq + chunk - 1), - _acktype[acks[0x0]], - _acktype[acks[0x1]], - _acktype[acks[0x2]], - _acktype[acks[0x3]], - _acktype[acks[0x4]], - _acktype[acks[0x5]], - _acktype[acks[0x6]], - _acktype[acks[0x7]], - _acktype[acks[0x8]], - _acktype[acks[0x9]], - _acktype[acks[0xA]], - _acktype[acks[0xB]], - _acktype[acks[0xC]], - _acktype[acks[0xD]], - _acktype[acks[0xE]], - _acktype[acks[0xF]], - call->acks_pend_cnt - ); - - /* mark the packets in the ACK queue as being provisionally - * ACK'd */ - ix = 0; - spin_lock(&call->lock); - - /* find the first packet ACK'd/NAK'd here */ - list_for_each(_p, &call->acks_pendq) { - dmsg = list_entry(_p, struct rxrpc_message, link); - if (dmsg->seq == seq) - goto found_first; - _debug("- %u: skipping #%u", ix, dmsg->seq); - } - goto bad_queue; - - found_first: - do { - _debug("- %u: processing #%u (%c) apc=%u", - ix, dmsg->seq, _acktype[acks[ix]], - call->acks_pend_cnt); - - if (acks[ix] == RXRPC_ACK_TYPE_ACK) { - if (dmsg->state == RXRPC_MSG_SENT) - call->acks_pend_cnt--; - dmsg->state = RXRPC_MSG_ACKED; - } - else { - if (dmsg->state == RXRPC_MSG_ACKED) - call->acks_pend_cnt++; - dmsg->state = RXRPC_MSG_SENT; - } - ix++; - seq++; - - _p = dmsg->link.next; - dmsg = list_entry(_p, struct rxrpc_message, link); - } while(ix < chunk && - _p != &call->acks_pendq && - dmsg->seq == seq); - - if (ix < chunk) - goto bad_queue; - - spin_unlock(&call->lock); - } - - if (resend) - rxrpc_call_resend(call, highest); - - /* if all packets are provisionally ACK'd, then wake up anyone who's - * waiting for that */ - now_complete = 0; - spin_lock(&call->lock); - if (call->acks_pend_cnt == 0) { - if (call->app_call_state == RXRPC_CSTATE_SRVR_RCV_FINAL_ACK) { - call->app_call_state = RXRPC_CSTATE_COMPLETE; - _state(call); - } - now_complete = 1; - } - spin_unlock(&call->lock); - - if (now_complete) { - _debug("- wake up waiters"); - del_timer_sync(&call->acks_timeout); - del_timer_sync(&call->rcv_timeout); - del_timer_sync(&call->ackr_dfr_timo); - call->app_attn_func(call); - } - - _leave(" = 0 (apc=%u)", call->acks_pend_cnt); - return 0; - - bad_queue: - panic("%s(): acks_pendq in bad state (packet #%u absent)\n", - __FUNCTION__, seq); - -} /* end rxrpc_call_record_ACK() */ - -/*****************************************************************************/ -/* - * transfer data from the ready packet queue to the asynchronous read buffer - * - since this func is the only one going to look at packets queued on - * app_readyq, we don't need a lock to modify or access them, only to modify - * the queue pointers - * - called with call->lock held - * - the buffer must be in kernel space - * - returns: - * 0 if buffer filled - * -EAGAIN if buffer not filled and more data to come - * -EBADMSG if last packet received and insufficient data left - * -ECONNABORTED if the call has in an error state - */ -static int __rxrpc_call_read_data(struct rxrpc_call *call) -{ - struct rxrpc_message *msg; - size_t qty; - int ret; - - _enter("%p{as=%d buf=%p qty=%Zu/%Zu}", - call, - call->app_async_read, call->app_read_buf, - call->app_ready_qty, call->app_mark); - - /* check the state */ - switch (call->app_call_state) { - case RXRPC_CSTATE_SRVR_RCV_ARGS: - case RXRPC_CSTATE_CLNT_RCV_REPLY: - 
if (call->app_last_rcv) { - printk("%s(%p,%p,%Zd):" - " Inconsistent call state (%s, last pkt)", - __FUNCTION__, - call, call->app_read_buf, call->app_mark, - rxrpc_call_states[call->app_call_state]); - BUG(); - } - break; - - case RXRPC_CSTATE_SRVR_RCV_OPID: - case RXRPC_CSTATE_SRVR_GOT_ARGS: - case RXRPC_CSTATE_CLNT_GOT_REPLY: - break; - - case RXRPC_CSTATE_SRVR_SND_REPLY: - if (!call->app_last_rcv) { - printk("%s(%p,%p,%Zd):" - " Inconsistent call state (%s, not last pkt)", - __FUNCTION__, - call, call->app_read_buf, call->app_mark, - rxrpc_call_states[call->app_call_state]); - BUG(); - } - _debug("Trying to read data from call in SND_REPLY state"); - break; - - case RXRPC_CSTATE_ERROR: - _leave(" = -ECONNABORTED"); - return -ECONNABORTED; - - default: - printk("reading in unexpected state [[[ %u ]]]\n", - call->app_call_state); - BUG(); - } - - /* handle the case of not having an async buffer */ - if (!call->app_async_read) { - if (call->app_mark == RXRPC_APP_MARK_EOF) { - ret = call->app_last_rcv ? 0 : -EAGAIN; - } - else { - if (call->app_mark >= call->app_ready_qty) { - call->app_mark = RXRPC_APP_MARK_EOF; - ret = 0; - } - else { - ret = call->app_last_rcv ? -EBADMSG : -EAGAIN; - } - } - - _leave(" = %d [no buf]", ret); - return 0; - } - - while (!list_empty(&call->app_readyq) && call->app_mark > 0) { - msg = list_entry(call->app_readyq.next, - struct rxrpc_message, link); - - /* drag as much data as we need out of this packet */ - qty = min(call->app_mark, msg->dsize); - - _debug("reading %Zu from skb=%p off=%lu", - qty, msg->pkt, msg->offset); - - if (call->app_read_buf) - if (skb_copy_bits(msg->pkt, msg->offset, - call->app_read_buf, qty) < 0) - panic("%s: Failed to copy data from packet:" - " (%p,%p,%Zd)", - __FUNCTION__, - call, call->app_read_buf, qty); - - /* if that packet is now empty, discard it */ - call->app_ready_qty -= qty; - msg->dsize -= qty; - - if (msg->dsize == 0) { - list_del_init(&msg->link); - rxrpc_put_message(msg); - } - else { - msg->offset += qty; - } - - call->app_mark -= qty; - if (call->app_read_buf) - call->app_read_buf += qty; - } - - if (call->app_mark == 0) { - call->app_async_read = 0; - call->app_mark = RXRPC_APP_MARK_EOF; - call->app_read_buf = NULL; - - /* adjust the state if used up all packets */ - if (list_empty(&call->app_readyq) && call->app_last_rcv) { - switch (call->app_call_state) { - case RXRPC_CSTATE_SRVR_RCV_OPID: - call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY; - call->app_mark = RXRPC_APP_MARK_EOF; - _state(call); - del_timer_sync(&call->rcv_timeout); - break; - case RXRPC_CSTATE_SRVR_GOT_ARGS: - call->app_call_state = RXRPC_CSTATE_SRVR_SND_REPLY; - _state(call); - del_timer_sync(&call->rcv_timeout); - break; - default: - call->app_call_state = RXRPC_CSTATE_COMPLETE; - _state(call); - del_timer_sync(&call->acks_timeout); - del_timer_sync(&call->ackr_dfr_timo); - del_timer_sync(&call->rcv_timeout); - break; - } - } - - _leave(" = 0"); - return 0; - } - - if (call->app_last_rcv) { - _debug("Insufficient data (%Zu/%Zu)", - call->app_ready_qty, call->app_mark); - call->app_async_read = 0; - call->app_mark = RXRPC_APP_MARK_EOF; - call->app_read_buf = NULL; - - _leave(" = -EBADMSG"); - return -EBADMSG; - } - - _leave(" = -EAGAIN"); - return -EAGAIN; -} /* end __rxrpc_call_read_data() */ - -/*****************************************************************************/ -/* - * attempt to read the specified amount of data from the call's ready queue - * into the buffer provided - * - since this func is the only one going to look at 
packets queued on - * app_readyq, we don't need a lock to modify or access them, only to modify - * the queue pointers - * - if the buffer pointer is NULL, then data is merely drained, not copied - * - if flags&RXRPC_CALL_READ_BLOCK, then the function will wait until there is - * enough data or an error will be generated - * - note that the caller must have added the calling task to the call's wait - * queue beforehand - * - if flags&RXRPC_CALL_READ_ALL, then an error will be generated if this - * function doesn't read all available data - */ -int rxrpc_call_read_data(struct rxrpc_call *call, - void *buffer, size_t size, int flags) -{ - int ret; - - _enter("%p{arq=%Zu},%p,%Zd,%x", - call, call->app_ready_qty, buffer, size, flags); - - spin_lock(&call->lock); - - if (unlikely(!!call->app_read_buf)) { - spin_unlock(&call->lock); - _leave(" = -EBUSY"); - return -EBUSY; - } - - call->app_mark = size; - call->app_read_buf = buffer; - call->app_async_read = 1; - call->app_read_count++; - - /* read as much data as possible */ - ret = __rxrpc_call_read_data(call); - switch (ret) { - case 0: - if (flags & RXRPC_CALL_READ_ALL && - (!call->app_last_rcv || call->app_ready_qty > 0)) { - _leave(" = -EBADMSG"); - __rxrpc_call_abort(call, -EBADMSG); - return -EBADMSG; - } - - spin_unlock(&call->lock); - call->app_attn_func(call); - _leave(" = 0"); - return ret; - - case -ECONNABORTED: - spin_unlock(&call->lock); - _leave(" = %d [aborted]", ret); - return ret; - - default: - __rxrpc_call_abort(call, ret); - _leave(" = %d", ret); - return ret; - - case -EAGAIN: - spin_unlock(&call->lock); - - if (!(flags & RXRPC_CALL_READ_BLOCK)) { - _leave(" = -EAGAIN"); - return -EAGAIN; - } - - /* wait for the data to arrive */ - _debug("blocking for data arrival"); - - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - if (!call->app_async_read || signal_pending(current)) - break; - schedule(); - } - set_current_state(TASK_RUNNING); - - if (signal_pending(current)) { - _leave(" = -EINTR"); - return -EINTR; - } - - if (call->app_call_state == RXRPC_CSTATE_ERROR) { - _leave(" = -ECONNABORTED"); - return -ECONNABORTED; - } - - _leave(" = 0"); - return 0; - } - -} /* end rxrpc_call_read_data() */ - -/*****************************************************************************/ -/* - * write data to a call - * - the data may not be sent immediately if it doesn't fill a buffer - * - if we can't queue all the data for buffering now, siov[] will have been - * adjusted to take account of what has been sent - */ -int rxrpc_call_write_data(struct rxrpc_call *call, - size_t sioc, - struct kvec *siov, - u8 rxhdr_flags, - gfp_t alloc_flags, - int dup_data, - size_t *size_sent) -{ - struct rxrpc_message *msg; - struct kvec *sptr; - size_t space, size, chunk, tmp; - char *buf; - int ret; - - _enter("%p,%Zu,%p,%02x,%x,%d,%p", - call, sioc, siov, rxhdr_flags, alloc_flags, dup_data, - size_sent); - - *size_sent = 0; - size = 0; - ret = -EINVAL; - - /* can't send more if we've sent last packet from this end */ - switch (call->app_call_state) { - case RXRPC_CSTATE_SRVR_SND_REPLY: - case RXRPC_CSTATE_CLNT_SND_ARGS: - break; - case RXRPC_CSTATE_ERROR: - ret = call->app_errno; - default: - goto out; - } - - /* calculate how much data we've been given */ - sptr = siov; - for (; sioc > 0; sptr++, sioc--) { - if (!sptr->iov_len) - continue; - - if (!sptr->iov_base) - goto out; - - size += sptr->iov_len; - } - - _debug("- size=%Zu mtu=%Zu", size, call->conn->mtu_size); - - do { - /* make sure there's a message under construction */ - if 
(!call->snd_nextmsg) { - /* no - allocate a message with no data yet attached */ - ret = rxrpc_conn_newmsg(call->conn, call, - RXRPC_PACKET_TYPE_DATA, - 0, NULL, alloc_flags, - &call->snd_nextmsg); - if (ret < 0) - goto out; - _debug("- allocated new message [ds=%Zu]", - call->snd_nextmsg->dsize); - } - - msg = call->snd_nextmsg; - msg->hdr.flags |= rxhdr_flags; - - /* deal with zero-length terminal packet */ - if (size == 0) { - if (rxhdr_flags & RXRPC_LAST_PACKET) { - ret = rxrpc_call_flush(call); - if (ret < 0) - goto out; - } - break; - } - - /* work out how much space current packet has available */ - space = call->conn->mtu_size - msg->dsize; - chunk = min(space, size); - - _debug("- [before] space=%Zu chunk=%Zu", space, chunk); - - while (!siov->iov_len) - siov++; - - /* if we are going to have to duplicate the data then coalesce - * it too */ - if (dup_data) { - /* don't allocate more that 1 page at a time */ - if (chunk > PAGE_SIZE) - chunk = PAGE_SIZE; - - /* allocate a data buffer and attach to the message */ - buf = kmalloc(chunk, alloc_flags); - if (unlikely(!buf)) { - if (msg->dsize == - sizeof(struct rxrpc_header)) { - /* discard an empty msg and wind back - * the seq counter */ - rxrpc_put_message(msg); - call->snd_nextmsg = NULL; - call->snd_seq_count--; - } - - ret = -ENOMEM; - goto out; - } - - tmp = msg->dcount++; - set_bit(tmp, &msg->dfree); - msg->data[tmp].iov_base = buf; - msg->data[tmp].iov_len = chunk; - msg->dsize += chunk; - *size_sent += chunk; - size -= chunk; - - /* load the buffer with data */ - while (chunk > 0) { - tmp = min(chunk, siov->iov_len); - memcpy(buf, siov->iov_base, tmp); - buf += tmp; - siov->iov_base += tmp; - siov->iov_len -= tmp; - if (!siov->iov_len) - siov++; - chunk -= tmp; - } - } - else { - /* we want to attach the supplied buffers directly */ - while (chunk > 0 && - msg->dcount < RXRPC_MSG_MAX_IOCS) { - tmp = msg->dcount++; - msg->data[tmp].iov_base = siov->iov_base; - msg->data[tmp].iov_len = siov->iov_len; - msg->dsize += siov->iov_len; - *size_sent += siov->iov_len; - size -= siov->iov_len; - chunk -= siov->iov_len; - siov++; - } - } - - _debug("- [loaded] chunk=%Zu size=%Zu", chunk, size); - - /* dispatch the message when full, final or requesting ACK */ - if (msg->dsize >= call->conn->mtu_size || rxhdr_flags) { - ret = rxrpc_call_flush(call); - if (ret < 0) - goto out; - } - - } while(size > 0); - - ret = 0; - out: - _leave(" = %d (%Zd queued, %Zd rem)", ret, *size_sent, size); - return ret; - -} /* end rxrpc_call_write_data() */ - -/*****************************************************************************/ -/* - * flush outstanding packets to the network - */ -static int rxrpc_call_flush(struct rxrpc_call *call) -{ - struct rxrpc_message *msg; - int ret = 0; - - _enter("%p", call); - - rxrpc_get_call(call); - - /* if there's a packet under construction, then dispatch it now */ - if (call->snd_nextmsg) { - msg = call->snd_nextmsg; - call->snd_nextmsg = NULL; - - if (msg->hdr.flags & RXRPC_LAST_PACKET) { - msg->hdr.flags &= ~RXRPC_MORE_PACKETS; - if (call->app_call_state != RXRPC_CSTATE_CLNT_SND_ARGS) - msg->hdr.flags |= RXRPC_REQUEST_ACK; - } - else { - msg->hdr.flags |= RXRPC_MORE_PACKETS; - } - - _proto("Sending DATA message { ds=%Zu dc=%u df=%02lu }", - msg->dsize, msg->dcount, msg->dfree); - - /* queue and adjust call state */ - spin_lock(&call->lock); - list_add_tail(&msg->link, &call->acks_pendq); - - /* decide what to do depending on current state and if this is - * the last packet */ - ret = -EINVAL; - switch 
(call->app_call_state) { - case RXRPC_CSTATE_SRVR_SND_REPLY: - if (msg->hdr.flags & RXRPC_LAST_PACKET) { - call->app_call_state = - RXRPC_CSTATE_SRVR_RCV_FINAL_ACK; - _state(call); - } - break; - - case RXRPC_CSTATE_CLNT_SND_ARGS: - if (msg->hdr.flags & RXRPC_LAST_PACKET) { - call->app_call_state = - RXRPC_CSTATE_CLNT_RCV_REPLY; - _state(call); - } - break; - - case RXRPC_CSTATE_ERROR: - ret = call->app_errno; - default: - spin_unlock(&call->lock); - goto out; - } - - call->acks_pend_cnt++; - - mod_timer(&call->acks_timeout, - __rxrpc_rtt_based_timeout(call, - rxrpc_call_acks_timeout)); - - spin_unlock(&call->lock); - - ret = rxrpc_conn_sendmsg(call->conn, msg); - if (ret == 0) - call->pkt_snd_count++; - } - - out: - rxrpc_put_call(call); - - _leave(" = %d", ret); - return ret; - -} /* end rxrpc_call_flush() */ - -/*****************************************************************************/ -/* - * resend NAK'd or unacknowledged packets up to the highest one specified - */ -static void rxrpc_call_resend(struct rxrpc_call *call, rxrpc_seq_t highest) -{ - struct rxrpc_message *msg; - struct list_head *_p; - rxrpc_seq_t seq = 0; - - _enter("%p,%u", call, highest); - - _proto("Rx Resend required"); - - /* handle too many resends */ - if (call->snd_resend_cnt >= rxrpc_call_max_resend) { - _debug("Aborting due to too many resends (rcv=%d)", - call->pkt_rcv_count); - rxrpc_call_abort(call, - call->pkt_rcv_count > 0 ? -EIO : -ETIMEDOUT); - _leave(""); - return; - } - - spin_lock(&call->lock); - call->snd_resend_cnt++; - for (;;) { - /* determine which the next packet we might need to ACK is */ - if (seq <= call->acks_dftv_seq) - seq = call->acks_dftv_seq; - seq++; - - if (seq > highest) - break; - - /* look for the packet in the pending-ACK queue */ - list_for_each(_p, &call->acks_pendq) { - msg = list_entry(_p, struct rxrpc_message, link); - if (msg->seq == seq) - goto found_msg; - } - - panic("%s(%p,%d):" - " Inconsistent pending-ACK queue (ds=%u sc=%u sq=%u)\n", - __FUNCTION__, call, highest, - call->acks_dftv_seq, call->snd_seq_count, seq); - - found_msg: - if (msg->state != RXRPC_MSG_SENT) - continue; /* only un-ACK'd packets */ - - rxrpc_get_message(msg); - spin_unlock(&call->lock); - - /* send each message again (and ignore any errors we might - * incur) */ - _proto("Resending DATA message { ds=%Zu dc=%u df=%02lu }", - msg->dsize, msg->dcount, msg->dfree); - - if (rxrpc_conn_sendmsg(call->conn, msg) == 0) - call->pkt_snd_count++; - - rxrpc_put_message(msg); - - spin_lock(&call->lock); - } - - /* reset the timeout */ - mod_timer(&call->acks_timeout, - __rxrpc_rtt_based_timeout(call, rxrpc_call_acks_timeout)); - - spin_unlock(&call->lock); - - _leave(""); -} /* end rxrpc_call_resend() */ - -/*****************************************************************************/ -/* - * handle an ICMP error being applied to a call - */ -void rxrpc_call_handle_error(struct rxrpc_call *call, int local, int errno) -{ - _enter("%p{%u},%d", call, ntohl(call->call_id), errno); - - /* if this call is already aborted, then just wake up any waiters */ - if (call->app_call_state == RXRPC_CSTATE_ERROR) { - call->app_error_func(call); - } - else { - /* tell the app layer what happened */ - spin_lock(&call->lock); - call->app_call_state = RXRPC_CSTATE_ERROR; - _state(call); - if (local) - call->app_err_state = RXRPC_ESTATE_LOCAL_ERROR; - else - call->app_err_state = RXRPC_ESTATE_REMOTE_ERROR; - call->app_errno = errno; - call->app_mark = RXRPC_APP_MARK_EOF; - call->app_read_buf = NULL; - call->app_async_read = 0; 
- - /* map the error */ - call->app_aemap_func(call); - - del_timer_sync(&call->acks_timeout); - del_timer_sync(&call->rcv_timeout); - del_timer_sync(&call->ackr_dfr_timo); - - spin_unlock(&call->lock); - - call->app_error_func(call); - } - - _leave(""); -} /* end rxrpc_call_handle_error() */ diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c deleted file mode 100644 index 665a99952440..000000000000 --- a/net/rxrpc/connection.c +++ /dev/null @@ -1,777 +0,0 @@ -/* connection.c: Rx connection routines - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -__RXACCT_DECL(atomic_t rxrpc_connection_count); - -LIST_HEAD(rxrpc_conns); -DECLARE_RWSEM(rxrpc_conns_sem); -unsigned long rxrpc_conn_timeout = 60 * 60; - -static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn); - -static void __rxrpc_conn_timeout(rxrpc_timer_t *timer) -{ - struct rxrpc_connection *conn = - list_entry(timer, struct rxrpc_connection, timeout); - - _debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage)); - - rxrpc_conn_do_timeout(conn); -} - -static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = { - .timed_out = __rxrpc_conn_timeout, -}; - -/*****************************************************************************/ -/* - * create a new connection record - */ -static inline int __rxrpc_create_connection(struct rxrpc_peer *peer, - struct rxrpc_connection **_conn) -{ - struct rxrpc_connection *conn; - - _enter("%p",peer); - - /* allocate and initialise a connection record */ - conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); - if (!conn) { - _leave(" = -ENOMEM"); - return -ENOMEM; - } - - atomic_set(&conn->usage, 1); - - INIT_LIST_HEAD(&conn->link); - INIT_LIST_HEAD(&conn->id_link); - init_waitqueue_head(&conn->chanwait); - spin_lock_init(&conn->lock); - rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops); - - do_gettimeofday(&conn->atime); - conn->mtu_size = 1024; - conn->peer = peer; - conn->trans = peer->trans; - - __RXACCT(atomic_inc(&rxrpc_connection_count)); - *_conn = conn; - _leave(" = 0 (%p)", conn); - - return 0; -} /* end __rxrpc_create_connection() */ - -/*****************************************************************************/ -/* - * create a new connection record for outgoing connections - */ -int rxrpc_create_connection(struct rxrpc_transport *trans, - __be16 port, - __be32 addr, - uint16_t service_id, - void *security, - struct rxrpc_connection **_conn) -{ - struct rxrpc_connection *candidate, *conn; - struct rxrpc_peer *peer; - struct list_head *_p; - __be32 connid; - int ret; - - _enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id); - - /* get a peer record */ - ret = rxrpc_peer_lookup(trans, addr, &peer); - if (ret < 0) { - _leave(" = %d", ret); - return ret; - } - - /* allocate and initialise a connection record */ - ret = __rxrpc_create_connection(peer, &candidate); - if (ret < 0) { - rxrpc_put_peer(peer); - _leave(" = %d", ret); - return ret; - } - - /* fill in the specific bits */ - candidate->addr.sin_family = AF_INET; - candidate->addr.sin_port = port; 
- candidate->addr.sin_addr.s_addr = addr; - - candidate->in_epoch = rxrpc_epoch; - candidate->out_epoch = rxrpc_epoch; - candidate->in_clientflag = 0; - candidate->out_clientflag = RXRPC_CLIENT_INITIATED; - candidate->service_id = htons(service_id); - - /* invent a unique connection ID */ - write_lock(&peer->conn_idlock); - - try_next_id: - connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK); - peer->conn_idcounter += RXRPC_MAXCALLS; - - list_for_each(_p, &peer->conn_idlist) { - conn = list_entry(_p, struct rxrpc_connection, id_link); - if (connid == conn->conn_id) - goto try_next_id; - if (connid > conn->conn_id) - break; - } - - _debug("selected candidate conn ID %x.%u", - ntohl(peer->addr.s_addr), ntohl(connid)); - - candidate->conn_id = connid; - list_add_tail(&candidate->id_link, _p); - - write_unlock(&peer->conn_idlock); - - /* attach to peer */ - candidate->peer = peer; - - write_lock(&peer->conn_lock); - - /* search the peer's transport graveyard list */ - spin_lock(&peer->conn_gylock); - list_for_each(_p, &peer->conn_graveyard) { - conn = list_entry(_p, struct rxrpc_connection, link); - if (conn->addr.sin_port == candidate->addr.sin_port && - conn->security_ix == candidate->security_ix && - conn->service_id == candidate->service_id && - conn->in_clientflag == 0) - goto found_in_graveyard; - } - spin_unlock(&peer->conn_gylock); - - /* pick the new candidate */ - _debug("created connection: {%08x} [out]", ntohl(candidate->conn_id)); - atomic_inc(&peer->conn_count); - conn = candidate; - candidate = NULL; - - make_active: - list_add_tail(&conn->link, &peer->conn_active); - write_unlock(&peer->conn_lock); - - if (candidate) { - write_lock(&peer->conn_idlock); - list_del(&candidate->id_link); - write_unlock(&peer->conn_idlock); - - __RXACCT(atomic_dec(&rxrpc_connection_count)); - kfree(candidate); - } - else { - down_write(&rxrpc_conns_sem); - list_add_tail(&conn->proc_link, &rxrpc_conns); - up_write(&rxrpc_conns_sem); - } - - *_conn = conn; - _leave(" = 0 (%p)", conn); - - return 0; - - /* handle resurrecting a connection from the graveyard */ - found_in_graveyard: - _debug("resurrecting connection: {%08x} [out]", ntohl(conn->conn_id)); - rxrpc_get_connection(conn); - rxrpc_krxtimod_del_timer(&conn->timeout); - list_del_init(&conn->link); - spin_unlock(&peer->conn_gylock); - goto make_active; -} /* end rxrpc_create_connection() */ - -/*****************************************************************************/ -/* - * lookup the connection for an incoming packet - * - create a new connection record for unrecorded incoming connections - */ -int rxrpc_connection_lookup(struct rxrpc_peer *peer, - struct rxrpc_message *msg, - struct rxrpc_connection **_conn) -{ - struct rxrpc_connection *conn, *candidate = NULL; - struct list_head *_p; - struct sk_buff *pkt = msg->pkt; - int ret, fresh = 0; - __be32 x_epoch, x_connid; - __be16 x_port, x_servid; - __u32 x_secix; - u8 x_clflag; - - _enter("%p{{%hu}},%u,%hu", - peer, - peer->trans->port, - ntohs(udp_hdr(pkt)->source), - ntohs(msg->hdr.serviceId)); - - x_port = udp_hdr(pkt)->source; - x_epoch = msg->hdr.epoch; - x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED; - x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK); - x_servid = msg->hdr.serviceId; - x_secix = msg->hdr.securityIndex; - - /* [common case] search the transport's active list first */ - read_lock(&peer->conn_lock); - list_for_each(_p, &peer->conn_active) { - conn = list_entry(_p, struct rxrpc_connection, link); - if (conn->addr.sin_port == x_port && - conn->in_epoch == 
x_epoch && - conn->conn_id == x_connid && - conn->security_ix == x_secix && - conn->service_id == x_servid && - conn->in_clientflag == x_clflag) - goto found_active; - } - read_unlock(&peer->conn_lock); - - /* [uncommon case] not active - * - create a candidate for a new record if an inbound connection - * - only examine the graveyard for an outbound connection - */ - if (x_clflag) { - ret = __rxrpc_create_connection(peer, &candidate); - if (ret < 0) { - _leave(" = %d", ret); - return ret; - } - - /* fill in the specifics */ - candidate->addr.sin_family = AF_INET; - candidate->addr.sin_port = x_port; - candidate->addr.sin_addr.s_addr = ip_hdr(pkt)->saddr; - candidate->in_epoch = x_epoch; - candidate->out_epoch = x_epoch; - candidate->in_clientflag = RXRPC_CLIENT_INITIATED; - candidate->out_clientflag = 0; - candidate->conn_id = x_connid; - candidate->service_id = x_servid; - candidate->security_ix = x_secix; - } - - /* search the active list again, just in case it appeared whilst we - * were busy */ - write_lock(&peer->conn_lock); - list_for_each(_p, &peer->conn_active) { - conn = list_entry(_p, struct rxrpc_connection, link); - if (conn->addr.sin_port == x_port && - conn->in_epoch == x_epoch && - conn->conn_id == x_connid && - conn->security_ix == x_secix && - conn->service_id == x_servid && - conn->in_clientflag == x_clflag) - goto found_active_second_chance; - } - - /* search the transport's graveyard list */ - spin_lock(&peer->conn_gylock); - list_for_each(_p, &peer->conn_graveyard) { - conn = list_entry(_p, struct rxrpc_connection, link); - if (conn->addr.sin_port == x_port && - conn->in_epoch == x_epoch && - conn->conn_id == x_connid && - conn->security_ix == x_secix && - conn->service_id == x_servid && - conn->in_clientflag == x_clflag) - goto found_in_graveyard; - } - spin_unlock(&peer->conn_gylock); - - /* outbound connections aren't created here */ - if (!x_clflag) { - write_unlock(&peer->conn_lock); - _leave(" = -ENOENT"); - return -ENOENT; - } - - /* we can now add the new candidate to the list */ - _debug("created connection: {%08x} [in]", ntohl(candidate->conn_id)); - rxrpc_get_peer(peer); - conn = candidate; - candidate = NULL; - atomic_inc(&peer->conn_count); - fresh = 1; - - make_active: - list_add_tail(&conn->link, &peer->conn_active); - - success_uwfree: - write_unlock(&peer->conn_lock); - - if (candidate) { - write_lock(&peer->conn_idlock); - list_del(&candidate->id_link); - write_unlock(&peer->conn_idlock); - - __RXACCT(atomic_dec(&rxrpc_connection_count)); - kfree(candidate); - } - - if (fresh) { - down_write(&rxrpc_conns_sem); - list_add_tail(&conn->proc_link, &rxrpc_conns); - up_write(&rxrpc_conns_sem); - } - - success: - *_conn = conn; - _leave(" = 0 (%p)", conn); - return 0; - - /* handle the connection being found in the active list straight off */ - found_active: - rxrpc_get_connection(conn); - read_unlock(&peer->conn_lock); - goto success; - - /* handle resurrecting a connection from the graveyard */ - found_in_graveyard: - _debug("resurrecting connection: {%08x} [in]", ntohl(conn->conn_id)); - rxrpc_get_peer(peer); - rxrpc_get_connection(conn); - rxrpc_krxtimod_del_timer(&conn->timeout); - list_del_init(&conn->link); - spin_unlock(&peer->conn_gylock); - goto make_active; - - /* handle finding the connection on the second time through the active - * list */ - found_active_second_chance: - rxrpc_get_connection(conn); - goto success_uwfree; - -} /* end rxrpc_connection_lookup() */ - -/*****************************************************************************/ 
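
[Editorial note] The lookup path that ends just above follows a three-step pattern: search the peer's active connection list, fall back to the graveyard so a recently retired connection can be resurrected, and only then treat the freshly built candidate as a brand new record; the active list is searched a second time because the candidate is constructed without the peer's write lock held. The fragment below is a minimal, self-contained userspace sketch of that pattern, not the kernel code: the names (conn_rec, conn_table, conn_lookup_or_create), the singly linked lists and the single-threaded, lock-free handling are illustrative assumptions only.

    /* Sketch only: simplified lookup-or-create across an active list and a
     * graveyard.  All names are hypothetical; this is not the rxrpc API. */
    #include <stdlib.h>

    struct conn_rec {
            unsigned int conn_id;           /* connection identifier */
            struct conn_rec *next;
    };

    struct conn_table {
            struct conn_rec *active;        /* records currently in use */
            struct conn_rec *graveyard;     /* retired, awaiting final free */
    };

    /* Unlink and return the record with the given ID, or NULL if absent. */
    static struct conn_rec *unlink_by_id(struct conn_rec **list, unsigned int id)
    {
            struct conn_rec **pp, *c;

            for (pp = list; (c = *pp) != NULL; pp = &c->next) {
                    if (c->conn_id == id) {
                            *pp = c->next;
                            return c;
                    }
            }
            return NULL;
    }

    /* Find an active record, resurrect one from the graveyard, or create one. */
    static struct conn_rec *conn_lookup_or_create(struct conn_table *t,
                                                  unsigned int id)
    {
            struct conn_rec *c;

            for (c = t->active; c; c = c->next)     /* common case: already active */
                    if (c->conn_id == id)
                            return c;

            c = unlink_by_id(&t->graveyard, id);    /* resurrect if recently retired */
            if (!c) {
                    c = calloc(1, sizeof(*c));      /* otherwise allocate afresh */
                    if (!c)
                            return NULL;
                    c->conn_id = id;
            }

            c->next = t->active;                    /* put it on the active list */
            t->active = c;
            return c;
    }

The kernel code above does the same thing with list_head queues, reference counts and per-peer locks, and additionally restarts the graveyard entry's expiry timer when a connection is resurrected.
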
-/* - * finish using a connection record - * - it will be transferred to the peer's connection graveyard when refcount - * reaches 0 - */ -void rxrpc_put_connection(struct rxrpc_connection *conn) -{ - struct rxrpc_peer *peer; - - if (!conn) - return; - - _enter("%p{u=%d p=%hu}", - conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port)); - - peer = conn->peer; - spin_lock(&peer->conn_gylock); - - /* sanity check */ - if (atomic_read(&conn->usage) <= 0) - BUG(); - - if (likely(!atomic_dec_and_test(&conn->usage))) { - spin_unlock(&peer->conn_gylock); - _leave(""); - return; - } - - /* move to graveyard queue */ - _debug("burying connection: {%08x}", ntohl(conn->conn_id)); - list_move_tail(&conn->link, &peer->conn_graveyard); - - rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ); - - spin_unlock(&peer->conn_gylock); - - rxrpc_put_peer(conn->peer); - - _leave(" [killed]"); -} /* end rxrpc_put_connection() */ - -/*****************************************************************************/ -/* - * free a connection record - */ -static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn) -{ - struct rxrpc_peer *peer; - - _enter("%p{u=%d p=%hu}", - conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port)); - - peer = conn->peer; - - if (atomic_read(&conn->usage) < 0) - BUG(); - - /* remove from graveyard if still dead */ - spin_lock(&peer->conn_gylock); - if (atomic_read(&conn->usage) == 0) { - list_del_init(&conn->link); - } - else { - conn = NULL; - } - spin_unlock(&peer->conn_gylock); - - if (!conn) { - _leave(""); - return; /* resurrected */ - } - - _debug("--- Destroying Connection %p{%08x} ---", - conn, ntohl(conn->conn_id)); - - down_write(&rxrpc_conns_sem); - list_del(&conn->proc_link); - up_write(&rxrpc_conns_sem); - - write_lock(&peer->conn_idlock); - list_del(&conn->id_link); - write_unlock(&peer->conn_idlock); - - __RXACCT(atomic_dec(&rxrpc_connection_count)); - kfree(conn); - - /* if the graveyard is now empty, wake up anyone waiting for that */ - if (atomic_dec_and_test(&peer->conn_count)) - wake_up(&peer->conn_gy_waitq); - - _leave(" [destroyed]"); -} /* end rxrpc_conn_do_timeout() */ - -/*****************************************************************************/ -/* - * clear all connection records from a peer endpoint - */ -void rxrpc_conn_clearall(struct rxrpc_peer *peer) -{ - DECLARE_WAITQUEUE(myself, current); - - struct rxrpc_connection *conn; - int err; - - _enter("%p", peer); - - /* there shouldn't be any active conns remaining */ - if (!list_empty(&peer->conn_active)) - BUG(); - - /* manually timeout all conns in the graveyard */ - spin_lock(&peer->conn_gylock); - while (!list_empty(&peer->conn_graveyard)) { - conn = list_entry(peer->conn_graveyard.next, - struct rxrpc_connection, link); - err = rxrpc_krxtimod_del_timer(&conn->timeout); - spin_unlock(&peer->conn_gylock); - - if (err == 0) - rxrpc_conn_do_timeout(conn); - - spin_lock(&peer->conn_gylock); - } - spin_unlock(&peer->conn_gylock); - - /* wait for the the conn graveyard to be completely cleared */ - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&peer->conn_gy_waitq, &myself); - - while (atomic_read(&peer->conn_count) != 0) { - schedule(); - set_current_state(TASK_UNINTERRUPTIBLE); - } - - remove_wait_queue(&peer->conn_gy_waitq, &myself); - set_current_state(TASK_RUNNING); - - _leave(""); -} /* end rxrpc_conn_clearall() */ - -/*****************************************************************************/ -/* - * allocate and prepare a message for sending out through the 
transport - * endpoint - */ -int rxrpc_conn_newmsg(struct rxrpc_connection *conn, - struct rxrpc_call *call, - uint8_t type, - int dcount, - struct kvec diov[], - gfp_t alloc_flags, - struct rxrpc_message **_msg) -{ - struct rxrpc_message *msg; - int loop; - - _enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type); - - if (dcount > 3) { - _leave(" = -EINVAL"); - return -EINVAL; - } - - msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags); - if (!msg) { - _leave(" = -ENOMEM"); - return -ENOMEM; - } - - atomic_set(&msg->usage, 1); - - INIT_LIST_HEAD(&msg->link); - - msg->state = RXRPC_MSG_PREPARED; - - msg->hdr.epoch = conn->out_epoch; - msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0); - msg->hdr.callNumber = call ? call->call_id : 0; - msg->hdr.type = type; - msg->hdr.flags = conn->out_clientflag; - msg->hdr.securityIndex = conn->security_ix; - msg->hdr.serviceId = conn->service_id; - - /* generate sequence numbers for data packets */ - if (call) { - switch (type) { - case RXRPC_PACKET_TYPE_DATA: - msg->seq = ++call->snd_seq_count; - msg->hdr.seq = htonl(msg->seq); - break; - case RXRPC_PACKET_TYPE_ACK: - /* ACK sequence numbers are complicated. The following - * may be wrong: - * - jumbo packet ACKs should have a seq number - * - normal ACKs should not - */ - default: - break; - } - } - - msg->dcount = dcount + 1; - msg->dsize = sizeof(msg->hdr); - msg->data[0].iov_len = sizeof(msg->hdr); - msg->data[0].iov_base = &msg->hdr; - - for (loop=0; loop < dcount; loop++) { - msg->dsize += diov[loop].iov_len; - msg->data[loop+1].iov_len = diov[loop].iov_len; - msg->data[loop+1].iov_base = diov[loop].iov_base; - } - - __RXACCT(atomic_inc(&rxrpc_message_count)); - *_msg = msg; - _leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count)); - return 0; -} /* end rxrpc_conn_newmsg() */ - -/*****************************************************************************/ -/* - * free a message - */ -void __rxrpc_put_message(struct rxrpc_message *msg) -{ - int loop; - - _enter("%p #%d", msg, atomic_read(&rxrpc_message_count)); - - if (msg->pkt) - kfree_skb(msg->pkt); - rxrpc_put_connection(msg->conn); - - for (loop = 0; loop < 8; loop++) - if (test_bit(loop, &msg->dfree)) - kfree(msg->data[loop].iov_base); - - __RXACCT(atomic_dec(&rxrpc_message_count)); - kfree(msg); - - _leave(""); -} /* end __rxrpc_put_message() */ - -/*****************************************************************************/ -/* - * send a message out through the transport endpoint - */ -int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, - struct rxrpc_message *msg) -{ - struct msghdr msghdr; - int ret; - - _enter("%p{%d}", conn, ntohs(conn->addr.sin_port)); - - /* fill in some fields in the header */ - spin_lock(&conn->lock); - msg->hdr.serial = htonl(++conn->serial_counter); - msg->rttdone = 0; - spin_unlock(&conn->lock); - - /* set up the message to be transmitted */ - msghdr.msg_name = &conn->addr; - msghdr.msg_namelen = sizeof(conn->addr); - msghdr.msg_control = NULL; - msghdr.msg_controllen = 0; - msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT; - - _net("Sending message type %d of %Zd bytes to %08x:%d", - msg->hdr.type, - msg->dsize, - ntohl(conn->addr.sin_addr.s_addr), - ntohs(conn->addr.sin_port)); - - /* send the message */ - ret = kernel_sendmsg(conn->trans->socket, &msghdr, - msg->data, msg->dcount, msg->dsize); - if (ret < 0) { - msg->state = RXRPC_MSG_ERROR; - } else { - msg->state = RXRPC_MSG_SENT; - ret = 0; - - spin_lock(&conn->lock); - do_gettimeofday(&conn->atime); - msg->stamp = 
conn->atime; - spin_unlock(&conn->lock); - } - - _leave(" = %d", ret); - - return ret; -} /* end rxrpc_conn_sendmsg() */ - -/*****************************************************************************/ -/* - * deal with a subsequent call packet - */ -int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn, - struct rxrpc_call *call, - struct rxrpc_message *msg) -{ - struct rxrpc_message *pmsg; - struct dst_entry *dst; - struct list_head *_p; - unsigned cix, seq; - int ret = 0; - - _enter("%p,%p,%p", conn, call, msg); - - if (!call) { - cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK; - - spin_lock(&conn->lock); - call = conn->channels[cix]; - - if (!call || call->call_id != msg->hdr.callNumber) { - spin_unlock(&conn->lock); - rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT); - goto out; - } - else { - rxrpc_get_call(call); - spin_unlock(&conn->lock); - } - } - else { - rxrpc_get_call(call); - } - - _proto("Received packet %%%u [%u] on call %hu:%u:%u", - ntohl(msg->hdr.serial), - ntohl(msg->hdr.seq), - ntohs(msg->hdr.serviceId), - ntohl(conn->conn_id), - ntohl(call->call_id)); - - call->pkt_rcv_count++; - - dst = msg->pkt->dst; - if (dst && dst->dev) - conn->peer->if_mtu = - dst->dev->mtu - dst->dev->hard_header_len; - - /* queue on the call in seq order */ - rxrpc_get_message(msg); - seq = msg->seq; - - spin_lock(&call->lock); - list_for_each(_p, &call->rcv_receiveq) { - pmsg = list_entry(_p, struct rxrpc_message, link); - if (pmsg->seq > seq) - break; - } - list_add_tail(&msg->link, _p); - - /* reset the activity timeout */ - call->flags |= RXRPC_CALL_RCV_PKT; - mod_timer(&call->rcv_timeout,jiffies + rxrpc_call_rcv_timeout * HZ); - - spin_unlock(&call->lock); - - rxrpc_krxiod_queue_call(call); - - rxrpc_put_call(call); - out: - _leave(" = %d", ret); - return ret; -} /* end rxrpc_conn_receive_call_packet() */ - -/*****************************************************************************/ -/* - * handle an ICMP error being applied to a connection - */ -void rxrpc_conn_handle_error(struct rxrpc_connection *conn, - int local, int errno) -{ - struct rxrpc_call *calls[4]; - int loop; - - _enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno); - - /* get a ref to all my calls in one go */ - memset(calls, 0, sizeof(calls)); - spin_lock(&conn->lock); - - for (loop = 3; loop >= 0; loop--) { - if (conn->channels[loop]) { - calls[loop] = conn->channels[loop]; - rxrpc_get_call(calls[loop]); - } - } - - spin_unlock(&conn->lock); - - /* now kick them all */ - for (loop = 3; loop >= 0; loop--) { - if (calls[loop]) { - rxrpc_call_handle_error(calls[loop], local, errno); - rxrpc_put_call(calls[loop]); - } - } - - _leave(""); -} /* end rxrpc_conn_handle_error() */ diff --git a/net/rxrpc/internal.h b/net/rxrpc/internal.h deleted file mode 100644 index cc0c5795a103..000000000000 --- a/net/rxrpc/internal.h +++ /dev/null @@ -1,106 +0,0 @@ -/* internal.h: internal Rx RPC stuff - * - * Copyright (c) 2002 David Howells (dhowells@redhat.com). 
- */ - -#ifndef RXRPC_INTERNAL_H -#define RXRPC_INTERNAL_H - -#include -#include - -/* - * debug accounting - */ -#if 1 -#define __RXACCT_DECL(X) X -#define __RXACCT(X) do { X; } while(0) -#else -#define __RXACCT_DECL(X) -#define __RXACCT(X) do { } while(0) -#endif - -__RXACCT_DECL(extern atomic_t rxrpc_transport_count); -__RXACCT_DECL(extern atomic_t rxrpc_peer_count); -__RXACCT_DECL(extern atomic_t rxrpc_connection_count); -__RXACCT_DECL(extern atomic_t rxrpc_call_count); -__RXACCT_DECL(extern atomic_t rxrpc_message_count); - -/* - * debug tracing - */ -#define kenter(FMT, a...) printk("==> %s("FMT")\n",__FUNCTION__ , ##a) -#define kleave(FMT, a...) printk("<== %s()"FMT"\n",__FUNCTION__ , ##a) -#define kdebug(FMT, a...) printk(" "FMT"\n" , ##a) -#define kproto(FMT, a...) printk("### "FMT"\n" , ##a) -#define knet(FMT, a...) printk(" "FMT"\n" , ##a) - -#if 0 -#define _enter(FMT, a...) kenter(FMT , ##a) -#define _leave(FMT, a...) kleave(FMT , ##a) -#define _debug(FMT, a...) kdebug(FMT , ##a) -#define _proto(FMT, a...) kproto(FMT , ##a) -#define _net(FMT, a...) knet(FMT , ##a) -#else -#define _enter(FMT, a...) do { if (rxrpc_ktrace) kenter(FMT , ##a); } while(0) -#define _leave(FMT, a...) do { if (rxrpc_ktrace) kleave(FMT , ##a); } while(0) -#define _debug(FMT, a...) do { if (rxrpc_kdebug) kdebug(FMT , ##a); } while(0) -#define _proto(FMT, a...) do { if (rxrpc_kproto) kproto(FMT , ##a); } while(0) -#define _net(FMT, a...) do { if (rxrpc_knet) knet (FMT , ##a); } while(0) -#endif - -static inline void rxrpc_discard_my_signals(void) -{ - while (signal_pending(current)) { - siginfo_t sinfo; - - spin_lock_irq(¤t->sighand->siglock); - dequeue_signal(current, ¤t->blocked, &sinfo); - spin_unlock_irq(¤t->sighand->siglock); - } -} - -/* - * call.c - */ -extern struct list_head rxrpc_calls; -extern struct rw_semaphore rxrpc_calls_sem; - -/* - * connection.c - */ -extern struct list_head rxrpc_conns; -extern struct rw_semaphore rxrpc_conns_sem; -extern unsigned long rxrpc_conn_timeout; - -extern void rxrpc_conn_clearall(struct rxrpc_peer *peer); - -/* - * peer.c - */ -extern struct list_head rxrpc_peers; -extern struct rw_semaphore rxrpc_peers_sem; -extern unsigned long rxrpc_peer_timeout; - -extern void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer, - struct rxrpc_message *msg, - struct rxrpc_message *resp); - -extern void rxrpc_peer_clearall(struct rxrpc_transport *trans); - - -/* - * proc.c - */ -#ifdef CONFIG_PROC_FS -extern int rxrpc_proc_init(void); -extern void rxrpc_proc_cleanup(void); -#endif - -/* - * transport.c - */ -extern struct list_head rxrpc_proc_transports; -extern struct rw_semaphore rxrpc_proc_transports_sem; - -#endif /* RXRPC_INTERNAL_H */ diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c deleted file mode 100644 index bbbcd6c24048..000000000000 --- a/net/rxrpc/krxiod.c +++ /dev/null @@ -1,262 +0,0 @@ -/* krxiod.c: Rx I/O daemon - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq); -static DECLARE_COMPLETION(rxrpc_krxiod_dead); - -static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0); - -static LIST_HEAD(rxrpc_krxiod_transportq); -static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock); - -static LIST_HEAD(rxrpc_krxiod_callq); -static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock); - -static volatile int rxrpc_krxiod_die; - -/*****************************************************************************/ -/* - * Rx I/O daemon - */ -static int rxrpc_krxiod(void *arg) -{ - DECLARE_WAITQUEUE(krxiod,current); - - printk("Started krxiod %d\n",current->pid); - - daemonize("krxiod"); - - /* loop around waiting for work to do */ - do { - /* wait for work or to be told to exit */ - _debug("### Begin Wait"); - if (!atomic_read(&rxrpc_krxiod_qcount)) { - set_current_state(TASK_INTERRUPTIBLE); - - add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod); - - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - if (atomic_read(&rxrpc_krxiod_qcount) || - rxrpc_krxiod_die || - signal_pending(current)) - break; - - schedule(); - } - - remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod); - set_current_state(TASK_RUNNING); - } - _debug("### End Wait"); - - /* do work if been given some to do */ - _debug("### Begin Work"); - - /* see if there's a transport in need of attention */ - if (!list_empty(&rxrpc_krxiod_transportq)) { - struct rxrpc_transport *trans = NULL; - - spin_lock_irq(&rxrpc_krxiod_transportq_lock); - - if (!list_empty(&rxrpc_krxiod_transportq)) { - trans = list_entry( - rxrpc_krxiod_transportq.next, - struct rxrpc_transport, - krxiodq_link); - - list_del_init(&trans->krxiodq_link); - atomic_dec(&rxrpc_krxiod_qcount); - - /* make sure it hasn't gone away and doesn't go - * away */ - if (atomic_read(&trans->usage)>0) - rxrpc_get_transport(trans); - else - trans = NULL; - } - - spin_unlock_irq(&rxrpc_krxiod_transportq_lock); - - if (trans) { - rxrpc_trans_receive_packet(trans); - rxrpc_put_transport(trans); - } - } - - /* see if there's a call in need of attention */ - if (!list_empty(&rxrpc_krxiod_callq)) { - struct rxrpc_call *call = NULL; - - spin_lock_irq(&rxrpc_krxiod_callq_lock); - - if (!list_empty(&rxrpc_krxiod_callq)) { - call = list_entry(rxrpc_krxiod_callq.next, - struct rxrpc_call, - rcv_krxiodq_lk); - list_del_init(&call->rcv_krxiodq_lk); - atomic_dec(&rxrpc_krxiod_qcount); - - /* make sure it hasn't gone away and doesn't go - * away */ - if (atomic_read(&call->usage) > 0) { - _debug("@@@ KRXIOD" - " Begin Attend Call %p", call); - rxrpc_get_call(call); - } - else { - call = NULL; - } - } - - spin_unlock_irq(&rxrpc_krxiod_callq_lock); - - if (call) { - rxrpc_call_do_stuff(call); - rxrpc_put_call(call); - _debug("@@@ KRXIOD End Attend Call %p", call); - } - } - - _debug("### End Work"); - - try_to_freeze(); - - /* discard pending signals */ - rxrpc_discard_my_signals(); - - } while (!rxrpc_krxiod_die); - - /* and that's all */ - complete_and_exit(&rxrpc_krxiod_dead, 0); - -} /* end rxrpc_krxiod() */ - -/*****************************************************************************/ -/* - * start up a krxiod daemon - */ -int __init rxrpc_krxiod_init(void) -{ - return kernel_thread(rxrpc_krxiod, NULL, 0); - -} /* end rxrpc_krxiod_init() */ - -/*****************************************************************************/ -/* - * kill the krxiod daemon and wait for it to complete - */ -void rxrpc_krxiod_kill(void) 
-{ - rxrpc_krxiod_die = 1; - wake_up_all(&rxrpc_krxiod_sleepq); - wait_for_completion(&rxrpc_krxiod_dead); - -} /* end rxrpc_krxiod_kill() */ - -/*****************************************************************************/ -/* - * queue a transport for attention by krxiod - */ -void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans) -{ - unsigned long flags; - - _enter(""); - - if (list_empty(&trans->krxiodq_link)) { - spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags); - - if (list_empty(&trans->krxiodq_link)) { - if (atomic_read(&trans->usage) > 0) { - list_add_tail(&trans->krxiodq_link, - &rxrpc_krxiod_transportq); - atomic_inc(&rxrpc_krxiod_qcount); - } - } - - spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags); - wake_up_all(&rxrpc_krxiod_sleepq); - } - - _leave(""); - -} /* end rxrpc_krxiod_queue_transport() */ - -/*****************************************************************************/ -/* - * dequeue a transport from krxiod's attention queue - */ -void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans) -{ - unsigned long flags; - - _enter(""); - - spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags); - if (!list_empty(&trans->krxiodq_link)) { - list_del_init(&trans->krxiodq_link); - atomic_dec(&rxrpc_krxiod_qcount); - } - spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags); - - _leave(""); - -} /* end rxrpc_krxiod_dequeue_transport() */ - -/*****************************************************************************/ -/* - * queue a call for attention by krxiod - */ -void rxrpc_krxiod_queue_call(struct rxrpc_call *call) -{ - unsigned long flags; - - if (list_empty(&call->rcv_krxiodq_lk)) { - spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags); - if (atomic_read(&call->usage) > 0) { - list_add_tail(&call->rcv_krxiodq_lk, - &rxrpc_krxiod_callq); - atomic_inc(&rxrpc_krxiod_qcount); - } - spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags); - } - wake_up_all(&rxrpc_krxiod_sleepq); - -} /* end rxrpc_krxiod_queue_call() */ - -/*****************************************************************************/ -/* - * dequeue a call from krxiod's attention queue - */ -void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call) -{ - unsigned long flags; - - spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags); - if (!list_empty(&call->rcv_krxiodq_lk)) { - list_del_init(&call->rcv_krxiodq_lk); - atomic_dec(&rxrpc_krxiod_qcount); - } - spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags); - -} /* end rxrpc_krxiod_dequeue_call() */ diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c deleted file mode 100644 index 9a1e7f5e034c..000000000000 --- a/net/rxrpc/krxsecd.c +++ /dev/null @@ -1,270 +0,0 @@ -/* krxsecd.c: Rx security daemon - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- * - * This daemon deals with: - * - consulting the application as to whether inbound peers and calls should be authorised - * - generating security challenges for inbound connections - * - responding to security challenges on outbound connections - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxsecd_sleepq); -static DECLARE_COMPLETION(rxrpc_krxsecd_dead); -static volatile int rxrpc_krxsecd_die; - -static atomic_t rxrpc_krxsecd_qcount; - -/* queue of unprocessed inbound messages with seqno #1 and - * RXRPC_CLIENT_INITIATED flag set */ -static LIST_HEAD(rxrpc_krxsecd_initmsgq); -static DEFINE_SPINLOCK(rxrpc_krxsecd_initmsgq_lock); - -static void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg); - -/*****************************************************************************/ -/* - * Rx security daemon - */ -static int rxrpc_krxsecd(void *arg) -{ - DECLARE_WAITQUEUE(krxsecd, current); - - int die; - - printk("Started krxsecd %d\n", current->pid); - - daemonize("krxsecd"); - - /* loop around waiting for work to do */ - do { - /* wait for work or to be told to exit */ - _debug("### Begin Wait"); - if (!atomic_read(&rxrpc_krxsecd_qcount)) { - set_current_state(TASK_INTERRUPTIBLE); - - add_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd); - - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - if (atomic_read(&rxrpc_krxsecd_qcount) || - rxrpc_krxsecd_die || - signal_pending(current)) - break; - - schedule(); - } - - remove_wait_queue(&rxrpc_krxsecd_sleepq, &krxsecd); - set_current_state(TASK_RUNNING); - } - die = rxrpc_krxsecd_die; - _debug("### End Wait"); - - /* see if there're incoming calls in need of authenticating */ - _debug("### Begin Inbound Calls"); - - if (!list_empty(&rxrpc_krxsecd_initmsgq)) { - struct rxrpc_message *msg = NULL; - - spin_lock(&rxrpc_krxsecd_initmsgq_lock); - - if (!list_empty(&rxrpc_krxsecd_initmsgq)) { - msg = list_entry(rxrpc_krxsecd_initmsgq.next, - struct rxrpc_message, link); - list_del_init(&msg->link); - atomic_dec(&rxrpc_krxsecd_qcount); - } - - spin_unlock(&rxrpc_krxsecd_initmsgq_lock); - - if (msg) { - rxrpc_krxsecd_process_incoming_call(msg); - rxrpc_put_message(msg); - } - } - - _debug("### End Inbound Calls"); - - try_to_freeze(); - - /* discard pending signals */ - rxrpc_discard_my_signals(); - - } while (!die); - - /* and that's all */ - complete_and_exit(&rxrpc_krxsecd_dead, 0); - -} /* end rxrpc_krxsecd() */ - -/*****************************************************************************/ -/* - * start up a krxsecd daemon - */ -int __init rxrpc_krxsecd_init(void) -{ - return kernel_thread(rxrpc_krxsecd, NULL, 0); - -} /* end rxrpc_krxsecd_init() */ - -/*****************************************************************************/ -/* - * kill the krxsecd daemon and wait for it to complete - */ -void rxrpc_krxsecd_kill(void) -{ - rxrpc_krxsecd_die = 1; - wake_up_all(&rxrpc_krxsecd_sleepq); - wait_for_completion(&rxrpc_krxsecd_dead); - -} /* end rxrpc_krxsecd_kill() */ - -/*****************************************************************************/ -/* - * clear all pending incoming calls for the specified transport - */ -void rxrpc_krxsecd_clear_transport(struct rxrpc_transport *trans) -{ - LIST_HEAD(tmp); - - struct rxrpc_message *msg; - struct list_head *_p, *_n; - - _enter("%p",trans); - - /* move all the messages for this transport onto a temp list */ - 
spin_lock(&rxrpc_krxsecd_initmsgq_lock); - - list_for_each_safe(_p, _n, &rxrpc_krxsecd_initmsgq) { - msg = list_entry(_p, struct rxrpc_message, link); - if (msg->trans == trans) { - list_move_tail(&msg->link, &tmp); - atomic_dec(&rxrpc_krxsecd_qcount); - } - } - - spin_unlock(&rxrpc_krxsecd_initmsgq_lock); - - /* zap all messages on the temp list */ - while (!list_empty(&tmp)) { - msg = list_entry(tmp.next, struct rxrpc_message, link); - list_del_init(&msg->link); - rxrpc_put_message(msg); - } - - _leave(""); -} /* end rxrpc_krxsecd_clear_transport() */ - -/*****************************************************************************/ -/* - * queue a message on the incoming calls list - */ -void rxrpc_krxsecd_queue_incoming_call(struct rxrpc_message *msg) -{ - _enter("%p", msg); - - /* queue for processing by krxsecd */ - spin_lock(&rxrpc_krxsecd_initmsgq_lock); - - if (!rxrpc_krxsecd_die) { - rxrpc_get_message(msg); - list_add_tail(&msg->link, &rxrpc_krxsecd_initmsgq); - atomic_inc(&rxrpc_krxsecd_qcount); - } - - spin_unlock(&rxrpc_krxsecd_initmsgq_lock); - - wake_up(&rxrpc_krxsecd_sleepq); - - _leave(""); -} /* end rxrpc_krxsecd_queue_incoming_call() */ - -/*****************************************************************************/ -/* - * process the initial message of an incoming call - */ -void rxrpc_krxsecd_process_incoming_call(struct rxrpc_message *msg) -{ - struct rxrpc_transport *trans = msg->trans; - struct rxrpc_service *srv; - struct rxrpc_call *call; - struct list_head *_p; - unsigned short sid; - int ret; - - _enter("%p{tr=%p}", msg, trans); - - ret = rxrpc_incoming_call(msg->conn, msg, &call); - if (ret < 0) - goto out; - - /* find the matching service on the transport */ - sid = ntohs(msg->hdr.serviceId); - srv = NULL; - - spin_lock(&trans->lock); - list_for_each(_p, &trans->services) { - srv = list_entry(_p, struct rxrpc_service, link); - if (srv->service_id == sid && try_module_get(srv->owner)) { - /* found a match (made sure it won't vanish) */ - _debug("found service '%s'", srv->name); - call->owner = srv->owner; - break; - } - } - spin_unlock(&trans->lock); - - /* report the new connection - * - the func must inc the call's usage count to keep it - */ - ret = -ENOENT; - if (_p != &trans->services) { - /* attempt to accept the call */ - call->conn->service = srv; - call->app_attn_func = srv->attn_func; - call->app_error_func = srv->error_func; - call->app_aemap_func = srv->aemap_func; - - ret = srv->new_call(call); - - /* send an abort if an error occurred */ - if (ret < 0) { - rxrpc_call_abort(call, ret); - } - else { - /* formally receive and ACK the new packet */ - ret = rxrpc_conn_receive_call_packet(call->conn, - call, msg); - } - } - - rxrpc_put_call(call); - out: - if (ret < 0) - rxrpc_trans_immediate_abort(trans, msg, ret); - - _leave(" (%d)", ret); -} /* end rxrpc_krxsecd_process_incoming_call() */ diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c deleted file mode 100644 index 9a9b6132dba4..000000000000 --- a/net/rxrpc/krxtimod.c +++ /dev/null @@ -1,204 +0,0 @@ -/* krxtimod.c: RXRPC timeout daemon - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -static DECLARE_COMPLETION(krxtimod_alive); -static DECLARE_COMPLETION(krxtimod_dead); -static DECLARE_WAIT_QUEUE_HEAD(krxtimod_sleepq); -static int krxtimod_die; - -static LIST_HEAD(krxtimod_list); -static DEFINE_SPINLOCK(krxtimod_lock); - -static int krxtimod(void *arg); - -/*****************************************************************************/ -/* - * start the timeout daemon - */ -int rxrpc_krxtimod_start(void) -{ - int ret; - - ret = kernel_thread(krxtimod, NULL, 0); - if (ret < 0) - return ret; - - wait_for_completion(&krxtimod_alive); - - return ret; -} /* end rxrpc_krxtimod_start() */ - -/*****************************************************************************/ -/* - * stop the timeout daemon - */ -void rxrpc_krxtimod_kill(void) -{ - /* get rid of my daemon */ - krxtimod_die = 1; - wake_up(&krxtimod_sleepq); - wait_for_completion(&krxtimod_dead); - -} /* end rxrpc_krxtimod_kill() */ - -/*****************************************************************************/ -/* - * timeout processing daemon - */ -static int krxtimod(void *arg) -{ - DECLARE_WAITQUEUE(myself, current); - - rxrpc_timer_t *timer; - - printk("Started krxtimod %d\n", current->pid); - - daemonize("krxtimod"); - - complete(&krxtimod_alive); - - /* loop around looking for things to attend to */ - loop: - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&krxtimod_sleepq, &myself); - - for (;;) { - unsigned long jif; - long timeout; - - /* deal with the server being asked to die */ - if (krxtimod_die) { - remove_wait_queue(&krxtimod_sleepq, &myself); - _leave(""); - complete_and_exit(&krxtimod_dead, 0); - } - - try_to_freeze(); - - /* discard pending signals */ - rxrpc_discard_my_signals(); - - /* work out the time to elapse before the next event */ - spin_lock(&krxtimod_lock); - if (list_empty(&krxtimod_list)) { - timeout = MAX_SCHEDULE_TIMEOUT; - } - else { - timer = list_entry(krxtimod_list.next, - rxrpc_timer_t, link); - timeout = timer->timo_jif; - jif = jiffies; - - if (time_before_eq((unsigned long) timeout, jif)) - goto immediate; - - else { - timeout = (long) timeout - (long) jiffies; - } - } - spin_unlock(&krxtimod_lock); - - schedule_timeout(timeout); - - set_current_state(TASK_INTERRUPTIBLE); - } - - /* the thing on the front of the queue needs processing - * - we come here with the lock held and timer pointing to the expired - * entry - */ - immediate: - remove_wait_queue(&krxtimod_sleepq, &myself); - set_current_state(TASK_RUNNING); - - _debug("@@@ Begin Timeout of %p", timer); - - /* dequeue the timer */ - list_del_init(&timer->link); - spin_unlock(&krxtimod_lock); - - /* call the timeout function */ - timer->ops->timed_out(timer); - - _debug("@@@ End Timeout"); - goto loop; - -} /* end krxtimod() */ - -/*****************************************************************************/ -/* - * (re-)queue a timer - */ -void rxrpc_krxtimod_add_timer(rxrpc_timer_t *timer, unsigned long timeout) -{ - struct list_head *_p; - rxrpc_timer_t *ptimer; - - _enter("%p,%lu", timer, timeout); - - spin_lock(&krxtimod_lock); - - list_del(&timer->link); - - /* the timer was deferred or reset - put it back in the queue at the - * right place */ - timer->timo_jif = jiffies + timeout; - - list_for_each(_p, &krxtimod_list) { - ptimer = list_entry(_p, rxrpc_timer_t, link); - if (time_before(timer->timo_jif, ptimer->timo_jif)) - break; - } - - list_add_tail(&timer->link, _p); /* insert before 
stopping point */ - - spin_unlock(&krxtimod_lock); - - wake_up(&krxtimod_sleepq); - - _leave(""); -} /* end rxrpc_krxtimod_add_timer() */ - -/*****************************************************************************/ -/* - * dequeue a timer - * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued - */ -int rxrpc_krxtimod_del_timer(rxrpc_timer_t *timer) -{ - int ret = 0; - - _enter("%p", timer); - - spin_lock(&krxtimod_lock); - - if (list_empty(&timer->link)) - ret = -ENOENT; - else - list_del_init(&timer->link); - - spin_unlock(&krxtimod_lock); - - wake_up(&krxtimod_sleepq); - - _leave(" = %d", ret); - return ret; -} /* end rxrpc_krxtimod_del_timer() */ diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c deleted file mode 100644 index cead31b5bdf5..000000000000 --- a/net/rxrpc/main.c +++ /dev/null @@ -1,180 +0,0 @@ -/* main.c: Rx RPC interface - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -MODULE_DESCRIPTION("Rx RPC implementation"); -MODULE_AUTHOR("Red Hat, Inc."); -MODULE_LICENSE("GPL"); - -__be32 rxrpc_epoch; - -/*****************************************************************************/ -/* - * initialise the Rx module - */ -static int __init rxrpc_initialise(void) -{ - int ret; - - /* my epoch value */ - rxrpc_epoch = htonl(get_seconds()); - - /* register the /proc interface */ -#ifdef CONFIG_PROC_FS - ret = rxrpc_proc_init(); - if (ret<0) - return ret; -#endif - - /* register the sysctl files */ -#ifdef CONFIG_SYSCTL - ret = rxrpc_sysctl_init(); - if (ret<0) - goto error_proc; -#endif - - /* start the krxtimod daemon */ - ret = rxrpc_krxtimod_start(); - if (ret<0) - goto error_sysctl; - - /* start the krxiod daemon */ - ret = rxrpc_krxiod_init(); - if (ret<0) - goto error_krxtimod; - - /* start the krxsecd daemon */ - ret = rxrpc_krxsecd_init(); - if (ret<0) - goto error_krxiod; - - kdebug("\n\n"); - - return 0; - - error_krxiod: - rxrpc_krxiod_kill(); - error_krxtimod: - rxrpc_krxtimod_kill(); - error_sysctl: -#ifdef CONFIG_SYSCTL - rxrpc_sysctl_cleanup(); - error_proc: -#endif -#ifdef CONFIG_PROC_FS - rxrpc_proc_cleanup(); -#endif - return ret; -} /* end rxrpc_initialise() */ - -module_init(rxrpc_initialise); - -/*****************************************************************************/ -/* - * clean up the Rx module - */ -static void __exit rxrpc_cleanup(void) -{ - kenter(""); - - __RXACCT(printk("Outstanding Messages : %d\n", - atomic_read(&rxrpc_message_count))); - __RXACCT(printk("Outstanding Calls : %d\n", - atomic_read(&rxrpc_call_count))); - __RXACCT(printk("Outstanding Connections: %d\n", - atomic_read(&rxrpc_connection_count))); - __RXACCT(printk("Outstanding Peers : %d\n", - atomic_read(&rxrpc_peer_count))); - __RXACCT(printk("Outstanding Transports : %d\n", - atomic_read(&rxrpc_transport_count))); - - rxrpc_krxsecd_kill(); - rxrpc_krxiod_kill(); - rxrpc_krxtimod_kill(); -#ifdef CONFIG_SYSCTL - rxrpc_sysctl_cleanup(); -#endif -#ifdef CONFIG_PROC_FS - rxrpc_proc_cleanup(); -#endif - - __RXACCT(printk("Outstanding Messages : %d\n", - atomic_read(&rxrpc_message_count))); - 
__RXACCT(printk("Outstanding Calls : %d\n", - atomic_read(&rxrpc_call_count))); - __RXACCT(printk("Outstanding Connections: %d\n", - atomic_read(&rxrpc_connection_count))); - __RXACCT(printk("Outstanding Peers : %d\n", - atomic_read(&rxrpc_peer_count))); - __RXACCT(printk("Outstanding Transports : %d\n", - atomic_read(&rxrpc_transport_count))); - - kleave(""); -} /* end rxrpc_cleanup() */ - -module_exit(rxrpc_cleanup); - -/*****************************************************************************/ -/* - * clear the dead space between task_struct and kernel stack - * - called by supplying -finstrument-functions to gcc - */ -#if 0 -void __cyg_profile_func_enter (void *this_fn, void *call_site) -__attribute__((no_instrument_function)); - -void __cyg_profile_func_enter (void *this_fn, void *call_site) -{ - asm volatile(" movl %%esp,%%edi \n" - " andl %0,%%edi \n" - " addl %1,%%edi \n" - " movl %%esp,%%ecx \n" - " subl %%edi,%%ecx \n" - " shrl $2,%%ecx \n" - " movl $0xedededed,%%eax \n" - " rep stosl \n" - : - : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info)) - : "eax", "ecx", "edi", "memory", "cc" - ); -} - -void __cyg_profile_func_exit(void *this_fn, void *call_site) -__attribute__((no_instrument_function)); - -void __cyg_profile_func_exit(void *this_fn, void *call_site) -{ - asm volatile(" movl %%esp,%%edi \n" - " andl %0,%%edi \n" - " addl %1,%%edi \n" - " movl %%esp,%%ecx \n" - " subl %%edi,%%ecx \n" - " shrl $2,%%ecx \n" - " movl $0xdadadada,%%eax \n" - " rep stosl \n" - : - : "i"(~(THREAD_SIZE-1)), "i"(sizeof(struct thread_info)) - : "eax", "ecx", "edi", "memory", "cc" - ); -} -#endif diff --git a/net/rxrpc/peer.c b/net/rxrpc/peer.c deleted file mode 100644 index 8a275157a3bb..000000000000 --- a/net/rxrpc/peer.c +++ /dev/null @@ -1,398 +0,0 @@ -/* peer.c: Rx RPC peer management - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -__RXACCT_DECL(atomic_t rxrpc_peer_count); -LIST_HEAD(rxrpc_peers); -DECLARE_RWSEM(rxrpc_peers_sem); -unsigned long rxrpc_peer_timeout = 12 * 60 * 60; - -static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer); - -static void __rxrpc_peer_timeout(rxrpc_timer_t *timer) -{ - struct rxrpc_peer *peer = - list_entry(timer, struct rxrpc_peer, timeout); - - _debug("Rx PEER TIMEOUT [%p{u=%d}]", peer, atomic_read(&peer->usage)); - - rxrpc_peer_do_timeout(peer); -} - -static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = { - .timed_out = __rxrpc_peer_timeout, -}; - -/*****************************************************************************/ -/* - * create a peer record - */ -static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr, - struct rxrpc_peer **_peer) -{ - struct rxrpc_peer *peer; - - _enter("%p,%08x", trans, ntohl(addr)); - - /* allocate and initialise a peer record */ - peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); - if (!peer) { - _leave(" = -ENOMEM"); - return -ENOMEM; - } - - atomic_set(&peer->usage, 1); - - INIT_LIST_HEAD(&peer->link); - INIT_LIST_HEAD(&peer->proc_link); - INIT_LIST_HEAD(&peer->conn_idlist); - INIT_LIST_HEAD(&peer->conn_active); - INIT_LIST_HEAD(&peer->conn_graveyard); - spin_lock_init(&peer->conn_gylock); - init_waitqueue_head(&peer->conn_gy_waitq); - rwlock_init(&peer->conn_idlock); - rwlock_init(&peer->conn_lock); - atomic_set(&peer->conn_count, 0); - spin_lock_init(&peer->lock); - rxrpc_timer_init(&peer->timeout, &rxrpc_peer_timer_ops); - - peer->addr.s_addr = addr; - - peer->trans = trans; - peer->ops = trans->peer_ops; - - __RXACCT(atomic_inc(&rxrpc_peer_count)); - *_peer = peer; - _leave(" = 0 (%p)", peer); - - return 0; -} /* end __rxrpc_create_peer() */ - -/*****************************************************************************/ -/* - * find a peer record on the specified transport - * - returns (if successful) with peer record usage incremented - * - resurrects it from the graveyard if found there - */ -int rxrpc_peer_lookup(struct rxrpc_transport *trans, __be32 addr, - struct rxrpc_peer **_peer) -{ - struct rxrpc_peer *peer, *candidate = NULL; - struct list_head *_p; - int ret; - - _enter("%p{%hu},%08x", trans, trans->port, ntohl(addr)); - - /* [common case] search the transport's active list first */ - read_lock(&trans->peer_lock); - list_for_each(_p, &trans->peer_active) { - peer = list_entry(_p, struct rxrpc_peer, link); - if (peer->addr.s_addr == addr) - goto found_active; - } - read_unlock(&trans->peer_lock); - - /* [uncommon case] not active - create a candidate for a new record */ - ret = __rxrpc_create_peer(trans, addr, &candidate); - if (ret < 0) { - _leave(" = %d", ret); - return ret; - } - - /* search the active list again, just in case it appeared whilst we - * were busy */ - write_lock(&trans->peer_lock); - list_for_each(_p, &trans->peer_active) { - peer = list_entry(_p, struct rxrpc_peer, link); - if (peer->addr.s_addr == addr) - goto found_active_second_chance; - } - - /* search the transport's graveyard list */ - spin_lock(&trans->peer_gylock); - list_for_each(_p, &trans->peer_graveyard) { - peer = list_entry(_p, struct rxrpc_peer, link); - if (peer->addr.s_addr == addr) - goto found_in_graveyard; - } - spin_unlock(&trans->peer_gylock); - - /* we can now add the new candidate to the list - * - tell the application layer that this 
peer has been added - */ - rxrpc_get_transport(trans); - peer = candidate; - candidate = NULL; - - if (peer->ops && peer->ops->adding) { - ret = peer->ops->adding(peer); - if (ret < 0) { - write_unlock(&trans->peer_lock); - __RXACCT(atomic_dec(&rxrpc_peer_count)); - kfree(peer); - rxrpc_put_transport(trans); - _leave(" = %d", ret); - return ret; - } - } - - atomic_inc(&trans->peer_count); - - make_active: - list_add_tail(&peer->link, &trans->peer_active); - - success_uwfree: - write_unlock(&trans->peer_lock); - - if (candidate) { - __RXACCT(atomic_dec(&rxrpc_peer_count)); - kfree(candidate); - } - - if (list_empty(&peer->proc_link)) { - down_write(&rxrpc_peers_sem); - list_add_tail(&peer->proc_link, &rxrpc_peers); - up_write(&rxrpc_peers_sem); - } - - success: - *_peer = peer; - - _leave(" = 0 (%p{u=%d cc=%d})", - peer, - atomic_read(&peer->usage), - atomic_read(&peer->conn_count)); - return 0; - - /* handle the peer being found in the active list straight off */ - found_active: - rxrpc_get_peer(peer); - read_unlock(&trans->peer_lock); - goto success; - - /* handle resurrecting a peer from the graveyard */ - found_in_graveyard: - rxrpc_get_peer(peer); - rxrpc_get_transport(peer->trans); - rxrpc_krxtimod_del_timer(&peer->timeout); - list_del_init(&peer->link); - spin_unlock(&trans->peer_gylock); - goto make_active; - - /* handle finding the peer on the second time through the active - * list */ - found_active_second_chance: - rxrpc_get_peer(peer); - goto success_uwfree; - -} /* end rxrpc_peer_lookup() */ - -/*****************************************************************************/ -/* - * finish with a peer record - * - it gets sent to the graveyard from where it can be resurrected or timed - * out - */ -void rxrpc_put_peer(struct rxrpc_peer *peer) -{ - struct rxrpc_transport *trans = peer->trans; - - _enter("%p{cc=%d a=%08x}", - peer, - atomic_read(&peer->conn_count), - ntohl(peer->addr.s_addr)); - - /* sanity check */ - if (atomic_read(&peer->usage) <= 0) - BUG(); - - write_lock(&trans->peer_lock); - spin_lock(&trans->peer_gylock); - if (likely(!atomic_dec_and_test(&peer->usage))) { - spin_unlock(&trans->peer_gylock); - write_unlock(&trans->peer_lock); - _leave(""); - return; - } - - /* move to graveyard queue */ - list_del(&peer->link); - write_unlock(&trans->peer_lock); - - list_add_tail(&peer->link, &trans->peer_graveyard); - - BUG_ON(!list_empty(&peer->conn_active)); - - rxrpc_krxtimod_add_timer(&peer->timeout, rxrpc_peer_timeout * HZ); - - spin_unlock(&trans->peer_gylock); - - rxrpc_put_transport(trans); - - _leave(" [killed]"); -} /* end rxrpc_put_peer() */ - -/*****************************************************************************/ -/* - * handle a peer timing out in the graveyard - * - called from krxtimod - */ -static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer) -{ - struct rxrpc_transport *trans = peer->trans; - - _enter("%p{u=%d cc=%d a=%08x}", - peer, - atomic_read(&peer->usage), - atomic_read(&peer->conn_count), - ntohl(peer->addr.s_addr)); - - BUG_ON(atomic_read(&peer->usage) < 0); - - /* remove from graveyard if still dead */ - spin_lock(&trans->peer_gylock); - if (atomic_read(&peer->usage) == 0) - list_del_init(&peer->link); - else - peer = NULL; - spin_unlock(&trans->peer_gylock); - - if (!peer) { - _leave(""); - return; /* resurrected */ - } - - /* clear all connections on this peer */ - rxrpc_conn_clearall(peer); - - BUG_ON(!list_empty(&peer->conn_active)); - BUG_ON(!list_empty(&peer->conn_graveyard)); - - /* inform the application layer */ - if 
(peer->ops && peer->ops->discarding) - peer->ops->discarding(peer); - - if (!list_empty(&peer->proc_link)) { - down_write(&rxrpc_peers_sem); - list_del(&peer->proc_link); - up_write(&rxrpc_peers_sem); - } - - __RXACCT(atomic_dec(&rxrpc_peer_count)); - kfree(peer); - - /* if the graveyard is now empty, wake up anyone waiting for that */ - if (atomic_dec_and_test(&trans->peer_count)) - wake_up(&trans->peer_gy_waitq); - - _leave(" [destroyed]"); -} /* end rxrpc_peer_do_timeout() */ - -/*****************************************************************************/ -/* - * clear all peer records from a transport endpoint - */ -void rxrpc_peer_clearall(struct rxrpc_transport *trans) -{ - DECLARE_WAITQUEUE(myself,current); - - struct rxrpc_peer *peer; - int err; - - _enter("%p",trans); - - /* there shouldn't be any active peers remaining */ - BUG_ON(!list_empty(&trans->peer_active)); - - /* manually timeout all peers in the graveyard */ - spin_lock(&trans->peer_gylock); - while (!list_empty(&trans->peer_graveyard)) { - peer = list_entry(trans->peer_graveyard.next, - struct rxrpc_peer, link); - _debug("Clearing peer %p\n", peer); - err = rxrpc_krxtimod_del_timer(&peer->timeout); - spin_unlock(&trans->peer_gylock); - - if (err == 0) - rxrpc_peer_do_timeout(peer); - - spin_lock(&trans->peer_gylock); - } - spin_unlock(&trans->peer_gylock); - - /* wait for the the peer graveyard to be completely cleared */ - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&trans->peer_gy_waitq, &myself); - - while (atomic_read(&trans->peer_count) != 0) { - schedule(); - set_current_state(TASK_UNINTERRUPTIBLE); - } - - remove_wait_queue(&trans->peer_gy_waitq, &myself); - set_current_state(TASK_RUNNING); - - _leave(""); -} /* end rxrpc_peer_clearall() */ - -/*****************************************************************************/ -/* - * calculate and cache the Round-Trip-Time for a message and its response - */ -void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer, - struct rxrpc_message *msg, - struct rxrpc_message *resp) -{ - unsigned long long rtt; - int loop; - - _enter("%p,%p,%p", peer, msg, resp); - - /* calculate the latest RTT */ - rtt = resp->stamp.tv_sec - msg->stamp.tv_sec; - rtt *= 1000000UL; - rtt += resp->stamp.tv_usec - msg->stamp.tv_usec; - - /* add to cache */ - peer->rtt_cache[peer->rtt_point] = rtt; - peer->rtt_point++; - peer->rtt_point %= RXRPC_RTT_CACHE_SIZE; - - if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE) - peer->rtt_usage++; - - /* recalculate RTT */ - rtt = 0; - for (loop = peer->rtt_usage - 1; loop >= 0; loop--) - rtt += peer->rtt_cache[loop]; - - do_div(rtt, peer->rtt_usage); - peer->rtt = rtt; - - _leave(" RTT=%lu.%lums", - (long) (peer->rtt / 1000), (long) (peer->rtt % 1000)); - -} /* end rxrpc_peer_calculate_rtt() */ diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c deleted file mode 100644 index 8551c879e456..000000000000 --- a/net/rxrpc/proc.c +++ /dev/null @@ -1,617 +0,0 @@ -/* proc.c: /proc interface for RxRPC - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -static struct proc_dir_entry *proc_rxrpc; - -static int rxrpc_proc_transports_open(struct inode *inode, struct file *file); -static void *rxrpc_proc_transports_start(struct seq_file *p, loff_t *pos); -static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos); -static void rxrpc_proc_transports_stop(struct seq_file *p, void *v); -static int rxrpc_proc_transports_show(struct seq_file *m, void *v); - -static struct seq_operations rxrpc_proc_transports_ops = { - .start = rxrpc_proc_transports_start, - .next = rxrpc_proc_transports_next, - .stop = rxrpc_proc_transports_stop, - .show = rxrpc_proc_transports_show, -}; - -static const struct file_operations rxrpc_proc_transports_fops = { - .open = rxrpc_proc_transports_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static int rxrpc_proc_peers_open(struct inode *inode, struct file *file); -static void *rxrpc_proc_peers_start(struct seq_file *p, loff_t *pos); -static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos); -static void rxrpc_proc_peers_stop(struct seq_file *p, void *v); -static int rxrpc_proc_peers_show(struct seq_file *m, void *v); - -static struct seq_operations rxrpc_proc_peers_ops = { - .start = rxrpc_proc_peers_start, - .next = rxrpc_proc_peers_next, - .stop = rxrpc_proc_peers_stop, - .show = rxrpc_proc_peers_show, -}; - -static const struct file_operations rxrpc_proc_peers_fops = { - .open = rxrpc_proc_peers_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static int rxrpc_proc_conns_open(struct inode *inode, struct file *file); -static void *rxrpc_proc_conns_start(struct seq_file *p, loff_t *pos); -static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos); -static void rxrpc_proc_conns_stop(struct seq_file *p, void *v); -static int rxrpc_proc_conns_show(struct seq_file *m, void *v); - -static struct seq_operations rxrpc_proc_conns_ops = { - .start = rxrpc_proc_conns_start, - .next = rxrpc_proc_conns_next, - .stop = rxrpc_proc_conns_stop, - .show = rxrpc_proc_conns_show, -}; - -static const struct file_operations rxrpc_proc_conns_fops = { - .open = rxrpc_proc_conns_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static int rxrpc_proc_calls_open(struct inode *inode, struct file *file); -static void *rxrpc_proc_calls_start(struct seq_file *p, loff_t *pos); -static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos); -static void rxrpc_proc_calls_stop(struct seq_file *p, void *v); -static int rxrpc_proc_calls_show(struct seq_file *m, void *v); - -static struct seq_operations rxrpc_proc_calls_ops = { - .start = rxrpc_proc_calls_start, - .next = rxrpc_proc_calls_next, - .stop = rxrpc_proc_calls_stop, - .show = rxrpc_proc_calls_show, -}; - -static const struct file_operations rxrpc_proc_calls_fops = { - .open = rxrpc_proc_calls_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static const char *rxrpc_call_states7[] = { - "complet", - "error ", - "rcv_op ", - "rcv_arg", - "got_arg", - "snd_rpl", - "fin_ack", - "snd_arg", - "rcv_rpl", - "got_rpl" -}; - -static const char *rxrpc_call_error_states7[] = { - "no_err ", - "loc_abt", - "rmt_abt", - "loc_err", - "rmt_err" -}; - -/*****************************************************************************/ -/* - * initialise the 
/proc/net/rxrpc/ directory - */ -int rxrpc_proc_init(void) -{ - struct proc_dir_entry *p; - - proc_rxrpc = proc_mkdir("rxrpc", proc_net); - if (!proc_rxrpc) - goto error; - proc_rxrpc->owner = THIS_MODULE; - - p = create_proc_entry("calls", 0, proc_rxrpc); - if (!p) - goto error_proc; - p->proc_fops = &rxrpc_proc_calls_fops; - p->owner = THIS_MODULE; - - p = create_proc_entry("connections", 0, proc_rxrpc); - if (!p) - goto error_calls; - p->proc_fops = &rxrpc_proc_conns_fops; - p->owner = THIS_MODULE; - - p = create_proc_entry("peers", 0, proc_rxrpc); - if (!p) - goto error_calls; - p->proc_fops = &rxrpc_proc_peers_fops; - p->owner = THIS_MODULE; - - p = create_proc_entry("transports", 0, proc_rxrpc); - if (!p) - goto error_conns; - p->proc_fops = &rxrpc_proc_transports_fops; - p->owner = THIS_MODULE; - - return 0; - - error_conns: - remove_proc_entry("connections", proc_rxrpc); - error_calls: - remove_proc_entry("calls", proc_rxrpc); - error_proc: - remove_proc_entry("rxrpc", proc_net); - error: - return -ENOMEM; -} /* end rxrpc_proc_init() */ - -/*****************************************************************************/ -/* - * clean up the /proc/net/rxrpc/ directory - */ -void rxrpc_proc_cleanup(void) -{ - remove_proc_entry("transports", proc_rxrpc); - remove_proc_entry("peers", proc_rxrpc); - remove_proc_entry("connections", proc_rxrpc); - remove_proc_entry("calls", proc_rxrpc); - - remove_proc_entry("rxrpc", proc_net); - -} /* end rxrpc_proc_cleanup() */ - -/*****************************************************************************/ -/* - * open "/proc/net/rxrpc/transports" which provides a summary of extant transports - */ -static int rxrpc_proc_transports_open(struct inode *inode, struct file *file) -{ - struct seq_file *m; - int ret; - - ret = seq_open(file, &rxrpc_proc_transports_ops); - if (ret < 0) - return ret; - - m = file->private_data; - m->private = PDE(inode)->data; - - return 0; -} /* end rxrpc_proc_transports_open() */ - -/*****************************************************************************/ -/* - * set up the iterator to start reading from the transports list and return the first item - */ -static void *rxrpc_proc_transports_start(struct seq_file *m, loff_t *_pos) -{ - struct list_head *_p; - loff_t pos = *_pos; - - /* lock the list against modification */ - down_read(&rxrpc_proc_transports_sem); - - /* allow for the header line */ - if (!pos) - return SEQ_START_TOKEN; - pos--; - - /* find the n'th element in the list */ - list_for_each(_p, &rxrpc_proc_transports) - if (!pos--) - break; - - return _p != &rxrpc_proc_transports ? _p : NULL; -} /* end rxrpc_proc_transports_start() */ - -/*****************************************************************************/ -/* - * move to next call in transports list - */ -static void *rxrpc_proc_transports_next(struct seq_file *p, void *v, loff_t *pos) -{ - struct list_head *_p; - - (*pos)++; - - _p = v; - _p = (v == SEQ_START_TOKEN) ? rxrpc_proc_transports.next : _p->next; - - return _p != &rxrpc_proc_transports ? 
_p : NULL; -} /* end rxrpc_proc_transports_next() */ - -/*****************************************************************************/ -/* - * clean up after reading from the transports list - */ -static void rxrpc_proc_transports_stop(struct seq_file *p, void *v) -{ - up_read(&rxrpc_proc_transports_sem); - -} /* end rxrpc_proc_transports_stop() */ - -/*****************************************************************************/ -/* - * display a header line followed by a load of call lines - */ -static int rxrpc_proc_transports_show(struct seq_file *m, void *v) -{ - struct rxrpc_transport *trans = - list_entry(v, struct rxrpc_transport, proc_link); - - /* display header on line 1 */ - if (v == SEQ_START_TOKEN) { - seq_puts(m, "LOCAL USE\n"); - return 0; - } - - /* display one transport per line on subsequent lines */ - seq_printf(m, "%5hu %3d\n", - trans->port, - atomic_read(&trans->usage) - ); - - return 0; -} /* end rxrpc_proc_transports_show() */ - -/*****************************************************************************/ -/* - * open "/proc/net/rxrpc/peers" which provides a summary of extant peers - */ -static int rxrpc_proc_peers_open(struct inode *inode, struct file *file) -{ - struct seq_file *m; - int ret; - - ret = seq_open(file, &rxrpc_proc_peers_ops); - if (ret < 0) - return ret; - - m = file->private_data; - m->private = PDE(inode)->data; - - return 0; -} /* end rxrpc_proc_peers_open() */ - -/*****************************************************************************/ -/* - * set up the iterator to start reading from the peers list and return the - * first item - */ -static void *rxrpc_proc_peers_start(struct seq_file *m, loff_t *_pos) -{ - struct list_head *_p; - loff_t pos = *_pos; - - /* lock the list against modification */ - down_read(&rxrpc_peers_sem); - - /* allow for the header line */ - if (!pos) - return SEQ_START_TOKEN; - pos--; - - /* find the n'th element in the list */ - list_for_each(_p, &rxrpc_peers) - if (!pos--) - break; - - return _p != &rxrpc_peers ? _p : NULL; -} /* end rxrpc_proc_peers_start() */ - -/*****************************************************************************/ -/* - * move to next conn in peers list - */ -static void *rxrpc_proc_peers_next(struct seq_file *p, void *v, loff_t *pos) -{ - struct list_head *_p; - - (*pos)++; - - _p = v; - _p = (v == SEQ_START_TOKEN) ? rxrpc_peers.next : _p->next; - - return _p != &rxrpc_peers ? 
_p : NULL; -} /* end rxrpc_proc_peers_next() */ - -/*****************************************************************************/ -/* - * clean up after reading from the peers list - */ -static void rxrpc_proc_peers_stop(struct seq_file *p, void *v) -{ - up_read(&rxrpc_peers_sem); - -} /* end rxrpc_proc_peers_stop() */ - -/*****************************************************************************/ -/* - * display a header line followed by a load of conn lines - */ -static int rxrpc_proc_peers_show(struct seq_file *m, void *v) -{ - struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link); - long timeout; - - /* display header on line 1 */ - if (v == SEQ_START_TOKEN) { - seq_puts(m, "LOCAL REMOTE USAGE CONNS TIMEOUT" - " MTU RTT(uS)\n"); - return 0; - } - - /* display one peer per line on subsequent lines */ - timeout = 0; - if (!list_empty(&peer->timeout.link)) - timeout = (long) peer->timeout.timo_jif - - (long) jiffies; - - seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n", - peer->trans->port, - ntohl(peer->addr.s_addr), - atomic_read(&peer->usage), - atomic_read(&peer->conn_count), - timeout, - peer->if_mtu, - (long) peer->rtt - ); - - return 0; -} /* end rxrpc_proc_peers_show() */ - -/*****************************************************************************/ -/* - * open "/proc/net/rxrpc/connections" which provides a summary of extant - * connections - */ -static int rxrpc_proc_conns_open(struct inode *inode, struct file *file) -{ - struct seq_file *m; - int ret; - - ret = seq_open(file, &rxrpc_proc_conns_ops); - if (ret < 0) - return ret; - - m = file->private_data; - m->private = PDE(inode)->data; - - return 0; -} /* end rxrpc_proc_conns_open() */ - -/*****************************************************************************/ -/* - * set up the iterator to start reading from the conns list and return the - * first item - */ -static void *rxrpc_proc_conns_start(struct seq_file *m, loff_t *_pos) -{ - struct list_head *_p; - loff_t pos = *_pos; - - /* lock the list against modification */ - down_read(&rxrpc_conns_sem); - - /* allow for the header line */ - if (!pos) - return SEQ_START_TOKEN; - pos--; - - /* find the n'th element in the list */ - list_for_each(_p, &rxrpc_conns) - if (!pos--) - break; - - return _p != &rxrpc_conns ? _p : NULL; -} /* end rxrpc_proc_conns_start() */ - -/*****************************************************************************/ -/* - * move to next conn in conns list - */ -static void *rxrpc_proc_conns_next(struct seq_file *p, void *v, loff_t *pos) -{ - struct list_head *_p; - - (*pos)++; - - _p = v; - _p = (v == SEQ_START_TOKEN) ? rxrpc_conns.next : _p->next; - - return _p != &rxrpc_conns ? 
_p : NULL; -} /* end rxrpc_proc_conns_next() */ - -/*****************************************************************************/ -/* - * clean up after reading from the conns list - */ -static void rxrpc_proc_conns_stop(struct seq_file *p, void *v) -{ - up_read(&rxrpc_conns_sem); - -} /* end rxrpc_proc_conns_stop() */ - -/*****************************************************************************/ -/* - * display a header line followed by a load of conn lines - */ -static int rxrpc_proc_conns_show(struct seq_file *m, void *v) -{ - struct rxrpc_connection *conn; - long timeout; - - conn = list_entry(v, struct rxrpc_connection, proc_link); - - /* display header on line 1 */ - if (v == SEQ_START_TOKEN) { - seq_puts(m, - "LOCAL REMOTE RPORT SRVC CONN END SERIALNO " - "CALLNO MTU TIMEOUT" - "\n"); - return 0; - } - - /* display one conn per line on subsequent lines */ - timeout = 0; - if (!list_empty(&conn->timeout.link)) - timeout = (long) conn->timeout.timo_jif - - (long) jiffies; - - seq_printf(m, - "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n", - conn->trans->port, - ntohl(conn->addr.sin_addr.s_addr), - ntohs(conn->addr.sin_port), - ntohs(conn->service_id), - ntohl(conn->conn_id), - conn->out_clientflag ? "CLT" : "SRV", - conn->serial_counter, - conn->call_counter, - conn->mtu_size, - timeout - ); - - return 0; -} /* end rxrpc_proc_conns_show() */ - -/*****************************************************************************/ -/* - * open "/proc/net/rxrpc/calls" which provides a summary of extant calls - */ -static int rxrpc_proc_calls_open(struct inode *inode, struct file *file) -{ - struct seq_file *m; - int ret; - - ret = seq_open(file, &rxrpc_proc_calls_ops); - if (ret < 0) - return ret; - - m = file->private_data; - m->private = PDE(inode)->data; - - return 0; -} /* end rxrpc_proc_calls_open() */ - -/*****************************************************************************/ -/* - * set up the iterator to start reading from the calls list and return the - * first item - */ -static void *rxrpc_proc_calls_start(struct seq_file *m, loff_t *_pos) -{ - struct list_head *_p; - loff_t pos = *_pos; - - /* lock the list against modification */ - down_read(&rxrpc_calls_sem); - - /* allow for the header line */ - if (!pos) - return SEQ_START_TOKEN; - pos--; - - /* find the n'th element in the list */ - list_for_each(_p, &rxrpc_calls) - if (!pos--) - break; - - return _p != &rxrpc_calls ? _p : NULL; -} /* end rxrpc_proc_calls_start() */ - -/*****************************************************************************/ -/* - * move to next call in calls list - */ -static void *rxrpc_proc_calls_next(struct seq_file *p, void *v, loff_t *pos) -{ - struct list_head *_p; - - (*pos)++; - - _p = v; - _p = (v == SEQ_START_TOKEN) ? rxrpc_calls.next : _p->next; - - return _p != &rxrpc_calls ? 
_p : NULL; -} /* end rxrpc_proc_calls_next() */ - -/*****************************************************************************/ -/* - * clean up after reading from the calls list - */ -static void rxrpc_proc_calls_stop(struct seq_file *p, void *v) -{ - up_read(&rxrpc_calls_sem); - -} /* end rxrpc_proc_calls_stop() */ - -/*****************************************************************************/ -/* - * display a header line followed by a load of call lines - */ -static int rxrpc_proc_calls_show(struct seq_file *m, void *v) -{ - struct rxrpc_call *call = list_entry(v, struct rxrpc_call, call_link); - - /* display header on line 1 */ - if (v == SEQ_START_TOKEN) { - seq_puts(m, - "LOCAL REMOT SRVC CONN CALL DIR USE " - " L STATE OPCODE ABORT ERRNO\n" - ); - return 0; - } - - /* display one call per line on subsequent lines */ - seq_printf(m, - "%5hu %5hu %04hx %08x %08x %s %3u%c" - " %c %-7.7s %6d %08x %5d\n", - call->conn->trans->port, - ntohs(call->conn->addr.sin_port), - ntohs(call->conn->service_id), - ntohl(call->conn->conn_id), - ntohl(call->call_id), - call->conn->service ? "SVC" : "CLT", - atomic_read(&call->usage), - waitqueue_active(&call->waitq) ? 'w' : ' ', - call->app_last_rcv ? 'Y' : '-', - (call->app_call_state!=RXRPC_CSTATE_ERROR ? - rxrpc_call_states7[call->app_call_state] : - rxrpc_call_error_states7[call->app_err_state]), - call->app_opcode, - call->app_abort_code, - call->app_errno - ); - - return 0; -} /* end rxrpc_proc_calls_show() */ diff --git a/net/rxrpc/rxrpc_syms.c b/net/rxrpc/rxrpc_syms.c deleted file mode 100644 index 9896fd87a4d4..000000000000 --- a/net/rxrpc/rxrpc_syms.c +++ /dev/null @@ -1,34 +0,0 @@ -/* rxrpc_syms.c: exported Rx RPC layer interface symbols - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include - -#include -#include -#include -#include - -/* call.c */ -EXPORT_SYMBOL(rxrpc_create_call); -EXPORT_SYMBOL(rxrpc_put_call); -EXPORT_SYMBOL(rxrpc_call_abort); -EXPORT_SYMBOL(rxrpc_call_read_data); -EXPORT_SYMBOL(rxrpc_call_write_data); - -/* connection.c */ -EXPORT_SYMBOL(rxrpc_create_connection); -EXPORT_SYMBOL(rxrpc_put_connection); - -/* transport.c */ -EXPORT_SYMBOL(rxrpc_create_transport); -EXPORT_SYMBOL(rxrpc_put_transport); -EXPORT_SYMBOL(rxrpc_add_service); -EXPORT_SYMBOL(rxrpc_del_service); diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c deleted file mode 100644 index 884290754af7..000000000000 --- a/net/rxrpc/sysctl.c +++ /dev/null @@ -1,121 +0,0 @@ -/* sysctl.c: Rx RPC control - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include "internal.h" - -int rxrpc_ktrace; -int rxrpc_kdebug; -int rxrpc_kproto; -int rxrpc_knet; - -#ifdef CONFIG_SYSCTL -static struct ctl_table_header *rxrpc_sysctl = NULL; - -static ctl_table rxrpc_sysctl_table[] = { - { - .ctl_name = 1, - .procname = "kdebug", - .data = &rxrpc_kdebug, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - .ctl_name = 2, - .procname = "ktrace", - .data = &rxrpc_ktrace, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - .ctl_name = 3, - .procname = "kproto", - .data = &rxrpc_kproto, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - .ctl_name = 4, - .procname = "knet", - .data = &rxrpc_knet, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - .ctl_name = 5, - .procname = "peertimo", - .data = &rxrpc_peer_timeout, - .maxlen = sizeof(unsigned long), - .mode = 0644, - .proc_handler = &proc_doulongvec_minmax - }, - { - .ctl_name = 6, - .procname = "conntimo", - .data = &rxrpc_conn_timeout, - .maxlen = sizeof(unsigned long), - .mode = 0644, - .proc_handler = &proc_doulongvec_minmax - }, - { .ctl_name = 0 } -}; - -static ctl_table rxrpc_dir_sysctl_table[] = { - { - .ctl_name = 1, - .procname = "rxrpc", - .maxlen = 0, - .mode = 0555, - .child = rxrpc_sysctl_table - }, - { .ctl_name = 0 } -}; -#endif /* CONFIG_SYSCTL */ - -/*****************************************************************************/ -/* - * initialise the sysctl stuff for Rx RPC - */ -int rxrpc_sysctl_init(void) -{ -#ifdef CONFIG_SYSCTL - rxrpc_sysctl = register_sysctl_table(rxrpc_dir_sysctl_table); - if (!rxrpc_sysctl) - return -ENOMEM; -#endif /* CONFIG_SYSCTL */ - - return 0; -} /* end rxrpc_sysctl_init() */ - -/*****************************************************************************/ -/* - * clean up the sysctl stuff for Rx RPC - */ -void rxrpc_sysctl_cleanup(void) -{ -#ifdef CONFIG_SYSCTL - if (rxrpc_sysctl) { - unregister_sysctl_table(rxrpc_sysctl); - rxrpc_sysctl = NULL; - } -#endif /* CONFIG_SYSCTL */ - -} /* end rxrpc_sysctl_cleanup() */ diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c deleted file mode 100644 index 62398fd01f85..000000000000 --- a/net/rxrpc/transport.c +++ /dev/null @@ -1,846 +0,0 @@ -/* transport.c: Rx Transport routines - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) -#include /* this should _really_ be in errqueue.h.. 
*/ -#endif -#include -#include -#include "internal.h" - -struct errormsg { - struct cmsghdr cmsg; /* control message header */ - struct sock_extended_err ee; /* extended error information */ - struct sockaddr_in icmp_src; /* ICMP packet source address */ -}; - -static DEFINE_SPINLOCK(rxrpc_transports_lock); -static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports); - -__RXACCT_DECL(atomic_t rxrpc_transport_count); -LIST_HEAD(rxrpc_proc_transports); -DECLARE_RWSEM(rxrpc_proc_transports_sem); - -static void rxrpc_data_ready(struct sock *sk, int count); -static void rxrpc_error_report(struct sock *sk); -static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans, - struct list_head *msgq); -static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans); - -/*****************************************************************************/ -/* - * create a new transport endpoint using the specified UDP port - */ -int rxrpc_create_transport(unsigned short port, - struct rxrpc_transport **_trans) -{ - struct rxrpc_transport *trans; - struct sockaddr_in sin; - mm_segment_t oldfs; - struct sock *sock; - int ret, opt; - - _enter("%hu", port); - - trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); - if (!trans) - return -ENOMEM; - - atomic_set(&trans->usage, 1); - INIT_LIST_HEAD(&trans->services); - INIT_LIST_HEAD(&trans->link); - INIT_LIST_HEAD(&trans->krxiodq_link); - spin_lock_init(&trans->lock); - INIT_LIST_HEAD(&trans->peer_active); - INIT_LIST_HEAD(&trans->peer_graveyard); - spin_lock_init(&trans->peer_gylock); - init_waitqueue_head(&trans->peer_gy_waitq); - rwlock_init(&trans->peer_lock); - atomic_set(&trans->peer_count, 0); - trans->port = port; - - /* create a UDP socket to be my actual transport endpoint */ - ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket); - if (ret < 0) - goto error; - - /* use the specified port */ - if (port) { - memset(&sin, 0, sizeof(sin)); - sin.sin_family = AF_INET; - sin.sin_port = htons(port); - ret = trans->socket->ops->bind(trans->socket, - (struct sockaddr *) &sin, - sizeof(sin)); - if (ret < 0) - goto error; - } - - opt = 1; - oldfs = get_fs(); - set_fs(KERNEL_DS); - ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR, - (char *) &opt, sizeof(opt)); - set_fs(oldfs); - - spin_lock(&rxrpc_transports_lock); - list_add(&trans->link, &rxrpc_transports); - spin_unlock(&rxrpc_transports_lock); - - /* set the socket up */ - sock = trans->socket->sk; - sock->sk_user_data = trans; - sock->sk_data_ready = rxrpc_data_ready; - sock->sk_error_report = rxrpc_error_report; - - down_write(&rxrpc_proc_transports_sem); - list_add_tail(&trans->proc_link, &rxrpc_proc_transports); - up_write(&rxrpc_proc_transports_sem); - - __RXACCT(atomic_inc(&rxrpc_transport_count)); - - *_trans = trans; - _leave(" = 0 (%p)", trans); - return 0; - - error: - /* finish cleaning up the transport (not really needed here, but...) 
*/ - if (trans->socket) - trans->socket->ops->shutdown(trans->socket, 2); - - /* close the socket */ - if (trans->socket) { - trans->socket->sk->sk_user_data = NULL; - sock_release(trans->socket); - trans->socket = NULL; - } - - kfree(trans); - - - _leave(" = %d", ret); - return ret; -} /* end rxrpc_create_transport() */ - -/*****************************************************************************/ -/* - * destroy a transport endpoint - */ -void rxrpc_put_transport(struct rxrpc_transport *trans) -{ - _enter("%p{u=%d p=%hu}", - trans, atomic_read(&trans->usage), trans->port); - - BUG_ON(atomic_read(&trans->usage) <= 0); - - /* to prevent a race, the decrement and the dequeue must be - * effectively atomic */ - spin_lock(&rxrpc_transports_lock); - if (likely(!atomic_dec_and_test(&trans->usage))) { - spin_unlock(&rxrpc_transports_lock); - _leave(""); - return; - } - - list_del(&trans->link); - spin_unlock(&rxrpc_transports_lock); - - /* finish cleaning up the transport */ - if (trans->socket) - trans->socket->ops->shutdown(trans->socket, 2); - - rxrpc_krxsecd_clear_transport(trans); - rxrpc_krxiod_dequeue_transport(trans); - - /* discard all peer information */ - rxrpc_peer_clearall(trans); - - down_write(&rxrpc_proc_transports_sem); - list_del(&trans->proc_link); - up_write(&rxrpc_proc_transports_sem); - __RXACCT(atomic_dec(&rxrpc_transport_count)); - - /* close the socket */ - if (trans->socket) { - trans->socket->sk->sk_user_data = NULL; - sock_release(trans->socket); - trans->socket = NULL; - } - - kfree(trans); - - _leave(""); -} /* end rxrpc_put_transport() */ - -/*****************************************************************************/ -/* - * add a service to a transport to be listened upon - */ -int rxrpc_add_service(struct rxrpc_transport *trans, - struct rxrpc_service *newsrv) -{ - struct rxrpc_service *srv; - struct list_head *_p; - int ret = -EEXIST; - - _enter("%p{%hu},%p{%hu}", - trans, trans->port, newsrv, newsrv->service_id); - - /* verify that the service ID is not already present */ - spin_lock(&trans->lock); - - list_for_each(_p, &trans->services) { - srv = list_entry(_p, struct rxrpc_service, link); - if (srv->service_id == newsrv->service_id) - goto out; - } - - /* okay - add the transport to the list */ - list_add_tail(&newsrv->link, &trans->services); - rxrpc_get_transport(trans); - ret = 0; - - out: - spin_unlock(&trans->lock); - - _leave("= %d", ret); - return ret; -} /* end rxrpc_add_service() */ - -/*****************************************************************************/ -/* - * remove a service from a transport - */ -void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv) -{ - _enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id); - - spin_lock(&trans->lock); - list_del(&srv->link); - spin_unlock(&trans->lock); - - rxrpc_put_transport(trans); - - _leave(""); -} /* end rxrpc_del_service() */ - -/*****************************************************************************/ -/* - * INET callback when data has been received on the socket. 
- */ -static void rxrpc_data_ready(struct sock *sk, int count) -{ - struct rxrpc_transport *trans; - - _enter("%p{t=%p},%d", sk, sk->sk_user_data, count); - - /* queue the transport for attention by krxiod */ - trans = (struct rxrpc_transport *) sk->sk_user_data; - if (trans) - rxrpc_krxiod_queue_transport(trans); - - /* wake up anyone waiting on the socket */ - if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) - wake_up_interruptible(sk->sk_sleep); - - _leave(""); -} /* end rxrpc_data_ready() */ - -/*****************************************************************************/ -/* - * INET callback when an ICMP error packet is received - * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE) - */ -static void rxrpc_error_report(struct sock *sk) -{ - struct rxrpc_transport *trans; - - _enter("%p{t=%p}", sk, sk->sk_user_data); - - /* queue the transport for attention by krxiod */ - trans = (struct rxrpc_transport *) sk->sk_user_data; - if (trans) { - trans->error_rcvd = 1; - rxrpc_krxiod_queue_transport(trans); - } - - /* wake up anyone waiting on the socket */ - if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) - wake_up_interruptible(sk->sk_sleep); - - _leave(""); -} /* end rxrpc_error_report() */ - -/*****************************************************************************/ -/* - * split a message up, allocating message records and filling them in - * from the contents of a socket buffer - */ -static int rxrpc_incoming_msg(struct rxrpc_transport *trans, - struct sk_buff *pkt, - struct list_head *msgq) -{ - struct rxrpc_message *msg; - int ret; - - _enter(""); - - msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL); - if (!msg) { - _leave(" = -ENOMEM"); - return -ENOMEM; - } - - atomic_set(&msg->usage, 1); - list_add_tail(&msg->link,msgq); - - /* dig out the Rx routing parameters */ - if (skb_copy_bits(pkt, sizeof(struct udphdr), - &msg->hdr, sizeof(msg->hdr)) < 0) { - ret = -EBADMSG; - goto error; - } - - msg->trans = trans; - msg->state = RXRPC_MSG_RECEIVED; - skb_get_timestamp(pkt, &msg->stamp); - if (msg->stamp.tv_sec == 0) { - do_gettimeofday(&msg->stamp); - if (pkt->sk) - sock_enable_timestamp(pkt->sk); - } - msg->seq = ntohl(msg->hdr.seq); - - /* attach the packet */ - skb_get(pkt); - msg->pkt = pkt; - - msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header); - msg->dsize = msg->pkt->len - msg->offset; - - _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)", - msg->hdr.flags & RXRPC_CLIENT_INITIATED ? 
"client" : "server", - ntohl(msg->hdr.epoch), - (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT, - ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK, - ntohl(msg->hdr.callNumber), - rxrpc_pkts[msg->hdr.type], - msg->hdr.flags, - ntohs(msg->hdr.serviceId), - msg->hdr.securityIndex); - - __RXACCT(atomic_inc(&rxrpc_message_count)); - - /* split off jumbo packets */ - while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA && - msg->hdr.flags & RXRPC_JUMBO_PACKET - ) { - struct rxrpc_jumbo_header jumbo; - struct rxrpc_message *jumbomsg = msg; - - _debug("split jumbo packet"); - - /* quick sanity check */ - ret = -EBADMSG; - if (msg->dsize < - RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) - goto error; - if (msg->hdr.flags & RXRPC_LAST_PACKET) - goto error; - - /* dig out the secondary header */ - if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN, - &jumbo, sizeof(jumbo)) < 0) - goto error; - - /* allocate a new message record */ - ret = -ENOMEM; - msg = kmemdup(jumbomsg, sizeof(struct rxrpc_message), GFP_KERNEL); - if (!msg) - goto error; - - list_add_tail(&msg->link, msgq); - - /* adjust the jumbo packet */ - jumbomsg->dsize = RXRPC_JUMBO_DATALEN; - - /* attach the packet here too */ - skb_get(pkt); - - /* adjust the parameters */ - msg->seq++; - msg->hdr.seq = htonl(msg->seq); - msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1); - msg->offset += RXRPC_JUMBO_DATALEN + - sizeof(struct rxrpc_jumbo_header); - msg->dsize -= RXRPC_JUMBO_DATALEN + - sizeof(struct rxrpc_jumbo_header); - msg->hdr.flags = jumbo.flags; - msg->hdr._rsvd = jumbo._rsvd; - - _net("Rx Split jumbo packet from %s" - " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)", - msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server", - ntohl(msg->hdr.epoch), - (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT, - ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK, - ntohl(msg->hdr.callNumber), - rxrpc_pkts[msg->hdr.type], - msg->hdr.flags, - ntohs(msg->hdr.serviceId), - msg->hdr.securityIndex); - - __RXACCT(atomic_inc(&rxrpc_message_count)); - } - - _leave(" = 0 #%d", atomic_read(&rxrpc_message_count)); - return 0; - - error: - while (!list_empty(msgq)) { - msg = list_entry(msgq->next, struct rxrpc_message, link); - list_del_init(&msg->link); - - rxrpc_put_message(msg); - } - - _leave(" = %d", ret); - return ret; -} /* end rxrpc_incoming_msg() */ - -/*****************************************************************************/ -/* - * accept a new call - * - called from krxiod in process context - */ -void rxrpc_trans_receive_packet(struct rxrpc_transport *trans) -{ - struct rxrpc_message *msg; - struct rxrpc_peer *peer; - struct sk_buff *pkt; - int ret; - __be32 addr; - __be16 port; - - LIST_HEAD(msgq); - - _enter("%p{%d}", trans, trans->port); - - for (;;) { - /* deal with outstanting errors first */ - if (trans->error_rcvd) - rxrpc_trans_receive_error_report(trans); - - /* attempt to receive a packet */ - pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret); - if (!pkt) { - if (ret == -EAGAIN) { - _leave(" EAGAIN"); - return; - } - - /* an icmp error may have occurred */ - rxrpc_krxiod_queue_transport(trans); - _leave(" error %d\n", ret); - return; - } - - /* we'll probably need to checksum it (didn't call - * sock_recvmsg) */ - if (skb_checksum_complete(pkt)) { - kfree_skb(pkt); - rxrpc_krxiod_queue_transport(trans); - _leave(" CSUM failed"); - return; - } - - addr = ip_hdr(pkt)->saddr; - port = udp_hdr(pkt)->source; - - _net("Rx Received UDP packet from %08x:%04hu", - ntohl(addr), ntohs(port)); - - /* unmarshall the Rx parameters 
and split jumbo packets */ - ret = rxrpc_incoming_msg(trans, pkt, &msgq); - if (ret < 0) { - kfree_skb(pkt); - rxrpc_krxiod_queue_transport(trans); - _leave(" bad packet"); - return; - } - - BUG_ON(list_empty(&msgq)); - - msg = list_entry(msgq.next, struct rxrpc_message, link); - - /* locate the record for the peer from which it - * originated */ - ret = rxrpc_peer_lookup(trans, addr, &peer); - if (ret < 0) { - kdebug("Rx No connections from that peer"); - rxrpc_trans_immediate_abort(trans, msg, -EINVAL); - goto finished_msg; - } - - /* try and find a matching connection */ - ret = rxrpc_connection_lookup(peer, msg, &msg->conn); - if (ret < 0) { - kdebug("Rx Unknown Connection"); - rxrpc_trans_immediate_abort(trans, msg, -EINVAL); - rxrpc_put_peer(peer); - goto finished_msg; - } - rxrpc_put_peer(peer); - - /* deal with the first packet of a new call */ - if (msg->hdr.flags & RXRPC_CLIENT_INITIATED && - msg->hdr.type == RXRPC_PACKET_TYPE_DATA && - ntohl(msg->hdr.seq) == 1 - ) { - _debug("Rx New server call"); - rxrpc_trans_receive_new_call(trans, &msgq); - goto finished_msg; - } - - /* deal with subsequent packet(s) of call */ - _debug("Rx Call packet"); - while (!list_empty(&msgq)) { - msg = list_entry(msgq.next, struct rxrpc_message, link); - list_del_init(&msg->link); - - ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg); - if (ret < 0) { - rxrpc_trans_immediate_abort(trans, msg, ret); - rxrpc_put_message(msg); - goto finished_msg; - } - - rxrpc_put_message(msg); - } - - goto finished_msg; - - /* dispose of the packets */ - finished_msg: - while (!list_empty(&msgq)) { - msg = list_entry(msgq.next, struct rxrpc_message, link); - list_del_init(&msg->link); - - rxrpc_put_message(msg); - } - kfree_skb(pkt); - } - - _leave(""); - -} /* end rxrpc_trans_receive_packet() */ - -/*****************************************************************************/ -/* - * accept a new call from a client trying to connect to one of my services - * - called in process context - */ -static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans, - struct list_head *msgq) -{ - struct rxrpc_message *msg; - - _enter(""); - - /* only bother with the first packet */ - msg = list_entry(msgq->next, struct rxrpc_message, link); - list_del_init(&msg->link); - rxrpc_krxsecd_queue_incoming_call(msg); - rxrpc_put_message(msg); - - _leave(" = 0"); - - return 0; -} /* end rxrpc_trans_receive_new_call() */ - -/*****************************************************************************/ -/* - * perform an immediate abort without connection or call structures - */ -int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans, - struct rxrpc_message *msg, - int error) -{ - struct rxrpc_header ahdr; - struct sockaddr_in sin; - struct msghdr msghdr; - struct kvec iov[2]; - __be32 _error; - int len, ret; - - _enter("%p,%p,%d", trans, msg, error); - - /* don't abort an abort packet */ - if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) { - _leave(" = 0"); - return 0; - } - - _error = htonl(-error); - - /* set up the message to be transmitted */ - memcpy(&ahdr, &msg->hdr, sizeof(ahdr)); - ahdr.epoch = msg->hdr.epoch; - ahdr.serial = htonl(1); - ahdr.seq = 0; - ahdr.type = RXRPC_PACKET_TYPE_ABORT; - ahdr.flags = RXRPC_LAST_PACKET; - ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED; - - iov[0].iov_len = sizeof(ahdr); - iov[0].iov_base = &ahdr; - iov[1].iov_len = sizeof(_error); - iov[1].iov_base = &_error; - - len = sizeof(ahdr) + sizeof(_error); - - memset(&sin,0,sizeof(sin)); - sin.sin_family = AF_INET; - 
sin.sin_port = udp_hdr(msg->pkt)->source; - sin.sin_addr.s_addr = ip_hdr(msg->pkt)->saddr; - - msghdr.msg_name = &sin; - msghdr.msg_namelen = sizeof(sin); - msghdr.msg_control = NULL; - msghdr.msg_controllen = 0; - msghdr.msg_flags = MSG_DONTWAIT; - - _net("Sending message type %d of %d bytes to %08x:%d", - ahdr.type, - len, - ntohl(sin.sin_addr.s_addr), - ntohs(sin.sin_port)); - - /* send the message */ - ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len); - - _leave(" = %d", ret); - return ret; -} /* end rxrpc_trans_immediate_abort() */ - -/*****************************************************************************/ -/* - * receive an ICMP error report and percolate it to all connections - * heading to the affected host or port - */ -static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans) -{ - struct rxrpc_connection *conn; - struct sockaddr_in sin; - struct rxrpc_peer *peer; - struct list_head connq, *_p; - struct errormsg emsg; - struct msghdr msg; - __be16 port; - int local, err; - - _enter("%p", trans); - - for (;;) { - trans->error_rcvd = 0; - - /* try and receive an error message */ - msg.msg_name = &sin; - msg.msg_namelen = sizeof(sin); - msg.msg_control = &emsg; - msg.msg_controllen = sizeof(emsg); - msg.msg_flags = 0; - - err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0, - MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC); - - if (err == -EAGAIN) { - _leave(""); - return; - } - - if (err < 0) { - printk("%s: unable to recv an error report: %d\n", - __FUNCTION__, err); - _leave(""); - return; - } - - msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg; - - if (msg.msg_controllen < sizeof(emsg.cmsg) || - msg.msg_namelen < sizeof(sin)) { - printk("%s: short control message" - " (nlen=%u clen=%Zu fl=%x)\n", - __FUNCTION__, - msg.msg_namelen, - msg.msg_controllen, - msg.msg_flags); - continue; - } - - _net("Rx Received control message" - " { len=%Zu level=%u type=%u }", - emsg.cmsg.cmsg_len, - emsg.cmsg.cmsg_level, - emsg.cmsg.cmsg_type); - - if (sin.sin_family != AF_INET) { - printk("Rx Ignoring error report with non-INET address" - " (fam=%u)", - sin.sin_family); - continue; - } - - _net("Rx Received message pertaining to host addr=%x port=%hu", - ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port)); - - if (emsg.cmsg.cmsg_level != SOL_IP || - emsg.cmsg.cmsg_type != IP_RECVERR) { - printk("Rx Ignoring unknown error report" - " { level=%u type=%u }", - emsg.cmsg.cmsg_level, - emsg.cmsg.cmsg_type); - continue; - } - - if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) { - printk("%s: short error message (%Zu)\n", - __FUNCTION__, msg.msg_controllen); - _leave(""); - return; - } - - port = sin.sin_port; - - switch (emsg.ee.ee_origin) { - case SO_EE_ORIGIN_ICMP: - local = 0; - switch (emsg.ee.ee_type) { - case ICMP_DEST_UNREACH: - switch (emsg.ee.ee_code) { - case ICMP_NET_UNREACH: - _net("Rx Received ICMP Network Unreachable"); - port = 0; - err = -ENETUNREACH; - break; - case ICMP_HOST_UNREACH: - _net("Rx Received ICMP Host Unreachable"); - port = 0; - err = -EHOSTUNREACH; - break; - case ICMP_PORT_UNREACH: - _net("Rx Received ICMP Port Unreachable"); - err = -ECONNREFUSED; - break; - case ICMP_NET_UNKNOWN: - _net("Rx Received ICMP Unknown Network"); - port = 0; - err = -ENETUNREACH; - break; - case ICMP_HOST_UNKNOWN: - _net("Rx Received ICMP Unknown Host"); - port = 0; - err = -EHOSTUNREACH; - break; - default: - _net("Rx Received ICMP DestUnreach { code=%u }", - emsg.ee.ee_code); - err = emsg.ee.ee_errno; - break; - } - break; - - case 
ICMP_TIME_EXCEEDED: - _net("Rx Received ICMP TTL Exceeded"); - err = emsg.ee.ee_errno; - break; - - default: - _proto("Rx Received ICMP error { type=%u code=%u }", - emsg.ee.ee_type, emsg.ee.ee_code); - err = emsg.ee.ee_errno; - break; - } - break; - - case SO_EE_ORIGIN_LOCAL: - _proto("Rx Received local error { error=%d }", - emsg.ee.ee_errno); - local = 1; - err = emsg.ee.ee_errno; - break; - - case SO_EE_ORIGIN_NONE: - case SO_EE_ORIGIN_ICMP6: - default: - _proto("Rx Received error report { orig=%u }", - emsg.ee.ee_origin); - local = 0; - err = emsg.ee.ee_errno; - break; - } - - /* find all the connections between this transport and the - * affected destination */ - INIT_LIST_HEAD(&connq); - - if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr, - &peer) == 0) { - read_lock(&peer->conn_lock); - list_for_each(_p, &peer->conn_active) { - conn = list_entry(_p, struct rxrpc_connection, - link); - if (port && conn->addr.sin_port != port) - continue; - if (!list_empty(&conn->err_link)) - continue; - - rxrpc_get_connection(conn); - list_add_tail(&conn->err_link, &connq); - } - read_unlock(&peer->conn_lock); - - /* service all those connections */ - while (!list_empty(&connq)) { - conn = list_entry(connq.next, - struct rxrpc_connection, - err_link); - list_del(&conn->err_link); - - rxrpc_conn_handle_error(conn, local, err); - - rxrpc_put_connection(conn); - } - - rxrpc_put_peer(peer); - } - } - - _leave(""); - return; -} /* end rxrpc_trans_receive_error_report() */ -- cgit v1.2.3-59-g8ed1b From 295f4a1fa3ecdf816b18393ef7bcd37c032df2fa Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 26 Apr 2007 20:43:56 -0700 Subject: [WEXT]: Clean up how wext is called. This patch cleans up the call paths from the core code into wext. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville Signed-off-by: David S. Miller --- include/net/iw_handler.h | 11 +---------- include/net/wext.h | 24 ++++++++++++++++++++++++ net/core/dev.c | 34 ++++------------------------------ net/wireless/wext.c | 28 ++++++++++++++++++++++++---- 4 files changed, 53 insertions(+), 44 deletions(-) create mode 100644 include/net/wext.h (limited to 'include') diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index 909ca87d13b7..f23d07ca7c59 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -431,16 +431,7 @@ struct iw_public_data { * Those may be called only within the kernel. 
*/ -/* First : function strictly used inside the kernel */ - -/* Handle /proc/net/wireless, called in net/code/dev.c */ -extern int dev_get_wireless_info(char * buffer, char **start, off_t offset, - int length); - -/* Handle IOCTLs, called in net/core/dev.c */ -extern int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd); - -/* Second : functions that may be called by driver modules */ +/* functions that may be called by driver modules */ /* Send a single event to user space */ extern void wireless_send_event(struct net_device * dev, diff --git a/include/net/wext.h b/include/net/wext.h new file mode 100644 index 000000000000..55741836a675 --- /dev/null +++ b/include/net/wext.h @@ -0,0 +1,24 @@ +#ifndef __NET_WEXT_H +#define __NET_WEXT_H + +/* + * wireless extensions interface to the core code + */ + +#ifdef CONFIG_WIRELESS_EXT +extern int wext_proc_init(void); +extern int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd, + void __user *arg); +#else +static inline int wext_proc_init() +{ + return 0; +} +static inline int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd, + void __user *arg) +{ + return -EINVAL; +} +#endif + +#endif /* __NET_WEXT_H */ diff --git a/net/core/dev.c b/net/core/dev.c index 700e4b5081b6..d5e42d13bd67 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -109,7 +109,7 @@ #include #include #include -#include +#include #include #include #include @@ -2348,12 +2348,6 @@ static const struct file_operations ptype_seq_fops = { }; -#ifdef CONFIG_WIRELESS_EXT -extern int wireless_proc_init(void); -#else -#define wireless_proc_init() 0 -#endif - static int __init dev_proc_init(void) { int rc = -ENOMEM; @@ -2365,7 +2359,7 @@ static int __init dev_proc_init(void) if (!proc_net_fops_create("ptype", S_IRUGO, &ptype_seq_fops)) goto out_dev2; - if (wireless_proc_init()) + if (wext_proc_init()) goto out_softnet; rc = 0; out: @@ -2923,29 +2917,9 @@ int dev_ioctl(unsigned int cmd, void __user *arg) ret = -EFAULT; return ret; } -#ifdef CONFIG_WIRELESS_EXT /* Take care of Wireless Extensions */ - if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { - /* If command is `set a parameter', or - * `get the encoding parameters', check if - * the user has the right to do it */ - if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE - || cmd == SIOCGIWENCODEEXT) { - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - } - dev_load(ifr.ifr_name); - rtnl_lock(); - /* Follow me in net/wireless/wext.c */ - ret = wireless_process_ioctl(&ifr, cmd); - rtnl_unlock(); - if (IW_IS_GET(cmd) && - copy_to_user(arg, &ifr, - sizeof(struct ifreq))) - ret = -EFAULT; - return ret; - } -#endif /* CONFIG_WIRELESS_EXT */ + if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) + return wext_handle_ioctl(&ifr, cmd, arg); return -EINVAL; } } diff --git a/net/wireless/wext.c b/net/wireless/wext.c index fba295e05e7a..a6cf1034e07c 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext.c @@ -97,6 +97,7 @@ #include /* Pretty obvious */ #include /* New driver API */ #include +#include #include /* copy_to_user() */ @@ -696,7 +697,7 @@ static const struct file_operations wireless_seq_fops = { .release = seq_release, }; -int __init wireless_proc_init(void) +int __init wext_proc_init(void) { /* Create /proc/net/wireless entry */ if (!proc_net_fops_create("wireless", S_IRUGO, &wireless_seq_fops)) @@ -1075,11 +1076,10 @@ static inline int ioctl_private_call(struct net_device * dev, /* ---------------------------------------------------------------- */ /* - * Main IOCTl dispatcher. 
Called from the main networking code - * (dev_ioctl() in net/core/dev.c). + * Main IOCTl dispatcher. * Check the type of IOCTL and call the appropriate wrapper... */ -int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd) +static int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd) { struct net_device *dev; iw_handler handler; @@ -1143,6 +1143,26 @@ int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd) return -EINVAL; } +/* entry point from dev ioctl */ +int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd, + void __user *arg) +{ + int ret; + + /* If command is `set a parameter', or + * `get the encoding parameters', check if + * the user has the right to do it */ + if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT) + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + dev_load(ifr->ifr_name); + rtnl_lock(); + ret = wireless_process_ioctl(ifr, cmd); + rtnl_unlock(); + if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct ifreq))) + return -EFAULT; + return ret; +} /************************* EVENT PROCESSING *************************/ /* -- cgit v1.2.3-59-g8ed1b From b86e0280bb5585a610783ff5392d9d439dee7ddd Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 26 Apr 2007 20:48:23 -0700 Subject: [WEXT] net_device: Don't include wext bits if not required. This patch makes the wext bits in struct net_device depend on CONFIG_WIRELESS_EXT. Signed-off-by: Johannes Berg Signed-off-by: John W. Linville Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 584c199ec2d5..e027a3750a77 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -352,12 +352,13 @@ struct net_device struct net_device_stats* (*get_stats)(struct net_device *dev); struct net_device_stats stats; +#ifdef CONFIG_WIRELESS_EXT /* List of functions to handle Wireless Extensions (instead of ioctl). * See for details. Jean II */ const struct iw_handler_def * wireless_handlers; /* Instance data managed by the core of Wireless Extensions. */ struct iw_public_data * wireless_data; - +#endif const struct ethtool_ops *ethtool_ops; /* -- cgit v1.2.3-59-g8ed1b From 16ce82d846f2e6b652a064f91c5019cfe8682be4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 26 Apr 2007 21:08:21 -0700 Subject: [SPARC64]: Convert PCI over to generic struct iommu/strbuf. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_iommu.c | 53 ++++++++++++----------- arch/sparc64/kernel/pci_psycho.c | 10 ++--- arch/sparc64/kernel/pci_sabre.c | 11 +++-- arch/sparc64/kernel/pci_schizo.c | 12 +++--- arch/sparc64/kernel/pci_sun4v.c | 34 +++++++-------- include/asm-sparc64/pbm.h | 90 ++++------------------------------------ 6 files changed, 66 insertions(+), 144 deletions(-) (limited to 'include') diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index 355ed0ba937a..66712772f494 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c @@ -1,7 +1,6 @@ -/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $ - * pci_iommu.c: UltraSparc PCI controller IOM/STC support. +/* pci_iommu.c: UltraSparc PCI controller IOM/STC support. * - * Copyright (C) 1999 David S. Miller (davem@redhat.com) + * Copyright (C) 1999, 2007 David S. 
Miller (davem@davemloft.net) * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com) */ @@ -36,7 +35,7 @@ "i" (ASI_PHYS_BYPASS_EC_E)) /* Must be invoked under the IOMMU lock. */ -static void __iommu_flushall(struct pci_iommu *iommu) +static void __iommu_flushall(struct iommu *iommu) { unsigned long tag; int entry; @@ -64,7 +63,7 @@ static void __iommu_flushall(struct pci_iommu *iommu) #define IOPTE_IS_DUMMY(iommu, iopte) \ ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa) -static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte) +static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte) { unsigned long val = iopte_val(*iopte); @@ -75,7 +74,7 @@ static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte) } /* Based largely upon the ppc64 iommu allocator. */ -static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages) +static long pci_arena_alloc(struct iommu *iommu, unsigned long npages) { struct iommu_arena *arena = &iommu->arena; unsigned long n, i, start, end, limit; @@ -124,7 +123,7 @@ static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsign __clear_bit(i, arena->map); } -void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask) +void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask) { unsigned long i, tsbbase, order, sz, num_tsb_entries; @@ -170,7 +169,7 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, iopte_make_dummy(iommu, &iommu->page_table[i]); } -static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages) +static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages) { long entry; @@ -181,12 +180,12 @@ static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npage return iommu->page_table + entry; } -static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages) +static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages) { pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages); } -static int iommu_alloc_ctx(struct pci_iommu *iommu) +static int iommu_alloc_ctx(struct iommu *iommu) { int lowest = iommu->ctx_lowest_free; int sz = IOMMU_NUM_CTXS - lowest; @@ -205,7 +204,7 @@ static int iommu_alloc_ctx(struct pci_iommu *iommu) return n; } -static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx) +static inline void iommu_free_ctx(struct iommu *iommu, int ctx) { if (likely(ctx)) { __clear_bit(ctx, iommu->ctx_bitmap); @@ -220,7 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx) */ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) { - struct pci_iommu *iommu; + struct iommu *iommu; iopte_t *iopte; unsigned long flags, order, first_page; void *ret; @@ -266,7 +265,7 @@ static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr /* Free and unmap a consistent DMA translation. 
*/ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) { - struct pci_iommu *iommu; + struct iommu *iommu; iopte_t *iopte; unsigned long flags, order, npages; @@ -291,8 +290,8 @@ static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, */ static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) { - struct pci_iommu *iommu; - struct pci_strbuf *strbuf; + struct iommu *iommu; + struct strbuf *strbuf; iopte_t *base; unsigned long flags, npages, oaddr; unsigned long i, base_paddr, ctx; @@ -343,7 +342,7 @@ bad_no_ctx: return PCI_DMA_ERROR_CODE; } -static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction) +static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction) { int limit; @@ -410,8 +409,8 @@ do_flush_sync: /* Unmap a single streaming mode DMA translation. */ static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { - struct pci_iommu *iommu; - struct pci_strbuf *strbuf; + struct iommu *iommu; + struct strbuf *strbuf; iopte_t *base; unsigned long flags, npages, ctx, i; @@ -541,8 +540,8 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, */ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - struct pci_iommu *iommu; - struct pci_strbuf *strbuf; + struct iommu *iommu; + struct strbuf *strbuf; unsigned long flags, ctx, npages, iopte_protection; iopte_t *base; u32 dma_base; @@ -626,8 +625,8 @@ bad_no_ctx: /* Unmap a set of streaming mode DMA translations. */ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - struct pci_iommu *iommu; - struct pci_strbuf *strbuf; + struct iommu *iommu; + struct strbuf *strbuf; iopte_t *base; unsigned long flags, ctx, i, npages; u32 bus_addr; @@ -684,8 +683,8 @@ static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in */ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { - struct pci_iommu *iommu; - struct pci_strbuf *strbuf; + struct iommu *iommu; + struct strbuf *strbuf; unsigned long flags, ctx, npages; iommu = pdev->dev.archdata.iommu; @@ -722,8 +721,8 @@ static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_ */ static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - struct pci_iommu *iommu; - struct pci_strbuf *strbuf; + struct iommu *iommu; + struct strbuf *strbuf; unsigned long flags, ctx, npages, i; u32 bus_addr; @@ -798,7 +797,7 @@ int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) if (pdev == NULL) { dma_addr_mask = 0xffffffff; } else { - struct pci_iommu *iommu = pdev->dev.archdata.iommu; + struct iommu *iommu = pdev->dev.archdata.iommu; dma_addr_mask = iommu->dma_addr_mask; diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index 3725910c8b2b..253d40ec2245 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -269,7 +269,7 @@ static void __psycho_check_one_stc(struct pci_controller_info *p, struct pci_pbm_info *pbm, int is_pbm_a) { - struct pci_strbuf *strbuf = &pbm->stc; + struct strbuf *strbuf = &pbm->stc; unsigned long regbase = p->pbm_A.controller_regs; 
unsigned long err_base, tag_base, line_base; u64 control; @@ -418,7 +418,7 @@ static void psycho_check_iommu_error(struct pci_controller_info *p, unsigned long afar, enum psycho_error_type type) { - struct pci_iommu *iommu = p->pbm_A.iommu; + struct iommu *iommu = p->pbm_A.iommu; unsigned long iommu_tag[16]; unsigned long iommu_data[16]; unsigned long flags; @@ -941,7 +941,7 @@ static void psycho_scan_bus(struct pci_controller_info *p) static void psycho_iommu_init(struct pci_controller_info *p) { - struct pci_iommu *iommu = p->pbm_A.iommu; + struct iommu *iommu = p->pbm_A.iommu; unsigned long i; u64 control; @@ -1131,7 +1131,7 @@ void psycho_init(struct device_node *dp, char *model_name) { struct linux_prom64_registers *pr_regs; struct pci_controller_info *p; - struct pci_iommu *iommu; + struct iommu *iommu; struct property *prop; u32 upa_portid; int is_pbm_a; @@ -1154,7 +1154,7 @@ void psycho_init(struct device_node *dp, char *model_name) prom_printf("PSYCHO: Fatal memory allocation error.\n"); prom_halt(); } - iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); if (!iommu) { prom_printf("PSYCHO: Fatal memory allocation error.\n"); prom_halt(); diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index 9a2ce0637c80..397862fbd9e1 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -1,7 +1,6 @@ -/* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $ - * pci_sabre.c: Sabre specific PCI controller support. +/* pci_sabre.c: Sabre specific PCI controller support. * - * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu) + * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) * Copyright (C) 1998, 1999 Eddie C. 
Dost (ecd@skynet.be) * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) */ @@ -499,7 +498,7 @@ static void sabre_check_iommu_error(struct pci_controller_info *p, unsigned long afsr, unsigned long afar) { - struct pci_iommu *iommu = p->pbm_A.iommu; + struct iommu *iommu = p->pbm_A.iommu; unsigned long iommu_tag[16]; unsigned long iommu_data[16]; unsigned long flags; @@ -948,7 +947,7 @@ static void sabre_iommu_init(struct pci_controller_info *p, int tsbsize, unsigned long dvma_offset, u32 dma_mask) { - struct pci_iommu *iommu = p->pbm_A.iommu; + struct iommu *iommu = p->pbm_A.iommu; unsigned long i; u64 control; @@ -1017,7 +1016,7 @@ void sabre_init(struct device_node *dp, char *model_name) { const struct linux_prom64_registers *pr_regs; struct pci_controller_info *p; - struct pci_iommu *iommu; + struct iommu *iommu; int tsbsize; const u32 *busrange; const u32 *vdma; diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 47a5aa94dcae..91a7385e5d32 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -279,7 +279,7 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino) static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, enum schizo_error_type type) { - struct pci_strbuf *strbuf = &pbm->stc; + struct strbuf *strbuf = &pbm->stc; unsigned long regbase = pbm->pbm_regs; unsigned long err_base, tag_base, line_base; u64 control; @@ -387,7 +387,7 @@ static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, enum schizo_error_type type) { - struct pci_iommu *iommu = pbm->iommu; + struct iommu *iommu = pbm->iommu; unsigned long iommu_tag[16]; unsigned long iommu_data[16]; unsigned long flags; @@ -1308,7 +1308,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm) static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) { - struct pci_iommu *iommu = pbm->iommu; + struct iommu *iommu = pbm->iommu; unsigned long i, tagbase, database; struct property *prop; u32 vdma[2], dma_mask; @@ -1580,7 +1580,7 @@ static inline int portid_compare(u32 x, u32 y, int chip_type) static void __schizo_init(struct device_node *dp, char *model_name, int chip_type) { struct pci_controller_info *p; - struct pci_iommu *iommu; + struct iommu *iommu; u32 portid; portid = of_getintprop_default(dp, "portid", 0xff); @@ -1605,13 +1605,13 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ if (!p) goto memfail; - iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); if (!iommu) goto memfail; p->pbm_A.iommu = iommu; - iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); if (!iommu) goto memfail; diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 0e99808f2121..94295c219329 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -29,7 +29,7 @@ #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) -struct pci_iommu_batch { +struct iommu_batch { struct pci_dev *pdev; /* Device mapping is for. */ unsigned long prot; /* IOMMU page protections */ unsigned long entry; /* Index into IOTSB. */ @@ -37,12 +37,12 @@ struct pci_iommu_batch { unsigned long npages; /* Number of pages in list. */ }; -static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch); +static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch); /* Interrupts must be disabled. 
*/ static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry) { - struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); + struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch); p->pdev = pdev; p->prot = prot; @@ -51,7 +51,7 @@ static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long pro } /* Interrupts must be disabled. */ -static long pci_iommu_batch_flush(struct pci_iommu_batch *p) +static long pci_iommu_batch_flush(struct iommu_batch *p) { struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller; unsigned long devhandle = pbm->devhandle; @@ -89,7 +89,7 @@ static long pci_iommu_batch_flush(struct pci_iommu_batch *p) /* Interrupts must be disabled. */ static inline long pci_iommu_batch_add(u64 phys_page) { - struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); + struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch); BUG_ON(p->npages >= PGLIST_NENTS); @@ -103,7 +103,7 @@ static inline long pci_iommu_batch_add(u64 phys_page) /* Interrupts must be disabled. */ static inline long pci_iommu_batch_end(void) { - struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); + struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch); BUG_ON(p->npages >= PGLIST_NENTS); @@ -159,7 +159,7 @@ static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsign static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) { - struct pci_iommu *iommu; + struct iommu *iommu; unsigned long flags, order, first_page, npages, n; void *ret; long entry; @@ -225,7 +225,7 @@ arena_alloc_fail: static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) { struct pci_pbm_info *pbm; - struct pci_iommu *iommu; + struct iommu *iommu; unsigned long flags, order, npages, entry; u32 devhandle; @@ -257,7 +257,7 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) { - struct pci_iommu *iommu; + struct iommu *iommu; unsigned long flags, npages, oaddr; unsigned long i, base_paddr; u32 bus_addr, ret; @@ -321,7 +321,7 @@ iommu_map_fail: static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { struct pci_pbm_info *pbm; - struct pci_iommu *iommu; + struct iommu *iommu; unsigned long flags, npages; long entry; u32 devhandle; @@ -456,7 +456,7 @@ iommu_map_failed: static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - struct pci_iommu *iommu; + struct iommu *iommu; unsigned long flags, npages, prot; u32 dma_base; struct scatterlist *sgtmp; @@ -532,7 +532,7 @@ iommu_map_failed: static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { struct pci_pbm_info *pbm; - struct pci_iommu *iommu; + struct iommu *iommu; unsigned long flags, i, npages; long entry; u32 devhandle, bus_addr; @@ -705,7 +705,7 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p) } static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, - struct pci_iommu *iommu) + struct iommu *iommu) { struct iommu_arena *arena = &iommu->arena; unsigned long i, cnt = 0; @@ -734,7 +734,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) { - struct pci_iommu *iommu = pbm->iommu; + struct iommu *iommu = pbm->iommu; struct property 
*prop; unsigned long num_tsb_entries, sz; u32 vdma[2], dma_mask, dma_offset; @@ -1279,7 +1279,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node void sun4v_pci_init(struct device_node *dp, char *model_name) { struct pci_controller_info *p; - struct pci_iommu *iommu; + struct iommu *iommu; struct property *prop; struct linux_prom64_registers *regs; u32 devhandle; @@ -1319,13 +1319,13 @@ void sun4v_pci_init(struct device_node *dp, char *model_name) if (!p) goto fatal_memory_error; - iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); if (!iommu) goto fatal_memory_error; p->pbm_A.iommu = iommu; - iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC); if (!iommu) goto fatal_memory_error; diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 4a0ed2ea950c..c008cecca149 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -1,7 +1,6 @@ -/* $Id: pbm.h,v 1.27 2001/08/12 13:18:23 davem Exp $ - * pbm.h: UltraSparc PCI controller software state. +/* pbm.h: UltraSparc PCI controller software state. * - * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) + * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) */ #ifndef __SPARC64_PBM_H @@ -30,84 +29,7 @@ * PCI bus. */ -struct pci_controller_info; - -/* This contains the software state necessary to drive a PCI - * controller's IOMMU. - */ -struct pci_iommu { - /* This protects the controller's IOMMU and all - * streaming buffers underneath. - */ - spinlock_t lock; - - struct iommu_arena arena; - - /* IOMMU page table, a linear array of ioptes. */ - iopte_t *page_table; /* The page table itself. */ - - /* Base PCI memory space address where IOMMU mappings - * begin. - */ - u32 page_table_map_base; - - /* IOMMU Controller Registers */ - unsigned long iommu_control; /* IOMMU control register */ - unsigned long iommu_tsbbase; /* IOMMU page table base register */ - unsigned long iommu_flush; /* IOMMU page flush register */ - unsigned long iommu_ctxflush; /* IOMMU context flush register */ - - /* This is a register in the PCI controller, which if - * read will have no side-effects but will guarantee - * completion of all previous writes into IOMMU/STC. - */ - unsigned long write_complete_reg; - - /* In order to deal with some buggy third-party PCI bridges that - * do wrong prefetching, we never mark valid mappings as invalid. - * Instead we point them at this dummy page. - */ - unsigned long dummy_page; - unsigned long dummy_page_pa; - - /* CTX allocation. */ - unsigned long ctx_lowest_free; - DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS); - - /* Here a PCI controller driver describes the areas of - * PCI memory space where DMA to/from physical memory - * are addressed. Drivers interrogate the PCI layer - * if their device has addressing limitations. They - * do so via pci_dma_supported, and pass in a mask of - * DMA address bits their device can actually drive. - * - * The test for being usable is: - * (device_mask & dma_addr_mask) == dma_addr_mask - */ - u32 dma_addr_mask; -}; - -extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask); - -/* This describes a PCI bus module's streaming buffer. */ -struct pci_strbuf { - int strbuf_enabled; /* Present and using it? 
*/ - - /* Streaming Buffer Control Registers */ - unsigned long strbuf_control; /* STC control register */ - unsigned long strbuf_pflush; /* STC page flush register */ - unsigned long strbuf_fsync; /* STC flush synchronization reg */ - unsigned long strbuf_ctxflush; /* STC context flush register */ - unsigned long strbuf_ctxmatch_base; /* STC context flush match reg */ - unsigned long strbuf_flushflag_pa; /* Physical address of flush flag */ - volatile unsigned long *strbuf_flushflag; /* The flush flag itself */ - - /* And this is the actual flush flag area. - * We allocate extra because the chips require - * a 64-byte aligned area. - */ - volatile unsigned long __flushflag_buf[(64 + (64 - 1)) / sizeof(long)]; -}; +extern void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask); #define PCI_STC_FLUSHFLAG_INIT(STC) \ (*((STC)->strbuf_flushflag) = 0UL) @@ -120,6 +42,8 @@ struct pci_strbuf { #define PROM_PCIRNG_MAX 64 #define PROM_PCIIMAP_MAX 64 +struct pci_controller_info; + struct pci_pbm_info { /* PCI controller we sit under. */ struct pci_controller_info *parent; @@ -186,10 +110,10 @@ struct pci_pbm_info { #endif /* !(CONFIG_PCI_MSI) */ /* This PBM's streaming buffer. */ - struct pci_strbuf stc; + struct strbuf stc; /* IOMMU state, potentially shared by both PBM segments. */ - struct pci_iommu *iommu; + struct iommu *iommu; /* Now things for the actual PCI bus probes. */ unsigned int pci_first_busno; -- cgit v1.2.3-59-g8ed1b From 801c135ce73d5df1caf3eca35b66a10824ae0707 Mon Sep 17 00:00:00 2001 From: "Artem B. Bityutskiy" Date: Tue, 27 Jun 2006 12:22:22 +0400 Subject: UBI: Unsorted Block Images UBI (Latin: "where?") manages multiple logical volumes on a single flash device, specifically supporting NAND flash devices. UBI provides a flexible partitioning concept which still allows for wear-levelling across the whole flash device. In a sense, UBI may be compared to the Logical Volume Manager (LVM). Whereas LVM maps logical sector numbers to physical HDD sector numbers, UBI maps logical eraseblocks to physical eraseblocks. More information may be found at http://www.linux-mtd.infradead.org/doc/ubi.html Partitioning/Re-partitioning An UBI volume occupies a certain number of erase blocks. This is limited by a configured maximum volume size, which could also be viewed as the partition size. Each individual UBI volume's size can be changed independently of the other UBI volumes, provided that the sum of all volume sizes doesn't exceed a certain limit. UBI supports dynamic volumes and static volumes. Static volumes are read-only and their contents are protected by CRC check sums. Bad eraseblocks handling UBI transparently handles bad eraseblocks. When a physical eraseblock becomes bad, it is substituted by a good physical eraseblock, and the user does not even notice this. Scrubbing On a NAND flash bit flips can occur on any write operation, sometimes also on read. If bit flips persist on the device, at first they can still be corrected by ECC, but once they accumulate, correction will become impossible. Thus it is best to actively scrub the affected eraseblock, by first copying it to a free eraseblock and then erasing the original. The UBI layer performs this type of scrubbing under the covers, transparently to the UBI volume users. Erase Counts UBI maintains an erase count header per eraseblock. This frees higher-level layers (like file systems) from doing this and allows for centralized erase count management instead. 
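As an aside, the LEB-to-PEB mapping and the per-eraseblock erase counters described above can be illustrated with a minimal, self-contained userspace sketch. This is an editor's illustration under simplified assumptions, not UBI code; every name in it (pick_least_worn_peb, remap_leb, the toy array sizes) is invented for the example. It keeps a small mapping table and always remaps a logical eraseblock onto the least-erased free physical eraseblock, which is the core of the wear-levelling idea:

#include <stdint.h>
#include <stdio.h>

#define NUM_PEBS 8	/* physical eraseblocks on a toy device */
#define NUM_LEBS 4	/* logical eraseblocks exposed upwards  */
#define UNMAPPED -1

struct peb {
	uint64_t erase_count;	/* how often this PEB has been erased   */
	int in_use;		/* currently backing some LEB?           */
};

static struct peb pebs[NUM_PEBS];
static int leb_to_peb[NUM_LEBS];	/* the LEB -> PEB mapping table */

/* Pick the free PEB with the lowest erase count (naive wear levelling). */
static int pick_least_worn_peb(void)
{
	int best = -1;

	for (int i = 0; i < NUM_PEBS; i++)
		if (!pebs[i].in_use &&
		    (best < 0 || pebs[i].erase_count < pebs[best].erase_count))
			best = i;
	return best;
}

/* "Update" a LEB: move it to a fresh PEB, then erase and free the old one. */
static int remap_leb(int leb)
{
	int old = leb_to_peb[leb];
	int fresh = pick_least_worn_peb();

	if (fresh < 0)
		return -1;		/* no free PEB left */
	pebs[fresh].in_use = 1;
	leb_to_peb[leb] = fresh;
	if (old != UNMAPPED) {
		pebs[old].erase_count++;	/* erase the stale copy */
		pebs[old].in_use = 0;
	}
	return fresh;
}

int main(void)
{
	for (int i = 0; i < NUM_LEBS; i++)
		leb_to_peb[i] = UNMAPPED;

	for (int round = 0; round < 5; round++)
		remap_leb(0);		/* rewrite LEB 0 five times */

	for (int i = 0; i < NUM_PEBS; i++)
		printf("PEB %d: erase_count=%llu in_use=%d\n", i,
		       (unsigned long long)pebs[i].erase_count, pebs[i].in_use);
	return 0;
}

A real implementation must of course persist the counters on the flash itself (UBI keeps an erase count header in each physical eraseblock) and deal with bad blocks, both of which the sketch ignores.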
The erase counts are used by the wear-levelling algorithm in the UBI layer. The algorithm itself is exchangeable. Booting from NAND For booting directly from NAND flash the hardware must at least be capable of fetching and executing a small portion of the NAND flash. Some NAND flash controllers have this kind of support. They usually limit the window to a few kilobytes in erase block 0. This "initial program loader" (IPL) must then contain sufficient logic to load and execute the next boot phase. Due to bad eraseblocks, which may be randomly scattered over the flash device, it is problematic to store the "secondary program loader" (SPL) statically. Also, due to bit-flips it may become corrupted over time. UBI solves this problem gracefully by storing the SPL in a small static UBI volume. UBI volumes vs. static partitions UBI volumes are still very similar to static MTD partitions: * both consist of eraseblocks (logical eraseblocks in the case of UBI volumes, and physical eraseblocks in the case of static partitions); * both support three basic operations - read, write, erase. But UBI volumes have the following advantages over traditional static MTD partitions: * there are no eraseblock wear-leveling constraints in the case of UBI volumes, so the user does not have to care about this; * there are no bit-flips and bad eraseblocks in the case of UBI volumes. So, UBI volumes may be considered flash devices with relaxed restrictions. Where can it be found? Documentation, kernel code and applications can be found in the MTD gits. What are the applications for? The applications help to create binary flash images for two purposes: pfi files (partial flash images) for in-system update of UBI volumes, and plain binary images, with or without OOB data in the case of NAND, for a manufacturing step. Furthermore, some tools have been and will be created that allow flash content analysis after a system has crashed. Who did UBI? The original ideas on which UBI is based were developed by Andreas Arnez, Frank Haverkamp and Thomas Gleixner. Josh W. Boyer and some others were involved too. The implementation of the kernel layer was done by Artem B. Bityutskiy. The user-space applications and tools were written by Oliver Lohmann with contributions from Frank Haverkamp, Andreas Arnez, and Artem. Joern Engel contributed a patch which modifies JFFS2 so that it can be run on a UBI volume. Thomas Gleixner did modifications to the NAND layer. Alexander Schmidt did some testing work as well as core functionality improvements. Signed-off-by: Artem B. 
Bityutskiy Signed-off-by: Frank Haverkamp --- drivers/mtd/Kconfig | 2 + drivers/mtd/Makefile | 2 + drivers/mtd/ubi/Kconfig | 58 ++ drivers/mtd/ubi/Kconfig.debug | 104 +++ drivers/mtd/ubi/Makefile | 7 + drivers/mtd/ubi/build.c | 848 +++++++++++++++++++++ drivers/mtd/ubi/cdev.c | 722 ++++++++++++++++++ drivers/mtd/ubi/debug.c | 224 ++++++ drivers/mtd/ubi/debug.h | 161 ++++ drivers/mtd/ubi/eba.c | 1241 ++++++++++++++++++++++++++++++ drivers/mtd/ubi/gluebi.c | 324 ++++++++ drivers/mtd/ubi/io.c | 1259 +++++++++++++++++++++++++++++++ drivers/mtd/ubi/kapi.c | 575 ++++++++++++++ drivers/mtd/ubi/misc.c | 105 +++ drivers/mtd/ubi/scan.c | 1368 +++++++++++++++++++++++++++++++++ drivers/mtd/ubi/scan.h | 167 ++++ drivers/mtd/ubi/ubi.h | 535 +++++++++++++ drivers/mtd/ubi/upd.c | 348 +++++++++ drivers/mtd/ubi/vmt.c | 809 ++++++++++++++++++++ drivers/mtd/ubi/vtbl.c | 809 ++++++++++++++++++++ drivers/mtd/ubi/wl.c | 1671 +++++++++++++++++++++++++++++++++++++++++ include/linux/mtd/ubi.h | 202 +++++ include/mtd/Kbuild | 2 + include/mtd/mtd-abi.h | 1 + include/mtd/ubi-header.h | 360 +++++++++ include/mtd/ubi-user.h | 161 ++++ 26 files changed, 12065 insertions(+) create mode 100644 drivers/mtd/ubi/Kconfig create mode 100644 drivers/mtd/ubi/Kconfig.debug create mode 100644 drivers/mtd/ubi/Makefile create mode 100644 drivers/mtd/ubi/build.c create mode 100644 drivers/mtd/ubi/cdev.c create mode 100644 drivers/mtd/ubi/debug.c create mode 100644 drivers/mtd/ubi/debug.h create mode 100644 drivers/mtd/ubi/eba.c create mode 100644 drivers/mtd/ubi/gluebi.c create mode 100644 drivers/mtd/ubi/io.c create mode 100644 drivers/mtd/ubi/kapi.c create mode 100644 drivers/mtd/ubi/misc.c create mode 100644 drivers/mtd/ubi/scan.c create mode 100644 drivers/mtd/ubi/scan.h create mode 100644 drivers/mtd/ubi/ubi.h create mode 100644 drivers/mtd/ubi/upd.c create mode 100644 drivers/mtd/ubi/vmt.c create mode 100644 drivers/mtd/ubi/vtbl.c create mode 100644 drivers/mtd/ubi/wl.c create mode 100644 include/linux/mtd/ubi.h create mode 100644 include/mtd/ubi-header.h create mode 100644 include/mtd/ubi-user.h (limited to 'include') diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 26f75c299440..6d1b91bf7ad5 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -292,5 +292,7 @@ source "drivers/mtd/nand/Kconfig" source "drivers/mtd/onenand/Kconfig" +source "drivers/mtd/ubi/Kconfig" + endmenu diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index c130e6261adf..92055405cb30 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile @@ -28,3 +28,5 @@ nftl-objs := nftlcore.o nftlmount.o inftl-objs := inftlcore.o inftlmount.o obj-y += chips/ maps/ devices/ nand/ onenand/ + +obj-$(CONFIG_MTD_UBI) += ubi/ diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig new file mode 100644 index 000000000000..b9daf159a4a7 --- /dev/null +++ b/drivers/mtd/ubi/Kconfig @@ -0,0 +1,58 @@ +# drivers/mtd/ubi/Kconfig + +menu "UBI - Unsorted block images" + depends on MTD + +config MTD_UBI + tristate "Enable UBI" + depends on MTD + select CRC32 + help + UBI is a software layer above MTD layer which admits of LVM-like + logical volumes on top of MTD devices, hides some complexities of + flash chips like wear and bad blocks and provides some other useful + capabilities. Please, consult the MTD web site for more details + (www.linux-mtd.infradead.org). 
+ +config MTD_UBI_WL_THRESHOLD + int "UBI wear-leveling threshold" + default 4096 + range 2 65536 + depends on MTD_UBI + help + This parameter defines the maximum difference between the highest + erase counter value and the lowest erase counter value of eraseblocks + of UBI devices. When this threshold is exceeded, UBI starts performing + wear leveling by moving data from eraseblocks with a low erase + counter to eraseblocks with a high erase counter. Leave the default + value if unsure. + +config MTD_UBI_BEB_RESERVE + int "Percentage of reserved eraseblocks for bad eraseblocks handling" + default 1 + range 0 25 + depends on MTD_UBI + help + If the MTD device can have bad eraseblocks (e.g. NAND flash), UBI + reserves some amount of physical eraseblocks to handle new bad + eraseblocks. For example, if a flash physical eraseblock becomes bad, + UBI uses these reserved physical eraseblocks to relocate the bad one. + This option specifies how many physical eraseblocks will be reserved + for bad eraseblock handling (as a percentage of the total number of + good flash eraseblocks). If the underlying flash cannot have bad + eraseblocks (e.g. NOR flash), this value is ignored and nothing is + reserved. Leave the default value if unsure. + +config MTD_UBI_GLUEBI + bool "Emulate MTD devices" + default n + depends on MTD_UBI + help + This option enables MTD device emulation on top of UBI volumes: for + each UBI volume an MTD device is created, and all I/O to this MTD + device is redirected to the UBI volume. This is handy to make + MTD-oriented software (like JFFS2) work on top of UBI. Do not enable + this if no legacy software will be used. + +source "drivers/mtd/ubi/Kconfig.debug" +endmenu diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug new file mode 100644 index 000000000000..1e2ee22edeff --- /dev/null +++ b/drivers/mtd/ubi/Kconfig.debug @@ -0,0 +1,104 @@ +comment "UBI debugging options" + depends on MTD_UBI + +config MTD_UBI_DEBUG + bool "UBI debugging" + depends on SYSFS + depends on MTD_UBI + select DEBUG_FS + select KALLSYMS_ALL + help + This option enables UBI debugging. + +config MTD_UBI_DEBUG_MSG + bool "UBI debugging messages" + depends on MTD_UBI_DEBUG + default n + help + This option enables UBI debugging messages. + +config MTD_UBI_DEBUG_PARANOID + bool "Extra self-checks" + default n + depends on MTD_UBI_DEBUG + help + This option enables extra checks in UBI code. Note that this slows UBI + down significantly. + +config MTD_UBI_DEBUG_DISABLE_BGT + bool "Do not enable the UBI background thread" + depends on MTD_UBI_DEBUG + default n + help + This option switches the background thread off by default. The thread + may also be enabled/disabled via UBI sysfs. + +config MTD_UBI_DEBUG_USERSPACE_IO + bool "Direct user-space write/erase support" + default n + depends on MTD_UBI_DEBUG + help + By default, users cannot directly write and erase individual + eraseblocks of dynamic volumes, and have to use the update operation + instead. This option enables this capability - it is very useful for + debugging and testing. + +config MTD_UBI_DEBUG_EMULATE_BITFLIPS + bool "Emulate flash bit-flips" + depends on MTD_UBI_DEBUG + default n + help + This option emulates bit-flips with probability 1/50, which in turn + causes scrubbing. Useful for debugging and stressing UBI. + +config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES + bool "Emulate flash write failures" + depends on MTD_UBI_DEBUG + default n + help + This option emulates write failures with probability 1/100. 
Useful for + debugging and testing how UBI handlines errors. + +config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES + bool "Emulate flash erase failures" + depends on MTD_UBI_DEBUG + default n + help + This option emulates erase failures with probability 1/100. Useful for + debugging and testing how UBI handlines errors. + +menu "Additional UBI debugging messages" + depends on MTD_UBI_DEBUG + +config MTD_UBI_DEBUG_MSG_BLD + bool "Additional UBI initialization and build messages" + default n + depends on MTD_UBI_DEBUG + help + This option enables detailed UBI initialization and device build + debugging messages. + +config MTD_UBI_DEBUG_MSG_EBA + bool "Eraseblock association unit messages" + default n + depends on MTD_UBI_DEBUG + help + This option enables debugging messages from the UBI eraseblock + association unit. + +config MTD_UBI_DEBUG_MSG_WL + bool "Wear-leveling unit messages" + default n + depends on MTD_UBI_DEBUG + help + This option enables debugging messages from the UBI wear-leveling + unit. + +config MTD_UBI_DEBUG_MSG_IO + bool "Input/output unit messages" + default n + depends on MTD_UBI_DEBUG + help + This option enables debugging messages from the UBI input/output unit. + +endmenu # UBI debugging messages diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile new file mode 100644 index 000000000000..dd834e04151b --- /dev/null +++ b/drivers/mtd/ubi/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_MTD_UBI) += ubi.o + +ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o +ubi-y += misc.o + +ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o +ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c new file mode 100644 index 000000000000..555d594d1811 --- /dev/null +++ b/drivers/mtd/ubi/build.c @@ -0,0 +1,848 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2007 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём), + * Frank Haverkamp + */ + +/* + * This file includes UBI initialization and building of UBI devices. At the + * moment UBI devices may only be added while UBI is initialized, but dynamic + * device add/remove functionality is planned. Also, at the moment we only + * attach UBI devices by scanning, which will become a bottleneck when flashes + * reach certain large size. Then one may improve UBI and add other methods. + */ + +#include +#include +#include +#include +#include +#include "ubi.h" + +/* Maximum length of the 'mtd=' parameter */ +#define MTD_PARAM_LEN_MAX 64 + +/** + * struct mtd_dev_param - MTD device parameter description data structure. 
+ * @name: MTD device name or number string + * @vid_hdr_offs: VID header offset + * @data_offs: data offset + */ +struct mtd_dev_param +{ + char name[MTD_PARAM_LEN_MAX]; + int vid_hdr_offs; + int data_offs; +}; + +/* Numbers of elements set in the @mtd_dev_param array */ +static int mtd_devs = 0; + +/* MTD devices specification parameters */ +static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; + +/* Number of UBI devices in system */ +int ubi_devices_cnt; + +/* All UBI devices in system */ +struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; + +/* Root UBI "class" object (corresponds to '//class/ubi/') */ +struct class *ubi_class; + +/* "Show" method for files in '//class/ubi/' */ +static ssize_t ubi_version_show(struct class *class, char *buf) +{ + return sprintf(buf, "%d\n", UBI_VERSION); +} + +/* UBI version attribute ('//class/ubi/version') */ +static struct class_attribute ubi_version = + __ATTR(version, S_IRUGO, ubi_version_show, NULL); + +static ssize_t dev_attribute_show(struct device *dev, + struct device_attribute *attr, char *buf); + +/* UBI device attributes (correspond to files in '//class/ubi/ubiX') */ +static struct device_attribute dev_eraseblock_size = + __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_avail_eraseblocks = + __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_total_eraseblocks = + __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_volumes_count = + __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_max_ec = + __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_reserved_for_bad = + __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_bad_peb_count = + __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_max_vol_count = + __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_min_io_size = + __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_bgt_enabled = + __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); + +/* "Show" method for files in '//class/ubi/ubiX/' */ +static ssize_t dev_attribute_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct ubi_device *ubi; + + ubi = container_of(dev, struct ubi_device, dev); + if (attr == &dev_eraseblock_size) + return sprintf(buf, "%d\n", ubi->leb_size); + else if (attr == &dev_avail_eraseblocks) + return sprintf(buf, "%d\n", ubi->avail_pebs); + else if (attr == &dev_total_eraseblocks) + return sprintf(buf, "%d\n", ubi->good_peb_count); + else if (attr == &dev_volumes_count) + return sprintf(buf, "%d\n", ubi->vol_count); + else if (attr == &dev_max_ec) + return sprintf(buf, "%d\n", ubi->max_ec); + else if (attr == &dev_reserved_for_bad) + return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); + else if (attr == &dev_bad_peb_count) + return sprintf(buf, "%d\n", ubi->bad_peb_count); + else if (attr == &dev_max_vol_count) + return sprintf(buf, "%d\n", ubi->vtbl_slots); + else if (attr == &dev_min_io_size) + return sprintf(buf, "%d\n", ubi->min_io_size); + else if (attr == &dev_bgt_enabled) + return sprintf(buf, "%d\n", ubi->thread_enabled); + else + BUG(); + + return 0; +} + +/* Fake "release" method for UBI devices */ +static void dev_release(struct device *dev) { } + +/** + * ubi_sysfs_init - initialize 
sysfs for an UBI device. + * @ubi: UBI device description object + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +static int ubi_sysfs_init(struct ubi_device *ubi) +{ + int err; + + ubi->dev.release = dev_release; + ubi->dev.devt = MKDEV(ubi->major, 0); + ubi->dev.class = ubi_class; + sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); + err = device_register(&ubi->dev); + if (err) + goto out; + + err = device_create_file(&ubi->dev, &dev_eraseblock_size); + if (err) + goto out_unregister; + err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); + if (err) + goto out_eraseblock_size; + err = device_create_file(&ubi->dev, &dev_total_eraseblocks); + if (err) + goto out_avail_eraseblocks; + err = device_create_file(&ubi->dev, &dev_volumes_count); + if (err) + goto out_total_eraseblocks; + err = device_create_file(&ubi->dev, &dev_max_ec); + if (err) + goto out_volumes_count; + err = device_create_file(&ubi->dev, &dev_reserved_for_bad); + if (err) + goto out_volumes_max_ec; + err = device_create_file(&ubi->dev, &dev_bad_peb_count); + if (err) + goto out_reserved_for_bad; + err = device_create_file(&ubi->dev, &dev_max_vol_count); + if (err) + goto out_bad_peb_count; + err = device_create_file(&ubi->dev, &dev_min_io_size); + if (err) + goto out_max_vol_count; + err = device_create_file(&ubi->dev, &dev_bgt_enabled); + if (err) + goto out_min_io_size; + + return 0; + +out_min_io_size: + device_remove_file(&ubi->dev, &dev_min_io_size); +out_max_vol_count: + device_remove_file(&ubi->dev, &dev_max_vol_count); +out_bad_peb_count: + device_remove_file(&ubi->dev, &dev_bad_peb_count); +out_reserved_for_bad: + device_remove_file(&ubi->dev, &dev_reserved_for_bad); +out_volumes_max_ec: + device_remove_file(&ubi->dev, &dev_max_ec); +out_volumes_count: + device_remove_file(&ubi->dev, &dev_volumes_count); +out_total_eraseblocks: + device_remove_file(&ubi->dev, &dev_total_eraseblocks); +out_avail_eraseblocks: + device_remove_file(&ubi->dev, &dev_avail_eraseblocks); +out_eraseblock_size: + device_remove_file(&ubi->dev, &dev_eraseblock_size); +out_unregister: + device_unregister(&ubi->dev); +out: + ubi_err("failed to initialize sysfs for %s", ubi->ubi_name); + return err; +} + +/** + * ubi_sysfs_close - close sysfs for an UBI device. + * @ubi: UBI device description object + */ +static void ubi_sysfs_close(struct ubi_device *ubi) +{ + device_remove_file(&ubi->dev, &dev_bgt_enabled); + device_remove_file(&ubi->dev, &dev_min_io_size); + device_remove_file(&ubi->dev, &dev_max_vol_count); + device_remove_file(&ubi->dev, &dev_bad_peb_count); + device_remove_file(&ubi->dev, &dev_reserved_for_bad); + device_remove_file(&ubi->dev, &dev_max_ec); + device_remove_file(&ubi->dev, &dev_volumes_count); + device_remove_file(&ubi->dev, &dev_total_eraseblocks); + device_remove_file(&ubi->dev, &dev_avail_eraseblocks); + device_remove_file(&ubi->dev, &dev_eraseblock_size); + device_unregister(&ubi->dev); +} + +/** + * kill_volumes - destroy all volumes. + * @ubi: UBI device description object + */ +static void kill_volumes(struct ubi_device *ubi) +{ + int i; + + for (i = 0; i < ubi->vtbl_slots; i++) + if (ubi->volumes[i]) + ubi_free_volume(ubi, i); +} + +/** + * uif_init - initialize user interfaces for an UBI device. + * @ubi: UBI device description object + * + * This function returns zero in case of success and a negative error code in + * case of failure. 
+ */ +static int uif_init(struct ubi_device *ubi) +{ + int i, err; + dev_t dev; + + mutex_init(&ubi->vtbl_mutex); + spin_lock_init(&ubi->volumes_lock); + + sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); + + /* + * Major numbers for the UBI character devices are allocated + * dynamically. Major numbers of volume character devices are + * equivalent to ones of the corresponding UBI character device. Minor + * numbers of UBI character devices are 0, while minor numbers of + * volume character devices start from 1. Thus, we allocate one major + * number and ubi->vtbl_slots + 1 minor numbers. + */ + err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name); + if (err) { + ubi_err("cannot register UBI character devices"); + return err; + } + + cdev_init(&ubi->cdev, &ubi_cdev_operations); + ubi->major = MAJOR(dev); + dbg_msg("%s major is %u", ubi->ubi_name, ubi->major); + ubi->cdev.owner = THIS_MODULE; + + dev = MKDEV(ubi->major, 0); + err = cdev_add(&ubi->cdev, dev, 1); + if (err) { + ubi_err("cannot add character device %s", ubi->ubi_name); + goto out_unreg; + } + + err = ubi_sysfs_init(ubi); + if (err) + goto out_cdev; + + for (i = 0; i < ubi->vtbl_slots; i++) + if (ubi->volumes[i]) { + err = ubi_add_volume(ubi, i); + if (err) + goto out_volumes; + } + + return 0; + +out_volumes: + kill_volumes(ubi); + ubi_sysfs_close(ubi); +out_cdev: + cdev_del(&ubi->cdev); +out_unreg: + unregister_chrdev_region(MKDEV(ubi->major, 0), + ubi->vtbl_slots + 1); + return err; +} + +/** + * uif_close - close user interfaces for an UBI device. + * @ubi: UBI device description object + */ +static void uif_close(struct ubi_device *ubi) +{ + kill_volumes(ubi); + ubi_sysfs_close(ubi); + cdev_del(&ubi->cdev); + unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1); +} + +/** + * attach_by_scanning - attach an MTD device using scanning method. + * @ubi: UBI device descriptor + * + * This function returns zero in case of success and a negative error code in + * case of failure. + * + * Note, currently this is the only method to attach UBI devices. Hopefully in + * the future we'll have more scalable attaching methods and avoid full media + * scanning. But even in this case scanning will be needed as a fall-back + * attaching method if there are some on-flash table corruptions. + */ +static int attach_by_scanning(struct ubi_device *ubi) +{ + int err; + struct ubi_scan_info *si; + + si = ubi_scan(ubi); + if (IS_ERR(si)) + return PTR_ERR(si); + + ubi->bad_peb_count = si->bad_peb_count; + ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; + ubi->max_ec = si->max_ec; + ubi->mean_ec = si->mean_ec; + + err = ubi_read_volume_table(ubi, si); + if (err) + goto out_si; + + err = ubi_wl_init_scan(ubi, si); + if (err) + goto out_vtbl; + + err = ubi_eba_init_scan(ubi, si); + if (err) + goto out_wl; + + ubi_scan_destroy_si(si); + return 0; + +out_wl: + ubi_wl_close(ubi); +out_vtbl: + kfree(ubi->vtbl); +out_si: + ubi_scan_destroy_si(si); + return err; +} + +/** + * io_init - initialize I/O unit for a given UBI device. 
+ * @ubi: UBI device description object + * + * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are + * assumed: + * o EC header is always at offset zero - this cannot be changed; + * o VID header starts just after the EC header at the closest address + * aligned to @io->@hdrs_min_io_size; + * o data starts just after the VID header at the closest address aligned to + * @io->@min_io_size + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +static int io_init(struct ubi_device *ubi) +{ + if (ubi->mtd->numeraseregions != 0) { + /* + * Some flashes have several erase regions. Different regions + * may have different eraseblock size and other + * characteristics. It looks like mostly multi-region flashes + * have one "main" region and one or more small regions to + * store boot loader code or boot parameters or whatever. I + * guess we should just pick the largest region. But this is + * not implemented. + */ + ubi_err("multiple regions, not implemented"); + return -EINVAL; + } + + /* + * Note, in this implementation we support MTD devices with 0x7FFFFFFF + * physical eraseblocks maximum. + */ + + ubi->peb_size = ubi->mtd->erasesize; + ubi->peb_count = ubi->mtd->size / ubi->mtd->erasesize; + ubi->flash_size = ubi->mtd->size; + + if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) + ubi->bad_allowed = 1; + + ubi->min_io_size = ubi->mtd->writesize; + ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; + + /* Make sure minimal I/O unit is power of 2 */ + if (ubi->min_io_size == 0 || + (ubi->min_io_size & (ubi->min_io_size - 1))) { + ubi_err("bad min. I/O unit"); + return -EINVAL; + } + + ubi_assert(ubi->hdrs_min_io_size > 0); + ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size); + ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0); + + /* Calculate default aligned sizes of EC and VID headers */ + ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); + ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); + + dbg_msg("min_io_size %d", ubi->min_io_size); + dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size); + dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize); + dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize); + + if (ubi->vid_hdr_offset == 0) + /* Default offset */ + ubi->vid_hdr_offset = ubi->vid_hdr_aloffset = + ubi->ec_hdr_alsize; + else { + ubi->vid_hdr_aloffset = ubi->vid_hdr_offset & + ~(ubi->hdrs_min_io_size - 1); + ubi->vid_hdr_shift = ubi->vid_hdr_offset - + ubi->vid_hdr_aloffset; + } + + /* Similar for the data offset */ + if (ubi->leb_start == 0) { + ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize; + ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); + } + + dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); + dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); + dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift); + dbg_msg("leb_start %d", ubi->leb_start); + + /* The shift must be aligned to 32-bit boundary */ + if (ubi->vid_hdr_shift % 4) { + ubi_err("unaligned VID header shift %d", + ubi->vid_hdr_shift); + return -EINVAL; + } + + /* Check sanity */ + if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || + ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || + ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || + ubi->leb_start % ubi->min_io_size) { + ubi_err("bad VID header (%d) or data offsets (%d)", + ubi->vid_hdr_offset, ubi->leb_start); + return -EINVAL; + } + + /* + * It may happen that EC and VID headers are situated in one 
minimal + * I/O unit. In this case we can only accept this UBI image in + * read-only mode. + */ + if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { + ubi_warn("EC and VID headers are in the same minimal I/O unit, " + "switch to read-only mode"); + ubi->ro_mode = 1; + } + + ubi->leb_size = ubi->peb_size - ubi->leb_start; + + if (!(ubi->mtd->flags & MTD_WRITEABLE)) { + ubi_msg("MTD device %d is write-protected, attach in " + "read-only mode", ubi->mtd->index); + ubi->ro_mode = 1; + } + + dbg_msg("leb_size %d", ubi->leb_size); + dbg_msg("ro_mode %d", ubi->ro_mode); + + /* + * Note, ideally, we have to initialize ubi->bad_peb_count here. But + * unfortunately, MTD does not provide this information. We should loop + * over all physical eraseblocks and invoke mtd->block_is_bad() for + * each physical eraseblock. So, we skip ubi->bad_peb_count + * uninitialized and initialize it after scanning. + */ + + return 0; +} + +/** + * attach_mtd_dev - attach an MTD device. + * @mtd_dev: MTD device name or number string + * @vid_hdr_offset: VID header offset + * @data_offset: data offset + * + * This function attaches an MTD device to UBI. It first treats @mtd_dev as the + * MTD device name, and tries to open it by this name. If it is unable to open, + * it tries to convert @mtd_dev to an integer and open the MTD device by its + * number. Returns zero in case of success and a negative error code in case of + * failure. + */ +static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, + int data_offset) +{ + struct ubi_device *ubi; + struct mtd_info *mtd; + int i, err; + + mtd = get_mtd_device_nm(mtd_dev); + if (IS_ERR(mtd)) { + int mtd_num; + char *endp; + + if (PTR_ERR(mtd) != -ENODEV) + return PTR_ERR(mtd); + + /* + * Probably this is not MTD device name but MTD device number - + * check this out. 
+ */ + mtd_num = simple_strtoul(mtd_dev, &endp, 0); + if (*endp != '\0' || mtd_dev == endp) { + ubi_err("incorrect MTD device: \"%s\"", mtd_dev); + return -ENODEV; + } + + mtd = get_mtd_device(NULL, mtd_num); + if (IS_ERR(mtd)) + return PTR_ERR(mtd); + } + + /* Check if we already have the same MTD device attached */ + for (i = 0; i < ubi_devices_cnt; i++) + if (ubi_devices[i]->mtd->index == mtd->index) { + ubi_err("mtd%d is already attached to ubi%d", + mtd->index, i); + err = -EINVAL; + goto out_mtd; + } + + ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device), + GFP_KERNEL); + if (!ubi) { + err = -ENOMEM; + goto out_mtd; + } + + ubi->ubi_num = ubi_devices_cnt; + ubi->mtd = mtd; + + dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d", + ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset); + + ubi->vid_hdr_offset = vid_hdr_offset; + ubi->leb_start = data_offset; + err = io_init(ubi); + if (err) + goto out_free; + + err = attach_by_scanning(ubi); + if (err) { + dbg_err("failed to attach by scanning, error %d", err); + goto out_free; + } + + err = uif_init(ubi); + if (err) + goto out_detach; + + ubi_devices_cnt += 1; + + ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt); + ubi_msg("MTD device name: \"%s\"", ubi->mtd->name); + ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); + ubi_msg("physical eraseblock size: %d bytes (%d KiB)", + ubi->peb_size, ubi->peb_size >> 10); + ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size); + ubi_msg("number of good PEBs: %d", ubi->good_peb_count); + ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); + ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size); + ubi_msg("VID header offset: %d (aligned %d)", + ubi->vid_hdr_offset, ubi->vid_hdr_aloffset); + ubi_msg("data offset: %d", ubi->leb_start); + ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); + ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); + ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); + ubi_msg("number of user volumes: %d", + ubi->vol_count - UBI_INT_VOL_COUNT); + ubi_msg("available PEBs: %d", ubi->avail_pebs); + ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs); + ubi_msg("number of PEBs reserved for bad PEB handling: %d", + ubi->beb_rsvd_pebs); + ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); + + /* Enable the background thread */ + if (!DBG_DISABLE_BGT) { + ubi->thread_enabled = 1; + wake_up_process(ubi->bgt_thread); + } + + return 0; + +out_detach: + ubi_eba_close(ubi); + ubi_wl_close(ubi); + kfree(ubi->vtbl); +out_free: + kfree(ubi); +out_mtd: + put_mtd_device(mtd); + ubi_devices[ubi_devices_cnt] = NULL; + return err; +} + +/** + * detach_mtd_dev - detach an MTD device. 
+ * @ubi: UBI device description object + */ +static void detach_mtd_dev(struct ubi_device *ubi) +{ + int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index; + + dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); + uif_close(ubi); + ubi_eba_close(ubi); + ubi_wl_close(ubi); + kfree(ubi->vtbl); + put_mtd_device(ubi->mtd); + kfree(ubi_devices[ubi_num]); + ubi_devices[ubi_num] = NULL; + ubi_devices_cnt -= 1; + ubi_assert(ubi_devices_cnt >= 0); + ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num); +} + +static int __init ubi_init(void) +{ + int err, i, k; + + /* Ensure that EC and VID headers have correct size */ + BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64); + BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); + + if (mtd_devs > UBI_MAX_DEVICES) { + printk("UBI error: too many MTD devices, maximum is %d\n", + UBI_MAX_DEVICES); + return -EINVAL; + } + + ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); + if (IS_ERR(ubi_class)) + return PTR_ERR(ubi_class); + + err = class_create_file(ubi_class, &ubi_version); + if (err) + goto out_class; + + /* Attach MTD devices */ + for (i = 0; i < mtd_devs; i++) { + struct mtd_dev_param *p = &mtd_dev_param[i]; + + cond_resched(); + + if (!p->name) { + dbg_err("empty name"); + err = -EINVAL; + goto out_detach; + } + + err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs); + if (err) + goto out_detach; + } + + return 0; + +out_detach: + for (k = 0; k < i; k++) + detach_mtd_dev(ubi_devices[k]); + class_remove_file(ubi_class, &ubi_version); +out_class: + class_destroy(ubi_class); + return err; +} +module_init(ubi_init); + +static void __exit ubi_exit(void) +{ + int i, n = ubi_devices_cnt; + + for (i = 0; i < n; i++) + detach_mtd_dev(ubi_devices[i]); + class_remove_file(ubi_class, &ubi_version); + class_destroy(ubi_class); +} +module_exit(ubi_exit); + +/** + * bytes_str_to_int - convert a string representing number of bytes to an + * integer. + * @str: the string to convert + * + * This function returns positive resulting integer in case of success and a + * negative error code in case of failure. + */ +static int __init bytes_str_to_int(const char *str) +{ + char *endp; + unsigned long result; + + result = simple_strtoul(str, &endp, 0); + if (str == endp || result < 0) { + printk("UBI error: incorrect bytes count: \"%s\"\n", str); + return -EINVAL; + } + + switch (*endp) { + case 'G': + result *= 1024; + case 'M': + result *= 1024; + case 'K': + case 'k': + result *= 1024; + if (endp[1] == 'i' && (endp[2] == '\0' || + endp[2] == 'B' || endp[2] == 'b')) + endp += 2; + case '\0': + break; + default: + printk("UBI error: incorrect bytes count: \"%s\"\n", str); + return -EINVAL; + } + + return result; +} + +/** + * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter. + * @val: the parameter value to parse + * @kp: not used + * + * This function returns zero in case of success and a negative error code in + * case of error. + */ +static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) +{ + int i, len; + struct mtd_dev_param *p; + char buf[MTD_PARAM_LEN_MAX]; + char *pbuf = &buf[0]; + char *tokens[3] = {NULL, NULL, NULL}; + + if (mtd_devs == UBI_MAX_DEVICES) { + printk("UBI error: too many parameters, max. is %d\n", + UBI_MAX_DEVICES); + return -EINVAL; + } + + len = strnlen(val, MTD_PARAM_LEN_MAX); + if (len == MTD_PARAM_LEN_MAX) { + printk("UBI error: parameter \"%s\" is too long, max. 
is %d\n", + val, MTD_PARAM_LEN_MAX); + return -EINVAL; + } + + if (len == 0) { + printk("UBI warning: empty 'mtd=' parameter - ignored\n"); + return 0; + } + + strcpy(buf, val); + + /* Get rid of the final newline */ + if (buf[len - 1] == '\n') + buf[len - 1] = 0; + + for (i = 0; i < 3; i++) + tokens[i] = strsep(&pbuf, ","); + + if (pbuf) { + printk("UBI error: too many arguments at \"%s\"\n", val); + return -EINVAL; + } + + if (tokens[0] == '\0') + return -EINVAL; + + p = &mtd_dev_param[mtd_devs]; + strcpy(&p->name[0], tokens[0]); + + if (tokens[1]) + p->vid_hdr_offs = bytes_str_to_int(tokens[1]); + if (tokens[2]) + p->data_offs = bytes_str_to_int(tokens[2]); + + if (p->vid_hdr_offs < 0) + return p->vid_hdr_offs; + if (p->data_offs < 0) + return p->data_offs; + + mtd_devs += 1; + return 0; +} + +module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); +MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " + "mtd=[,,]. " + "Multiple \"mtd\" parameters may be specified.\n" + "MTD devices may be specified by their number or name. " + "Optional \"vid_hdr_offs\" and \"data_offs\" parameters " + "specify UBI VID header position and data starting " + "position to be used by UBI.\n" + "Example: mtd=content,1984,2048 mtd=4 - attach MTD device" + "with name content using VID header offset 1984 and data " + "start 2048, and MTD device number 4 using default " + "offsets"); + +MODULE_VERSION(__stringify(UBI_VERSION)); +MODULE_DESCRIPTION("UBI - Unsorted Block Images"); +MODULE_AUTHOR("Artem Bityutskiy"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c new file mode 100644 index 000000000000..6612eb79bf17 --- /dev/null +++ b/drivers/mtd/ubi/cdev.c @@ -0,0 +1,722 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * This file includes implementation of UBI character device operations. + * + * There are two kinds of character devices in UBI: UBI character devices and + * UBI volume character devices. UBI character devices allow users to + * manipulate whole volumes: create, remove, and re-size them. Volume character + * devices provide volume I/O capabilities. + * + * Major and minor numbers are assigned dynamically to both UBI and volume + * character devices. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ubi.h" + +/* + * Maximum sequence numbers of UBI and volume character device IOCTLs (direct + * logical eraseblock erase is a debug-only feature). + */ +#define UBI_CDEV_IOC_MAX_SEQ 2 +#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO +#define VOL_CDEV_IOC_MAX_SEQ 1 +#else +#define VOL_CDEV_IOC_MAX_SEQ 2 +#endif + +/** + * major_to_device - get UBI device object by character device major number. 
+ * @major: major number + * + * This function returns a pointer to the UBI device object. + */ +static struct ubi_device *major_to_device(int major) +{ + int i; + + for (i = 0; i < ubi_devices_cnt; i++) + if (ubi_devices[i] && ubi_devices[i]->major == major) + return ubi_devices[i]; + BUG(); +} + +/** + * get_exclusive - get exclusive access to an UBI volume. + * @desc: volume descriptor + * + * This function changes UBI volume open mode to "exclusive". Returns previous + * mode value (positive integer) in case of success and a negative error code + * in case of failure. + */ +static int get_exclusive(struct ubi_volume_desc *desc) +{ + int users, err; + struct ubi_volume *vol = desc->vol; + + spin_lock(&vol->ubi->volumes_lock); + users = vol->readers + vol->writers + vol->exclusive; + ubi_assert(users > 0); + if (users > 1) { + dbg_err("%d users for volume %d", users, vol->vol_id); + err = -EBUSY; + } else { + vol->readers = vol->writers = 0; + vol->exclusive = 1; + err = desc->mode; + desc->mode = UBI_EXCLUSIVE; + } + spin_unlock(&vol->ubi->volumes_lock); + + return err; +} + +/** + * revoke_exclusive - revoke exclusive mode. + * @desc: volume descriptor + * @mode: new mode to switch to + */ +static void revoke_exclusive(struct ubi_volume_desc *desc, int mode) +{ + struct ubi_volume *vol = desc->vol; + + spin_lock(&vol->ubi->volumes_lock); + ubi_assert(vol->readers == 0 && vol->writers == 0); + ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE); + vol->exclusive = 0; + if (mode == UBI_READONLY) + vol->readers = 1; + else if (mode == UBI_READWRITE) + vol->writers = 1; + else + vol->exclusive = 1; + spin_unlock(&vol->ubi->volumes_lock); + + desc->mode = mode; +} + +static int vol_cdev_open(struct inode *inode, struct file *file) +{ + struct ubi_volume_desc *desc; + const struct ubi_device *ubi = major_to_device(imajor(inode)); + int vol_id = iminor(inode) - 1; + int mode; + + if (file->f_mode & FMODE_WRITE) + mode = UBI_READWRITE; + else + mode = UBI_READONLY; + + dbg_msg("open volume %d, mode %d", vol_id, mode); + + desc = ubi_open_volume(ubi->ubi_num, vol_id, mode); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + file->private_data = desc; + return 0; +} + +static int vol_cdev_release(struct inode *inode, struct file *file) +{ + struct ubi_volume_desc *desc = file->private_data; + struct ubi_volume *vol = desc->vol; + + dbg_msg("release volume %d, mode %d", vol->vol_id, desc->mode); + + if (vol->updating) { + ubi_warn("update of volume %d not finished, volume is damaged", + vol->vol_id); + vol->updating = 0; + kfree(vol->upd_buf); + } + + ubi_close_volume(desc); + return 0; +} + +static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) +{ + struct ubi_volume_desc *desc = file->private_data; + struct ubi_volume *vol = desc->vol; + loff_t new_offset; + + if (vol->updating) { + /* Update is in progress, seeking is prohibited */ + dbg_err("updating"); + return -EBUSY; + } + + switch (origin) { + case 0: /* SEEK_SET */ + new_offset = offset; + break; + case 1: /* SEEK_CUR */ + new_offset = file->f_pos + offset; + break; + case 2: /* SEEK_END */ + new_offset = vol->used_bytes + offset; + break; + default: + return -EINVAL; + } + + if (new_offset < 0 || new_offset > vol->used_bytes) { + dbg_err("bad seek %lld", new_offset); + return -EINVAL; + } + + dbg_msg("seek volume %d, offset %lld, origin %d, new offset %lld", + vol->vol_id, offset, origin, new_offset); + + file->f_pos = new_offset; + return new_offset; +} + +static ssize_t vol_cdev_read(struct file *file, 
__user char *buf, size_t count, + loff_t *offp) +{ + struct ubi_volume_desc *desc = file->private_data; + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size; + size_t count_save = count; + void *tbuf; + uint64_t tmp; + + dbg_msg("read %zd bytes from offset %lld of volume %d", + count, *offp, vol_id); + + if (vol->updating) { + dbg_err("updating"); + return -EBUSY; + } + if (vol->upd_marker) { + dbg_err("damaged volume, update marker is set"); + return -EBADF; + } + if (*offp == vol->used_bytes || count == 0) + return 0; + + if (vol->corrupted) + dbg_msg("read from corrupted volume %d", vol_id); + + if (*offp + count > vol->used_bytes) + count_save = count = vol->used_bytes - *offp; + + tbuf_size = vol->usable_leb_size; + if (count < tbuf_size) + tbuf_size = ALIGN(count, ubi->min_io_size); + tbuf = kmalloc(tbuf_size, GFP_KERNEL); + if (!tbuf) + return -ENOMEM; + + len = count > tbuf_size ? tbuf_size : count; + + tmp = *offp; + off = do_div(tmp, vol->usable_leb_size); + lnum = tmp; + + do { + cond_resched(); + + if (off + len >= vol->usable_leb_size) + len = vol->usable_leb_size - off; + + err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0); + if (err) + break; + + off += len; + if (off == vol->usable_leb_size) { + lnum += 1; + off -= vol->usable_leb_size; + } + + count -= len; + *offp += len; + + err = copy_to_user(buf, tbuf, len); + if (err) { + err = -EFAULT; + break; + } + + buf += len; + len = count > tbuf_size ? tbuf_size : count; + } while (count); + + kfree(tbuf); + return err ? err : count_save - count; +} + +#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO + +/* + * This function allows to directly write to dynamic UBI volumes, without + * issuing the volume update operation. Available only as a debugging feature. + * Very useful for testing UBI. + */ +static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, + size_t count, loff_t *offp) +{ + struct ubi_volume_desc *desc = file->private_data; + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0; + size_t count_save = count; + char *tbuf; + uint64_t tmp; + + dbg_msg("requested: write %zd bytes to offset %lld of volume %u", + count, *offp, desc->vol->vol_id); + + if (vol->vol_type == UBI_STATIC_VOLUME) + return -EROFS; + + tmp = *offp; + off = do_div(tmp, vol->usable_leb_size); + lnum = tmp; + + if (off % ubi->min_io_size) { + dbg_err("unaligned position"); + return -EINVAL; + } + + if (*offp + count > vol->used_bytes) + count_save = count = vol->used_bytes - *offp; + + /* We can write only in fractions of the minimum I/O unit */ + if (count % ubi->min_io_size) { + dbg_err("unaligned write length"); + return -EINVAL; + } + + tbuf_size = vol->usable_leb_size; + if (count < tbuf_size) + tbuf_size = ALIGN(count, ubi->min_io_size); + tbuf = kmalloc(tbuf_size, GFP_KERNEL); + if (!tbuf) + return -ENOMEM; + + len = count > tbuf_size ? tbuf_size : count; + + while (count) { + cond_resched(); + + if (off + len >= vol->usable_leb_size) + len = vol->usable_leb_size - off; + + err = copy_from_user(tbuf, buf, len); + if (err) { + err = -EFAULT; + break; + } + + err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len, + UBI_UNKNOWN); + if (err) + break; + + off += len; + if (off == vol->usable_leb_size) { + lnum += 1; + off -= vol->usable_leb_size; + } + + count -= len; + *offp += len; + buf += len; + len = count > tbuf_size ? 
tbuf_size : count; + } + + kfree(tbuf); + return err ? err : count_save - count; +} + +#else +#define vol_cdev_direct_write(file, buf, count, offp) -EPERM +#endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */ + +static ssize_t vol_cdev_write(struct file *file, const char __user *buf, + size_t count, loff_t *offp) +{ + int err = 0; + struct ubi_volume_desc *desc = file->private_data; + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + + if (!vol->updating) + return vol_cdev_direct_write(file, buf, count, offp); + + err = ubi_more_update_data(ubi, vol->vol_id, buf, count); + if (err < 0) { + ubi_err("cannot write %zd bytes of update data", count); + return err; + } + + if (err) { + /* + * Update is finished, @err contains number of actually written + * bytes now. + */ + count = err; + + err = ubi_check_volume(ubi, vol->vol_id); + if (err < 0) + return err; + + if (err) { + ubi_warn("volume %d on UBI device %d is corrupted", + vol->vol_id, ubi->ubi_num); + vol->corrupted = 1; + } + vol->checked = 1; + revoke_exclusive(desc, UBI_READWRITE); + } + + *offp += count; + return count; +} + +static int vol_cdev_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + struct ubi_volume_desc *desc = file->private_data; + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + void __user *argp = (void __user *)arg; + + if (_IOC_NR(cmd) > VOL_CDEV_IOC_MAX_SEQ || + _IOC_TYPE(cmd) != UBI_VOL_IOC_MAGIC) + return -ENOTTY; + + if (_IOC_DIR(cmd) && _IOC_READ) + err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) && _IOC_WRITE) + err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd)); + if (err) + return -EFAULT; + + switch (cmd) { + + /* Volume update command */ + case UBI_IOCVOLUP: + { + int64_t bytes, rsvd_bytes; + + if (!capable(CAP_SYS_RESOURCE)) { + err = -EPERM; + break; + } + + err = copy_from_user(&bytes, argp, sizeof(int64_t)); + if (err) { + err = -EFAULT; + break; + } + + if (desc->mode == UBI_READONLY) { + err = -EROFS; + break; + } + + rsvd_bytes = vol->reserved_pebs * (ubi->leb_size-vol->data_pad); + if (bytes < 0 || bytes > rsvd_bytes) { + err = -EINVAL; + break; + } + + err = get_exclusive(desc); + if (err < 0) + break; + + err = ubi_start_update(ubi, vol->vol_id, bytes); + if (bytes == 0) + revoke_exclusive(desc, UBI_READWRITE); + + file->f_pos = 0; + break; + } + +#ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO + /* Logical eraseblock erasure command */ + case UBI_IOCEBER: + { + int32_t lnum; + + err = __get_user(lnum, (__user int32_t *)argp); + if (err) { + err = -EFAULT; + break; + } + + if (desc->mode == UBI_READONLY) { + err = -EROFS; + break; + } + + if (lnum < 0 || lnum >= vol->reserved_pebs) { + err = -EINVAL; + break; + } + + if (vol->vol_type != UBI_DYNAMIC_VOLUME) { + err = -EROFS; + break; + } + + dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); + err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum); + if (err) + break; + + err = ubi_wl_flush(ubi); + break; + } +#endif + + default: + err = -ENOTTY; + break; + } + + return err; +} + +/** + * verify_mkvol_req - verify volume creation request. + * @ubi: UBI device description object + * @req: the request to check + * + * This function zero if the request is correct, and %-EINVAL if not. 
+ */ +static int verify_mkvol_req(const struct ubi_device *ubi, + const struct ubi_mkvol_req *req) +{ + int n, err = -EINVAL; + + if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 || + req->name_len < 0) + goto bad; + + if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) && + req->vol_id != UBI_VOL_NUM_AUTO) + goto bad; + + if (req->alignment == 0) + goto bad; + + if (req->bytes == 0) + goto bad; + + if (req->vol_type != UBI_DYNAMIC_VOLUME && + req->vol_type != UBI_STATIC_VOLUME) + goto bad; + + if (req->alignment > ubi->leb_size) + goto bad; + + n = req->alignment % ubi->min_io_size; + if (req->alignment != 1 && n) + goto bad; + + if (req->name_len > UBI_VOL_NAME_MAX) { + err = -ENAMETOOLONG; + goto bad; + } + + return 0; + +bad: + dbg_err("bad volume creation request"); + ubi_dbg_dump_mkvol_req(req); + return err; +} + +/** + * verify_rsvol_req - verify volume re-size request. + * @ubi: UBI device description object + * @req: the request to check + * + * This function returns zero if the request is correct, and %-EINVAL if not. + */ +static int verify_rsvol_req(const struct ubi_device *ubi, + const struct ubi_rsvol_req *req) +{ + if (req->bytes <= 0) + return -EINVAL; + + if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) + return -EINVAL; + + return 0; +} + +static int ubi_cdev_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + struct ubi_device *ubi; + struct ubi_volume_desc *desc; + void __user *argp = (void __user *)arg; + + if (_IOC_NR(cmd) > UBI_CDEV_IOC_MAX_SEQ || + _IOC_TYPE(cmd) != UBI_IOC_MAGIC) + return -ENOTTY; + + if (_IOC_DIR(cmd) && _IOC_READ) + err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd)); + else if (_IOC_DIR(cmd) && _IOC_WRITE) + err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd)); + if (err) + return -EFAULT; + + if (!capable(CAP_SYS_RESOURCE)) + return -EPERM; + + ubi = major_to_device(imajor(inode)); + if (IS_ERR(ubi)) + return PTR_ERR(ubi); + + switch (cmd) { + /* Create volume command */ + case UBI_IOCMKVOL: + { + struct ubi_mkvol_req req; + + dbg_msg("create volume"); + err = __copy_from_user(&req, argp, + sizeof(struct ubi_mkvol_req)); + if (err) { + err = -EFAULT; + break; + } + + err = verify_mkvol_req(ubi, &req); + if (err) + break; + + req.name[req.name_len] = '\0'; + + err = ubi_create_volume(ubi, &req); + if (err) + break; + + err = __put_user(req.vol_id, (__user int32_t *)argp); + if (err) + err = -EFAULT; + + break; + } + + /* Remove volume command */ + case UBI_IOCRMVOL: + { + int vol_id; + + dbg_msg("remove volume"); + err = __get_user(vol_id, (__user int32_t *)argp); + if (err) { + err = -EFAULT; + break; + } + + desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); + if (IS_ERR(desc)) { + err = PTR_ERR(desc); + break; + } + + err = ubi_remove_volume(desc); + if (err) + ubi_close_volume(desc); + + break; + } + + /* Re-size volume command */ + case UBI_IOCRSVOL: + { + int pebs; + uint64_t tmp; + struct ubi_rsvol_req req; + + dbg_msg("re-size volume"); + err = __copy_from_user(&req, argp, + sizeof(struct ubi_rsvol_req)); + if (err) { + err = -EFAULT; + break; + } + + err = verify_rsvol_req(ubi, &req); + if (err) + break; + + desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE); + if (IS_ERR(desc)) { + err = PTR_ERR(desc); + break; + } + + tmp = req.bytes; + pebs = !!do_div(tmp, desc->vol->usable_leb_size); + pebs += tmp; + + err = ubi_resize_volume(desc, pebs); + ubi_close_volume(desc); + break; + } + + default: + err = -ENOTTY; + break; + } + + return err; 
+} + +/* UBI character device operations */ +struct file_operations ubi_cdev_operations = { + .owner = THIS_MODULE, + .ioctl = ubi_cdev_ioctl, + .llseek = no_llseek +}; + +/* UBI volume character device operations */ +struct file_operations ubi_vol_cdev_operations = { + .owner = THIS_MODULE, + .open = vol_cdev_open, + .release = vol_cdev_release, + .llseek = vol_cdev_llseek, + .read = vol_cdev_read, + .write = vol_cdev_write, + .ioctl = vol_cdev_ioctl +}; diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c new file mode 100644 index 000000000000..86364221fafe --- /dev/null +++ b/drivers/mtd/ubi/debug.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * Here we keep all the UBI debugging stuff which should normally be disabled + * and compiled-out, but it is extremely helpful when hunting bugs or doing big + * changes. + */ + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG + +#include "ubi.h" + +/** + * ubi_dbg_dump_ec_hdr - dump an erase counter header. + * @ec_hdr: the erase counter header to dump + */ +void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) +{ + dbg_msg("erase counter header dump:"); + dbg_msg("magic %#08x", ubi32_to_cpu(ec_hdr->magic)); + dbg_msg("version %d", (int)ec_hdr->version); + dbg_msg("ec %llu", (long long)ubi64_to_cpu(ec_hdr->ec)); + dbg_msg("vid_hdr_offset %d", ubi32_to_cpu(ec_hdr->vid_hdr_offset)); + dbg_msg("data_offset %d", ubi32_to_cpu(ec_hdr->data_offset)); + dbg_msg("hdr_crc %#08x", ubi32_to_cpu(ec_hdr->hdr_crc)); + dbg_msg("erase counter header hexdump:"); + ubi_dbg_hexdump(ec_hdr, UBI_EC_HDR_SIZE); +} + +/** + * ubi_dbg_dump_vid_hdr - dump a volume identifier header. + * @vid_hdr: the volume identifier header to dump + */ +void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) +{ + dbg_msg("volume identifier header dump:"); + dbg_msg("magic %08x", ubi32_to_cpu(vid_hdr->magic)); + dbg_msg("version %d", (int)vid_hdr->version); + dbg_msg("vol_type %d", (int)vid_hdr->vol_type); + dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag); + dbg_msg("compat %d", (int)vid_hdr->compat); + dbg_msg("vol_id %d", ubi32_to_cpu(vid_hdr->vol_id)); + dbg_msg("lnum %d", ubi32_to_cpu(vid_hdr->lnum)); + dbg_msg("leb_ver %u", ubi32_to_cpu(vid_hdr->leb_ver)); + dbg_msg("data_size %d", ubi32_to_cpu(vid_hdr->data_size)); + dbg_msg("used_ebs %d", ubi32_to_cpu(vid_hdr->used_ebs)); + dbg_msg("data_pad %d", ubi32_to_cpu(vid_hdr->data_pad)); + dbg_msg("sqnum %llu", + (unsigned long long)ubi64_to_cpu(vid_hdr->sqnum)); + dbg_msg("hdr_crc %08x", ubi32_to_cpu(vid_hdr->hdr_crc)); + dbg_msg("volume identifier header hexdump:"); +} + +/** + * ubi_dbg_dump_vol_info- dump volume information. 
+ * @vol: UBI volume description object + */ +void ubi_dbg_dump_vol_info(const struct ubi_volume *vol) +{ + dbg_msg("volume information dump:"); + dbg_msg("vol_id %d", vol->vol_id); + dbg_msg("reserved_pebs %d", vol->reserved_pebs); + dbg_msg("alignment %d", vol->alignment); + dbg_msg("data_pad %d", vol->data_pad); + dbg_msg("vol_type %d", vol->vol_type); + dbg_msg("name_len %d", vol->name_len); + dbg_msg("usable_leb_size %d", vol->usable_leb_size); + dbg_msg("used_ebs %d", vol->used_ebs); + dbg_msg("used_bytes %lld", vol->used_bytes); + dbg_msg("last_eb_bytes %d", vol->last_eb_bytes); + dbg_msg("corrupted %d", vol->corrupted); + dbg_msg("upd_marker %d", vol->upd_marker); + + if (vol->name_len <= UBI_VOL_NAME_MAX && + strnlen(vol->name, vol->name_len + 1) == vol->name_len) { + dbg_msg("name %s", vol->name); + } else { + dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c", + vol->name[0], vol->name[1], vol->name[2], + vol->name[3], vol->name[4]); + } +} + +/** + * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object. + * @r: the object to dump + * @idx: volume table index + */ +void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) +{ + int name_len = ubi16_to_cpu(r->name_len); + + dbg_msg("volume table record %d dump:", idx); + dbg_msg("reserved_pebs %d", ubi32_to_cpu(r->reserved_pebs)); + dbg_msg("alignment %d", ubi32_to_cpu(r->alignment)); + dbg_msg("data_pad %d", ubi32_to_cpu(r->data_pad)); + dbg_msg("vol_type %d", (int)r->vol_type); + dbg_msg("upd_marker %d", (int)r->upd_marker); + dbg_msg("name_len %d", name_len); + + if (r->name[0] == '\0') { + dbg_msg("name NULL"); + return; + } + + if (name_len <= UBI_VOL_NAME_MAX && + strnlen(&r->name[0], name_len + 1) == name_len) { + dbg_msg("name %s", &r->name[0]); + } else { + dbg_msg("1st 5 characters of the name: %c%c%c%c%c", + r->name[0], r->name[1], r->name[2], r->name[3], + r->name[4]); + } + dbg_msg("crc %#08x", ubi32_to_cpu(r->crc)); +} + +/** + * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object. + * @sv: the object to dump + */ +void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) +{ + dbg_msg("volume scanning information dump:"); + dbg_msg("vol_id %d", sv->vol_id); + dbg_msg("highest_lnum %d", sv->highest_lnum); + dbg_msg("leb_count %d", sv->leb_count); + dbg_msg("compat %d", sv->compat); + dbg_msg("vol_type %d", sv->vol_type); + dbg_msg("used_ebs %d", sv->used_ebs); + dbg_msg("last_data_size %d", sv->last_data_size); + dbg_msg("data_pad %d", sv->data_pad); +} + +/** + * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object. + * @seb: the object to dump + * @type: object type: 0 - not corrupted, 1 - corrupted + */ +void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type) +{ + dbg_msg("eraseblock scanning information dump:"); + dbg_msg("ec %d", seb->ec); + dbg_msg("pnum %d", seb->pnum); + if (type == 0) { + dbg_msg("lnum %d", seb->lnum); + dbg_msg("scrub %d", seb->scrub); + dbg_msg("sqnum %llu", seb->sqnum); + dbg_msg("leb_ver %u", seb->leb_ver); + } +} + +/** + * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object. 
+ * @req: the object to dump + */ +void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) +{ + char nm[17]; + + dbg_msg("volume creation request dump:"); + dbg_msg("vol_id %d", req->vol_id); + dbg_msg("alignment %d", req->alignment); + dbg_msg("bytes %lld", (long long)req->bytes); + dbg_msg("vol_type %d", req->vol_type); + dbg_msg("name_len %d", req->name_len); + + memcpy(nm, req->name, 16); + nm[16] = 0; + dbg_msg("the 1st 16 characters of the name: %s", nm); +} + +#define BYTES_PER_LINE 32 + +/** + * ubi_dbg_hexdump - dump a buffer. + * @ptr: the buffer to dump + * @size: buffer size which must be multiple of 4 bytes + */ +void ubi_dbg_hexdump(const void *ptr, int size) +{ + int i, k = 0, rows, columns; + const uint8_t *p = ptr; + + size = ALIGN(size, 4); + rows = size/BYTES_PER_LINE + size % BYTES_PER_LINE; + for (i = 0; i < rows; i++) { + int j; + + cond_resched(); + columns = min(size - k, BYTES_PER_LINE) / 4; + if (columns == 0) + break; + printk(KERN_DEBUG "%5d: ", i * BYTES_PER_LINE); + for (j = 0; j < columns; j++) { + int n, N; + + N = size - k > 4 ? 4 : size - k; + for (n = 0; n < N; n++) + printk("%02x", p[k++]); + printk(" "); + } + printk("\n"); + } +} + +#endif /* CONFIG_MTD_UBI_DEBUG_MSG */ diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h new file mode 100644 index 000000000000..f816ad9a36c0 --- /dev/null +++ b/drivers/mtd/ubi/debug.h @@ -0,0 +1,161 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __UBI_DEBUG_H__ +#define __UBI_DEBUG_H__ + +#ifdef CONFIG_MTD_UBI_DEBUG +#include + +#define ubi_assert(expr) BUG_ON(!(expr)) +#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) +#else +#define ubi_assert(expr) ({}) +#define dbg_err(fmt, ...) ({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT +#define DBG_DISABLE_BGT 1 +#else +#define DBG_DISABLE_BGT 0 +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG +/* Generic debugging message */ +#define dbg_msg(fmt, ...) 
\ + printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__) + +#define ubi_dbg_dump_stack() dump_stack() + +struct ubi_ec_hdr; +struct ubi_vid_hdr; +struct ubi_volume; +struct ubi_vtbl_record; +struct ubi_scan_volume; +struct ubi_scan_leb; +struct ubi_mkvol_req; + +void ubi_dbg_print(int type, const char *func, const char *fmt, ...); +void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr); +void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr); +void ubi_dbg_dump_vol_info(const struct ubi_volume *vol); +void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx); +void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv); +void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); +void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); +void ubi_dbg_hexdump(const void *buf, int size); + +#else + +#define dbg_msg(fmt, ...) ({}) +#define ubi_dbg_dump_stack() ({}) +#define ubi_dbg_print(func, fmt, ...) ({}) +#define ubi_dbg_dump_ec_hdr(ec_hdr) ({}) +#define ubi_dbg_dump_vid_hdr(vid_hdr) ({}) +#define ubi_dbg_dump_vol_info(vol) ({}) +#define ubi_dbg_dump_vtbl_record(r, idx) ({}) +#define ubi_dbg_dump_sv(sv) ({}) +#define ubi_dbg_dump_seb(seb, type) ({}) +#define ubi_dbg_dump_mkvol_req(req) ({}) +#define ubi_dbg_hexdump(buf, size) ({}) + +#endif /* CONFIG_MTD_UBI_DEBUG_MSG */ + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA +/* Messages from the eraseblock association unit */ +#define dbg_eba(fmt, ...) \ + printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \ + ##__VA_ARGS__) +#else +#define dbg_eba(fmt, ...) ({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL +/* Messages from the wear-leveling unit */ +#define dbg_wl(fmt, ...) \ + printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \ + ##__VA_ARGS__) +#else +#define dbg_wl(fmt, ...) ({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO +/* Messages from the input/output unit */ +#define dbg_io(fmt, ...) \ + printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \ + ##__VA_ARGS__) +#else +#define dbg_io(fmt, ...) ({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD +/* Initialization and build messages */ +#define dbg_bld(fmt, ...) \ + printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \ + ##__VA_ARGS__) +#else +#define dbg_bld(fmt, ...) ({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS +/** + * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip. + * + * Returns non-zero if a bit-flip should be emulated, otherwise returns zero. + */ +static inline int ubi_dbg_is_bitflip(void) +{ + return !(random32() % 200); +} +#else +#define ubi_dbg_is_bitflip() 0 +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES +/** + * ubi_dbg_is_write_failure - if it is time to emulate a write failure. + * + * Returns non-zero if a write failure should be emulated, otherwise returns + * zero. + */ +static inline int ubi_dbg_is_write_failure(void) +{ + return !(random32() % 500); +} +#else +#define ubi_dbg_is_write_failure() 0 +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES +/** + * ubi_dbg_is_erase_failure - if its time to emulate an erase failure. + * + * Returns non-zero if an erase failure should be emulated, otherwise returns + * zero. 
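The emulation helpers above fire with fixed probabilities by testing random32() modulo a constant: roughly 1 in 200 calls for bit-flips, 1 in 500 for write failures and 1 in 400 for erase failures. A minimal userspace sketch of the same pattern, with the standard rand() standing in for the kernel's random32(); the measurement loop is only there to show that the rate comes out as expected.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Mirror of ubi_dbg_is_bitflip(): true roughly once per 200 calls. */
static int emulate_bitflip(void)
{
	return !(rand() % 200);
}

int main(void)
{
	long i, hits = 0, calls = 1000000;

	srand((unsigned)time(NULL));
	for (i = 0; i < calls; i++)
		if (emulate_bitflip())
			hits++;

	/* Expect hits to be close to calls / 200. */
	printf("emulated bit-flips: %ld of %ld calls (~1/%ld)\n",
	       hits, calls, hits ? calls / hits : 0);
	return 0;
}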
+ */ +static inline int ubi_dbg_is_erase_failure(void) +{ + return !(random32() % 400); +} +#else +#define ubi_dbg_is_erase_failure() 0 +#endif + +#endif /* !__UBI_DEBUG_H__ */ diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c new file mode 100644 index 000000000000..d847ee1da3d9 --- /dev/null +++ b/drivers/mtd/ubi/eba.c @@ -0,0 +1,1241 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * The UBI Eraseblock Association (EBA) unit. + * + * This unit is responsible for I/O to/from logical eraseblock. + * + * Although in this implementation the EBA table is fully kept and managed in + * RAM, which assumes poor scalability, it might be (partially) maintained on + * flash in future implementations. + * + * The EBA unit implements per-logical eraseblock locking. Before accessing a + * logical eraseblock it is locked for reading or writing. The per-logical + * eraseblock locking is implemented by means of the lock tree. The lock tree + * is an RB-tree which refers all the currently locked logical eraseblocks. The + * lock tree elements are &struct ltree_entry objects. They are indexed by + * (@vol_id, @lnum) pairs. + * + * EBA also maintains the global sequence counter which is incremented each + * time a logical eraseblock is mapped to a physical eraseblock and it is + * stored in the volume identifier header. This means that each VID header has + * a unique sequence number. The sequence number is only increased an we assume + * 64 bits is enough to never overflow. + */ + +#include +#include +#include +#include "ubi.h" + +/** + * struct ltree_entry - an entry in the lock tree. + * @rb: links RB-tree nodes + * @vol_id: volume ID of the locked logical eraseblock + * @lnum: locked logical eraseblock number + * @users: how many tasks are using this logical eraseblock or wait for it + * @mutex: read/write mutex to implement read/write access serialization to + * the (@vol_id, @lnum) logical eraseblock + * + * When a logical eraseblock is being locked - corresponding &struct ltree_entry + * object is inserted to the lock tree (@ubi->ltree). + */ +struct ltree_entry { + struct rb_node rb; + int vol_id; + int lnum; + int users; + struct rw_semaphore mutex; +}; + +/* Slab cache for lock-tree entries */ +static struct kmem_cache *ltree_slab; + +/** + * next_sqnum - get next sequence number. + * @ubi: UBI device description object + * + * This function returns next sequence number to use, which is just the current + * global sequence counter value. It also increases the global sequence + * counter. 
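next_sqnum() hands out the current value of @ubi->global_sqnum and advances it under @ubi->ltree_lock, so every VID header written afterwards carries a unique, monotonically increasing sequence number. A userspace sketch of the same idea, with a pthread mutex standing in for the kernel spinlock; the struct and field names below are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>

struct seq_counter {
	unsigned long long next;	/* plays the role of ubi->global_sqnum */
	pthread_mutex_t lock;		/* plays the role of ubi->ltree_lock */
};

static unsigned long long next_sqnum(struct seq_counter *c)
{
	unsigned long long sqnum;

	pthread_mutex_lock(&c->lock);
	sqnum = c->next++;		/* return the current value, then advance */
	pthread_mutex_unlock(&c->lock);
	return sqnum;
}

int main(void)
{
	struct seq_counter c = { 0, PTHREAD_MUTEX_INITIALIZER };
	unsigned long long a = next_sqnum(&c);
	unsigned long long b = next_sqnum(&c);

	printf("%llu %llu\n", a, b);	/* prints: 0 1 */
	return 0;
}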
+ */ +static unsigned long long next_sqnum(struct ubi_device *ubi) +{ + unsigned long long sqnum; + + spin_lock(&ubi->ltree_lock); + sqnum = ubi->global_sqnum++; + spin_unlock(&ubi->ltree_lock); + + return sqnum; +} + +/** + * ubi_get_compat - get compatibility flags of a volume. + * @ubi: UBI device description object + * @vol_id: volume ID + * + * This function returns compatibility flags for an internal volume. User + * volumes have no compatibility flags, so %0 is returned. + */ +static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) +{ + if (vol_id == UBI_LAYOUT_VOL_ID) + return UBI_LAYOUT_VOLUME_COMPAT; + return 0; +} + +/** + * ltree_lookup - look up the lock tree. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function returns a pointer to the corresponding &struct ltree_entry + * object if the logical eraseblock is locked and %NULL if it is not. + * @ubi->ltree_lock has to be locked. + */ +static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, + int lnum) +{ + struct rb_node *p; + + p = ubi->ltree.rb_node; + while (p) { + struct ltree_entry *le; + + le = rb_entry(p, struct ltree_entry, rb); + + if (vol_id < le->vol_id) + p = p->rb_left; + else if (vol_id > le->vol_id) + p = p->rb_right; + else { + if (lnum < le->lnum) + p = p->rb_left; + else if (lnum > le->lnum) + p = p->rb_right; + else + return le; + } + } + + return NULL; +} + +/** + * ltree_add_entry - add new entry to the lock tree. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the + * lock tree. If such entry is already there, its usage counter is increased. + * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation + * failed. + */ +static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, + int lnum) +{ + struct ltree_entry *le, *le1, *le_free; + + le = kmem_cache_alloc(ltree_slab, GFP_KERNEL); + if (!le) + return ERR_PTR(-ENOMEM); + + le->vol_id = vol_id; + le->lnum = lnum; + + spin_lock(&ubi->ltree_lock); + le1 = ltree_lookup(ubi, vol_id, lnum); + + if (le1) { + /* + * This logical eraseblock is already locked. The newly + * allocated lock entry is not needed. + */ + le_free = le; + le = le1; + } else { + struct rb_node **p, *parent = NULL; + + /* + * No lock entry, add the newly allocated one to the + * @ubi->ltree RB-tree. + */ + le_free = NULL; + + p = &ubi->ltree.rb_node; + while (*p) { + parent = *p; + le1 = rb_entry(parent, struct ltree_entry, rb); + + if (vol_id < le1->vol_id) + p = &(*p)->rb_left; + else if (vol_id > le1->vol_id) + p = &(*p)->rb_right; + else { + ubi_assert(lnum != le1->lnum); + if (lnum < le1->lnum) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + } + + rb_link_node(&le->rb, parent, p); + rb_insert_color(&le->rb, &ubi->ltree); + } + le->users += 1; + spin_unlock(&ubi->ltree_lock); + + if (le_free) + kmem_cache_free(ltree_slab, le_free); + + return le; +} + +/** + * leb_read_lock - lock logical eraseblock for reading. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function locks a logical eraseblock for reading. Returns zero in case + * of success and a negative error code in case of failure. 
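The lock tree orders its nodes first by volume ID and then by logical eraseblock number, which is what ltree_lookup() and ltree_add_entry() implement when they walk left or right. The comparison logic is easier to see in isolation; here is a sketch that uses the same (vol_id, lnum) ordering over a plain sorted array with qsort()/bsearch(), the array-based container being purely illustrative in place of the kernel RB-tree.

#include <stdio.h>
#include <stdlib.h>

struct leb_key {
	int vol_id;
	int lnum;
};

/* Same ordering the lock tree uses: vol_id first, then lnum. */
static int leb_key_cmp(const void *a, const void *b)
{
	const struct leb_key *x = a, *y = b;

	if (x->vol_id != y->vol_id)
		return x->vol_id < y->vol_id ? -1 : 1;
	if (x->lnum != y->lnum)
		return x->lnum < y->lnum ? -1 : 1;
	return 0;
}

int main(void)
{
	struct leb_key locked[] = { {0, 7}, {0, 2}, {3, 0}, {1, 5} };
	struct leb_key want = { 1, 5 };
	size_t n = sizeof(locked) / sizeof(locked[0]);
	struct leb_key *hit;

	qsort(locked, n, sizeof(locked[0]), leb_key_cmp);
	hit = bsearch(&want, locked, n, sizeof(locked[0]), leb_key_cmp);
	printf("LEB %d:%d is %slocked\n", want.vol_id, want.lnum,
	       hit ? "" : "not ");
	return 0;
}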
+ */ +static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) +{ + struct ltree_entry *le; + + le = ltree_add_entry(ubi, vol_id, lnum); + if (IS_ERR(le)) + return PTR_ERR(le); + down_read(&le->mutex); + return 0; +} + +/** + * leb_read_unlock - unlock logical eraseblock. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + */ +static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) +{ + int free = 0; + struct ltree_entry *le; + + spin_lock(&ubi->ltree_lock); + le = ltree_lookup(ubi, vol_id, lnum); + le->users -= 1; + ubi_assert(le->users >= 0); + if (le->users == 0) { + rb_erase(&le->rb, &ubi->ltree); + free = 1; + } + spin_unlock(&ubi->ltree_lock); + + up_read(&le->mutex); + if (free) + kmem_cache_free(ltree_slab, le); +} + +/** + * leb_write_lock - lock logical eraseblock for writing. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function locks a logical eraseblock for writing. Returns zero in case + * of success and a negative error code in case of failure. + */ +static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) +{ + struct ltree_entry *le; + + le = ltree_add_entry(ubi, vol_id, lnum); + if (IS_ERR(le)) + return PTR_ERR(le); + down_write(&le->mutex); + return 0; +} + +/** + * leb_write_unlock - unlock logical eraseblock. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + */ +static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) +{ + int free; + struct ltree_entry *le; + + spin_lock(&ubi->ltree_lock); + le = ltree_lookup(ubi, vol_id, lnum); + le->users -= 1; + ubi_assert(le->users >= 0); + if (le->users == 0) { + rb_erase(&le->rb, &ubi->ltree); + free = 1; + } else + free = 0; + spin_unlock(&ubi->ltree_lock); + + up_write(&le->mutex); + if (free) + kmem_cache_free(ltree_slab, le); +} + +/** + * ubi_eba_unmap_leb - un-map logical eraseblock. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function un-maps logical eraseblock @lnum and schedules corresponding + * physical eraseblock for erasure. Returns zero in case of success and a + * negative error code in case of failure. + */ +int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum) +{ + int idx = vol_id2idx(ubi, vol_id), err, pnum; + struct ubi_volume *vol = ubi->volumes[idx]; + + if (ubi->ro_mode) + return -EROFS; + + err = leb_write_lock(ubi, vol_id, lnum); + if (err) + return err; + + pnum = vol->eba_tbl[lnum]; + if (pnum < 0) + /* This logical eraseblock is already unmapped */ + goto out_unlock; + + dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); + + vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; + err = ubi_wl_put_peb(ubi, pnum, 0); + +out_unlock: + leb_write_unlock(ubi, vol_id, lnum); + return err; +} + +/** + * ubi_eba_read_leb - read data. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * @buf: buffer to store the read data + * @offset: offset from where to read + * @len: how many bytes to read + * @check: data CRC check flag + * + * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF + * bytes. The @check flag only makes sense for static volumes and forces + * eraseblock data CRC checking. + * + * In case of success this function returns zero. In case of a static volume, + * if data CRC mismatches - %-EBADMSG is returned. 
%-EBADMSG may also be + * returned for any volume type if an ECC error was detected by the MTD device + * driver. Other negative error cored may be returned in case of other errors. + */ +int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, + int offset, int len, int check) +{ + int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); + struct ubi_vid_hdr *vid_hdr; + struct ubi_volume *vol = ubi->volumes[idx]; + uint32_t crc, crc1; + + err = leb_read_lock(ubi, vol_id, lnum); + if (err) + return err; + + pnum = vol->eba_tbl[lnum]; + if (pnum < 0) { + /* + * The logical eraseblock is not mapped, fill the whole buffer + * with 0xFF bytes. The exception is static volumes for which + * it is an error to read unmapped logical eraseblocks. + */ + dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)", + len, offset, vol_id, lnum); + leb_read_unlock(ubi, vol_id, lnum); + ubi_assert(vol->vol_type != UBI_STATIC_VOLUME); + memset(buf, 0xFF, len); + return 0; + } + + dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d", + len, offset, vol_id, lnum, pnum); + + if (vol->vol_type == UBI_DYNAMIC_VOLUME) + check = 0; + +retry: + if (check) { + vid_hdr = ubi_zalloc_vid_hdr(ubi); + if (!vid_hdr) { + err = -ENOMEM; + goto out_unlock; + } + + err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); + if (err && err != UBI_IO_BITFLIPS) { + if (err > 0) { + /* + * The header is either absent or corrupted. + * The former case means there is a bug - + * switch to read-only mode just in case. + * The latter case means a real corruption - we + * may try to recover data. FIXME: but this is + * not implemented. + */ + if (err == UBI_IO_BAD_VID_HDR) { + ubi_warn("bad VID header at PEB %d, LEB" + "%d:%d", pnum, vol_id, lnum); + err = -EBADMSG; + } else + ubi_ro_mode(ubi); + } + goto out_free; + } else if (err == UBI_IO_BITFLIPS) + scrub = 1; + + ubi_assert(lnum < ubi32_to_cpu(vid_hdr->used_ebs)); + ubi_assert(len == ubi32_to_cpu(vid_hdr->data_size)); + + crc = ubi32_to_cpu(vid_hdr->data_crc); + ubi_free_vid_hdr(ubi, vid_hdr); + } + + err = ubi_io_read_data(ubi, buf, pnum, offset, len); + if (err) { + if (err == UBI_IO_BITFLIPS) { + scrub = 1; + err = 0; + } else if (err == -EBADMSG) { + if (vol->vol_type == UBI_DYNAMIC_VOLUME) + goto out_unlock; + scrub = 1; + if (!check) { + ubi_msg("force data checking"); + check = 1; + goto retry; + } + } else + goto out_unlock; + } + + if (check) { + crc1 = crc32(UBI_CRC32_INIT, buf, len); + if (crc1 != crc) { + ubi_warn("CRC error: calculated %#08x, must be %#08x", + crc1, crc); + err = -EBADMSG; + goto out_unlock; + } + } + + if (scrub) + err = ubi_wl_scrub_peb(ubi, pnum); + + leb_read_unlock(ubi, vol_id, lnum); + return err; + +out_free: + ubi_free_vid_hdr(ubi, vid_hdr); +out_unlock: + leb_read_unlock(ubi, vol_id, lnum); + return err; +} + +/** + * recover_peb - recover from write failure. + * @ubi: UBI device description object + * @pnum: the physical eraseblock to recover + * @vol_id: volume ID + * @lnum: logical eraseblock number + * @buf: data which was not written because of the write failure + * @offset: offset of the failed write + * @len: how many bytes should have been written + * + * This function is called in case of a write failure and moves all good data + * from the potentially bad physical eraseblock to a good physical eraseblock. + * This function also writes the data which was not written due to the failure. + * Returns new physical eraseblock number in case of success, and a negative + * error code in case of failure. 
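For static volumes the read path recomputes CRC-32 over the data and compares it with the value stored in @vid_hdr->data_crc, returning %-EBADMSG on mismatch. The kernel crc32() used with %UBI_CRC32_INIT is, to the best of my understanding, the reflected CRC-32 with polynomial 0xEDB88320, seeded with 0xFFFFFFFF and without a final inversion; the standalone bitwise sketch below assumes that and is self-consistent either way (the "stored" value is produced by the same routine), so treat it as an illustration of the check rather than a drop-in replacement.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UBI_CRC32_INIT 0xFFFFFFFFU

/* Bitwise reflected CRC-32 (poly 0xEDB88320), no final inversion. */
static uint32_t crc32_le(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320U : 0);
	}
	return crc;
}

int main(void)
{
	unsigned char leb_data[64];
	uint32_t stored_crc, crc1;

	memset(leb_data, 0xA5, sizeof(leb_data));

	/* What the write path would have put into vid_hdr->data_crc. */
	stored_crc = crc32_le(UBI_CRC32_INIT, leb_data, sizeof(leb_data));

	/* What the read path recomputes after reading the data back. */
	crc1 = crc32_le(UBI_CRC32_INIT, leb_data, sizeof(leb_data));
	if (crc1 != stored_crc)
		printf("CRC error: calculated %#08x, must be %#08x\n",
		       crc1, stored_crc);
	else
		printf("CRC ok: %#08x\n", crc1);
	return 0;
}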
+ */ +static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, + const void *buf, int offset, int len) +{ + int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; + struct ubi_volume *vol = ubi->volumes[idx]; + struct ubi_vid_hdr *vid_hdr; + unsigned char *new_buf; + + vid_hdr = ubi_zalloc_vid_hdr(ubi); + if (!vid_hdr) { + return -ENOMEM; + } + +retry: + new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); + if (new_pnum < 0) { + ubi_free_vid_hdr(ubi, vid_hdr); + return new_pnum; + } + + ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum); + + err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); + if (err && err != UBI_IO_BITFLIPS) { + if (err > 0) + err = -EIO; + goto out_put; + } + + vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); + err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); + if (err) + goto write_error; + + data_size = offset + len; + new_buf = kmalloc(data_size, GFP_KERNEL); + if (!new_buf) { + err = -ENOMEM; + goto out_put; + } + memset(new_buf + offset, 0xFF, len); + + /* Read everything before the area where the write failure happened */ + if (offset > 0) { + err = ubi_io_read_data(ubi, new_buf, pnum, 0, offset); + if (err && err != UBI_IO_BITFLIPS) { + kfree(new_buf); + goto out_put; + } + } + + memcpy(new_buf + offset, buf, len); + + err = ubi_io_write_data(ubi, new_buf, new_pnum, 0, data_size); + if (err) { + kfree(new_buf); + goto write_error; + } + + kfree(new_buf); + ubi_free_vid_hdr(ubi, vid_hdr); + + vol->eba_tbl[lnum] = new_pnum; + ubi_wl_put_peb(ubi, pnum, 1); + + ubi_msg("data was successfully recovered"); + return 0; + +out_put: + ubi_wl_put_peb(ubi, new_pnum, 1); + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + +write_error: + /* + * Bad luck? This physical eraseblock is bad too? Crud. Let's try to + * get another one. + */ + ubi_warn("failed to write to PEB %d", new_pnum); + ubi_wl_put_peb(ubi, new_pnum, 1); + if (++tries > UBI_IO_RETRIES) { + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + } + ubi_msg("try again"); + goto retry; +} + +/** + * ubi_eba_write_leb - write data to dynamic volume. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * @buf: the data to write + * @offset: offset within the logical eraseblock where to write + * @len: how many bytes to write + * @dtype: data type + * + * This function writes data to logical eraseblock @lnum of a dynamic volume + * @vol_id. Returns zero in case of success and a negative error code in case + * of failure. In case of error, it is possible that something was still + * written to the flash media, but may be some garbage. + */ +int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, + const void *buf, int offset, int len, int dtype) +{ + int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0; + struct ubi_volume *vol = ubi->volumes[idx]; + struct ubi_vid_hdr *vid_hdr; + + if (ubi->ro_mode) + return -EROFS; + + err = leb_write_lock(ubi, vol_id, lnum); + if (err) + return err; + + pnum = vol->eba_tbl[lnum]; + if (pnum >= 0) { + dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", + len, offset, vol_id, lnum, pnum); + + err = ubi_io_write_data(ubi, buf, pnum, offset, len); + if (err) { + ubi_warn("failed to write data to PEB %d", pnum); + if (err == -EIO && ubi->bad_allowed) + err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len); + if (err) + ubi_ro_mode(ubi); + } + leb_write_unlock(ubi, vol_id, lnum); + return err; + } + + /* + * The logical eraseblock is not mapped. 
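When a write fails, recover_peb() (and the write paths below) put the suspect physical eraseblock, grab a fresh one and retry, but only up to %UBI_IO_RETRIES times before giving up. The bounded goto-retry shape, stripped of the flash details, looks like the sketch below; the retry limit, the stubbed "write" and its failure model are all illustrative values, not driver constants.

#include <stdio.h>

#define IO_RETRIES 3		/* stand-in for UBI_IO_RETRIES */

static int failures_left = 2;	/* pretend the first two PEBs go bad */

/* Stub for "write the data to a freshly allocated PEB". */
static int write_to_new_peb(int attempt)
{
	if (failures_left > 0) {
		failures_left--;
		return -5;	/* an -EIO style failure */
	}
	printf("attempt %d succeeded\n", attempt);
	return 0;
}

int main(void)
{
	int err, tries = 0;

retry:
	err = write_to_new_peb(tries + 1);
	if (err) {
		if (++tries > IO_RETRIES) {
			printf("giving up after %d attempts\n", tries);
			return 1;
		}
		printf("write failed, try another PEB\n");
		goto retry;
	}
	return 0;
}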
We have to get a free physical + * eraseblock and write the volume identifier header there first. + */ + vid_hdr = ubi_zalloc_vid_hdr(ubi); + if (!vid_hdr) { + leb_write_unlock(ubi, vol_id, lnum); + return -ENOMEM; + } + + vid_hdr->vol_type = UBI_VID_DYNAMIC; + vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); + vid_hdr->vol_id = cpu_to_ubi32(vol_id); + vid_hdr->lnum = cpu_to_ubi32(lnum); + vid_hdr->compat = ubi_get_compat(ubi, vol_id); + vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad); + +retry: + pnum = ubi_wl_get_peb(ubi, dtype); + if (pnum < 0) { + ubi_free_vid_hdr(ubi, vid_hdr); + leb_write_unlock(ubi, vol_id, lnum); + return pnum; + } + + dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d", + len, offset, vol_id, lnum, pnum); + + err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); + if (err) { + ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", + vol_id, lnum, pnum); + goto write_error; + } + + err = ubi_io_write_data(ubi, buf, pnum, offset, len); + if (err) { + ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, " + "PEB %d", len, offset, vol_id, lnum, pnum); + goto write_error; + } + + vol->eba_tbl[lnum] = pnum; + + leb_write_unlock(ubi, vol_id, lnum); + ubi_free_vid_hdr(ubi, vid_hdr); + return 0; + +write_error: + if (err != -EIO || !ubi->bad_allowed) { + ubi_ro_mode(ubi); + leb_write_unlock(ubi, vol_id, lnum); + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + } + + /* + * Fortunately, this is the first write operation to this physical + * eraseblock, so just put it and request a new one. We assume that if + * this physical eraseblock went bad, the erase code will handle that. + */ + err = ubi_wl_put_peb(ubi, pnum, 1); + if (err || ++tries > UBI_IO_RETRIES) { + ubi_ro_mode(ubi); + leb_write_unlock(ubi, vol_id, lnum); + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + } + + vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); + ubi_msg("try another PEB"); + goto retry; +} + +/** + * ubi_eba_write_leb_st - write data to static volume. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * @buf: data to write + * @len: how many bytes to write + * @dtype: data type + * @used_ebs: how many logical eraseblocks will this volume contain + * + * This function writes data to logical eraseblock @lnum of static volume + * @vol_id. The @used_ebs argument should contain total number of logical + * eraseblock in this static volume. + * + * When writing to the last logical eraseblock, the @len argument doesn't have + * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent + * to the real data size, although the @buf buffer has to contain the + * alignment. In all other cases, @len has to be aligned. + * + * It is prohibited to write more then once to logical eraseblocks of static + * volumes. This function returns zero in case of success and a negative error + * code in case of failure. 
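For static volumes every logical eraseblock except the last must be written in whole multiples of the minimal I/O unit; for the last one the caller passes the real data size and the write length is rounded up to the next I/O-unit boundary. A small sketch of that length calculation, where the 512-byte minimal I/O unit and the 1300-byte payload are example values only.

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two here). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int min_io_size = 512;	/* example minimal I/O unit */
	int data_size = 1300;	/* real bytes in the last LEB */
	int len = ALIGN(data_size, min_io_size);

	printf("data_size %d -> write length %d (%d bytes of padding)\n",
	       data_size, len, len - data_size);
	return 0;
}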
+ */ +int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, + const void *buf, int len, int dtype, int used_ebs) +{ + int err, pnum, tries = 0, data_size = len; + int idx = vol_id2idx(ubi, vol_id); + struct ubi_volume *vol = ubi->volumes[idx]; + struct ubi_vid_hdr *vid_hdr; + uint32_t crc; + + if (ubi->ro_mode) + return -EROFS; + + if (lnum == used_ebs - 1) + /* If this is the last LEB @len may be unaligned */ + len = ALIGN(data_size, ubi->min_io_size); + else + ubi_assert(len % ubi->min_io_size == 0); + + vid_hdr = ubi_zalloc_vid_hdr(ubi); + if (!vid_hdr) + return -ENOMEM; + + err = leb_write_lock(ubi, vol_id, lnum); + if (err) { + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + } + + vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); + vid_hdr->vol_id = cpu_to_ubi32(vol_id); + vid_hdr->lnum = cpu_to_ubi32(lnum); + vid_hdr->compat = ubi_get_compat(ubi, vol_id); + vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad); + + crc = crc32(UBI_CRC32_INIT, buf, data_size); + vid_hdr->vol_type = UBI_VID_STATIC; + vid_hdr->data_size = cpu_to_ubi32(data_size); + vid_hdr->used_ebs = cpu_to_ubi32(used_ebs); + vid_hdr->data_crc = cpu_to_ubi32(crc); + +retry: + pnum = ubi_wl_get_peb(ubi, dtype); + if (pnum < 0) { + ubi_free_vid_hdr(ubi, vid_hdr); + leb_write_unlock(ubi, vol_id, lnum); + return pnum; + } + + dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d", + len, vol_id, lnum, pnum, used_ebs); + + err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); + if (err) { + ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", + vol_id, lnum, pnum); + goto write_error; + } + + err = ubi_io_write_data(ubi, buf, pnum, 0, len); + if (err) { + ubi_warn("failed to write %d bytes of data to PEB %d", + len, pnum); + goto write_error; + } + + ubi_assert(vol->eba_tbl[lnum] < 0); + vol->eba_tbl[lnum] = pnum; + + leb_write_unlock(ubi, vol_id, lnum); + ubi_free_vid_hdr(ubi, vid_hdr); + return 0; + +write_error: + if (err != -EIO || !ubi->bad_allowed) { + /* + * This flash device does not admit of bad eraseblocks or + * something nasty and unexpected happened. Switch to read-only + * mode just in case. + */ + ubi_ro_mode(ubi); + leb_write_unlock(ubi, vol_id, lnum); + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + } + + err = ubi_wl_put_peb(ubi, pnum, 1); + if (err || ++tries > UBI_IO_RETRIES) { + ubi_ro_mode(ubi); + leb_write_unlock(ubi, vol_id, lnum); + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + } + + vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); + ubi_msg("try another PEB"); + goto retry; +} + +/* + * ubi_eba_atomic_leb_change - change logical eraseblock atomically. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * @buf: data to write + * @len: how many bytes to write + * @dtype: data type + * + * This function changes the contents of a logical eraseblock atomically. @buf + * has to contain new logical eraseblock data, and @len - the length of the + * data, which has to be aligned. This function guarantees that in case of an + * unclean reboot the old contents is preserved. Returns zero in case of + * success and a negative error code in case of failure. 
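The atomic change below works by writing the complete new contents (with the copy flag and a data CRC) to a spare physical eraseblock first, and only then retiring the old eraseblock and switching the mapping; roughly speaking, if power fails midway one intact copy of the data still exists and the newer sequence number decides which copy wins at the next attach. A toy in-memory model of that ordering follows; the arrays and steps are a sketch of the idea, not driver API.

#include <stdio.h>
#include <string.h>

#define PEBS 4

static char flash[PEBS][16];	/* pretend physical eraseblocks */
static int eba_tbl[1] = { 0 };	/* LEB 0 currently maps to PEB 0 */

int main(void)
{
	int lnum = 0, old_pnum = eba_tbl[lnum], new_pnum = 2;

	strcpy(flash[old_pnum], "old data");

	/* 1. Write the complete new copy to a different PEB first. */
	strcpy(flash[new_pnum], "new data");

	/* 2. Only after that write succeeded, retire the old PEB ... */
	memset(flash[old_pnum], 0, sizeof(flash[old_pnum]));	/* "erase" */

	/* 3. ... and switch the logical-to-physical mapping. */
	eba_tbl[lnum] = new_pnum;

	printf("LEB 0 -> PEB %d: \"%s\"\n", eba_tbl[lnum],
	       flash[eba_tbl[lnum]]);
	return 0;
}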
+ */ +int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, + const void *buf, int len, int dtype) +{ + int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id); + struct ubi_volume *vol = ubi->volumes[idx]; + struct ubi_vid_hdr *vid_hdr; + uint32_t crc; + + if (ubi->ro_mode) + return -EROFS; + + vid_hdr = ubi_zalloc_vid_hdr(ubi); + if (!vid_hdr) + return -ENOMEM; + + err = leb_write_lock(ubi, vol_id, lnum); + if (err) { + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + } + + vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); + vid_hdr->vol_id = cpu_to_ubi32(vol_id); + vid_hdr->lnum = cpu_to_ubi32(lnum); + vid_hdr->compat = ubi_get_compat(ubi, vol_id); + vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad); + + crc = crc32(UBI_CRC32_INIT, buf, len); + vid_hdr->vol_type = UBI_VID_STATIC; + vid_hdr->data_size = cpu_to_ubi32(len); + vid_hdr->copy_flag = 1; + vid_hdr->data_crc = cpu_to_ubi32(crc); + +retry: + pnum = ubi_wl_get_peb(ubi, dtype); + if (pnum < 0) { + ubi_free_vid_hdr(ubi, vid_hdr); + leb_write_unlock(ubi, vol_id, lnum); + return pnum; + } + + dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d", + vol_id, lnum, vol->eba_tbl[lnum], pnum); + + err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); + if (err) { + ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", + vol_id, lnum, pnum); + goto write_error; + } + + err = ubi_io_write_data(ubi, buf, pnum, 0, len); + if (err) { + ubi_warn("failed to write %d bytes of data to PEB %d", + len, pnum); + goto write_error; + } + + err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1); + if (err) { + ubi_free_vid_hdr(ubi, vid_hdr); + leb_write_unlock(ubi, vol_id, lnum); + return err; + } + + vol->eba_tbl[lnum] = pnum; + leb_write_unlock(ubi, vol_id, lnum); + ubi_free_vid_hdr(ubi, vid_hdr); + return 0; + +write_error: + if (err != -EIO || !ubi->bad_allowed) { + /* + * This flash device does not admit of bad eraseblocks or + * something nasty and unexpected happened. Switch to read-only + * mode just in case. + */ + ubi_ro_mode(ubi); + leb_write_unlock(ubi, vol_id, lnum); + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + } + + err = ubi_wl_put_peb(ubi, pnum, 1); + if (err || ++tries > UBI_IO_RETRIES) { + ubi_ro_mode(ubi); + leb_write_unlock(ubi, vol_id, lnum); + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + } + + vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); + ubi_msg("try another PEB"); + goto retry; +} + +/** + * ltree_entry_ctor - lock tree entries slab cache constructor. + * @obj: the lock-tree entry to construct + * @cache: the lock tree entry slab cache + * @flags: constructor flags + */ +static void ltree_entry_ctor(void *obj, struct kmem_cache *cache, + unsigned long flags) +{ + struct ltree_entry *le = obj; + + if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) != + SLAB_CTOR_CONSTRUCTOR) + return; + + le->users = 0; + init_rwsem(&le->mutex); +} + +/** + * ubi_eba_copy_leb - copy logical eraseblock. + * @ubi: UBI device description object + * @from: physical eraseblock number from where to copy + * @to: physical eraseblock number where to copy + * @vid_hdr: VID header of the @from physical eraseblock + * + * This function copies logical eraseblock from physical eraseblock @from to + * physical eraseblock @to. The @vid_hdr buffer may be changed by this + * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation + * was canceled because bit-flips were detected at the target PEB, and a + * negative error code in case of failure. 
+ */ +int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, + struct ubi_vid_hdr *vid_hdr) +{ + int err, vol_id, lnum, data_size, aldata_size, pnum, idx; + struct ubi_volume *vol; + uint32_t crc; + void *buf, *buf1 = NULL; + + vol_id = ubi32_to_cpu(vid_hdr->vol_id); + lnum = ubi32_to_cpu(vid_hdr->lnum); + + dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); + + if (vid_hdr->vol_type == UBI_VID_STATIC) { + data_size = ubi32_to_cpu(vid_hdr->data_size); + aldata_size = ALIGN(data_size, ubi->min_io_size); + } else + data_size = aldata_size = + ubi->leb_size - ubi32_to_cpu(vid_hdr->data_pad); + + buf = kmalloc(aldata_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* + * We do not want anybody to write to this logical eraseblock while we + * are moving it, so we lock it. + */ + err = leb_write_lock(ubi, vol_id, lnum); + if (err) { + kfree(buf); + return err; + } + + /* + * But the logical eraseblock might have been put by this time. + * Cancel if it is true. + */ + idx = vol_id2idx(ubi, vol_id); + + /* + * We may race with volume deletion/re-size, so we have to hold + * @ubi->volumes_lock. + */ + spin_lock(&ubi->volumes_lock); + vol = ubi->volumes[idx]; + if (!vol) { + dbg_eba("volume %d was removed meanwhile", vol_id); + spin_unlock(&ubi->volumes_lock); + goto out_unlock; + } + + pnum = vol->eba_tbl[lnum]; + if (pnum != from) { + dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " + "PEB %d, cancel", vol_id, lnum, from, pnum); + spin_unlock(&ubi->volumes_lock); + goto out_unlock; + } + spin_unlock(&ubi->volumes_lock); + + /* OK, now the LEB is locked and we can safely start moving it */ + + dbg_eba("read %d bytes of data", aldata_size); + err = ubi_io_read_data(ubi, buf, from, 0, aldata_size); + if (err && err != UBI_IO_BITFLIPS) { + ubi_warn("error %d while reading data from PEB %d", + err, from); + goto out_unlock; + } + + /* + * Now we have got to calculate how much data we have to to copy. In + * case of a static volume it is fairly easy - the VID header contains + * the data size. In case of a dynamic volume it is more difficult - we + * have to read the contents, cut 0xFF bytes from the end and copy only + * the first part. We must do this to avoid writing 0xFF bytes as it + * may have some side-effects. And not only this. It is important not + * to include those 0xFFs to CRC because later the they may be filled + * by data. + */ + if (vid_hdr->vol_type == UBI_VID_DYNAMIC) + aldata_size = data_size = + ubi_calc_data_len(ubi, buf, data_size); + + cond_resched(); + crc = crc32(UBI_CRC32_INIT, buf, data_size); + cond_resched(); + + /* + * It may turn out to me that the whole @from physical eraseblock + * contains only 0xFF bytes. Then we have to only write the VID header + * and do not write any data. This also means we should not set + * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. 
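For dynamic volumes the copy routine trims trailing 0xFF bytes before computing the CRC and writing, since such bytes look exactly like erased flash and may legitimately be programmed later. A standalone sketch of that trimming is below; the rounding up to the minimal I/O unit mirrors what ubi_calc_data_len() is described as doing, but the helper here is an illustrative stand-in, not the driver function.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/*
 * Strip trailing 0xFF bytes and round the result up to the minimal I/O
 * unit, an illustrative stand-in for ubi_calc_data_len().
 */
static int calc_data_len(const uint8_t *buf, int length, int min_io_size)
{
	int i;

	for (i = length - 1; i >= 0; i--)
		if (buf[i] != 0xFF)
			break;
	return ALIGN(i + 1, min_io_size);
}

int main(void)
{
	uint8_t leb[2048];
	int min_io_size = 512;

	memset(leb, 0xFF, sizeof(leb));		/* mostly "erased" ... */
	memcpy(leb, "payload", 7);		/* ... with 7 real bytes */

	printf("copy only %d of %d bytes\n",
	       calc_data_len(leb, sizeof(leb), min_io_size),
	       (int)sizeof(leb));
	return 0;
}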
+ */ + if (data_size > 0) { + vid_hdr->copy_flag = 1; + vid_hdr->data_size = cpu_to_ubi32(data_size); + vid_hdr->data_crc = cpu_to_ubi32(crc); + } + vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi)); + + err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); + if (err) + goto out_unlock; + + cond_resched(); + + /* Read the VID header back and check if it was written correctly */ + err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); + if (err) { + if (err != UBI_IO_BITFLIPS) + ubi_warn("cannot read VID header back from PEB %d", to); + goto out_unlock; + } + + if (data_size > 0) { + err = ubi_io_write_data(ubi, buf, to, 0, aldata_size); + if (err) + goto out_unlock; + + /* + * We've written the data and are going to read it back to make + * sure it was written correctly. + */ + buf1 = kmalloc(aldata_size, GFP_KERNEL); + if (!buf1) { + err = -ENOMEM; + goto out_unlock; + } + + cond_resched(); + + err = ubi_io_read_data(ubi, buf1, to, 0, aldata_size); + if (err) { + if (err != UBI_IO_BITFLIPS) + ubi_warn("cannot read data back from PEB %d", + to); + goto out_unlock; + } + + cond_resched(); + + if (memcmp(buf, buf1, aldata_size)) { + ubi_warn("read data back from PEB %d - it is different", + to); + goto out_unlock; + } + } + + ubi_assert(vol->eba_tbl[lnum] == from); + vol->eba_tbl[lnum] = to; + + leb_write_unlock(ubi, vol_id, lnum); + kfree(buf); + kfree(buf1); + + return 0; + +out_unlock: + leb_write_unlock(ubi, vol_id, lnum); + kfree(buf); + kfree(buf1); + return err; +} + +/** + * ubi_eba_init_scan - initialize the EBA unit using scanning information. + * @ubi: UBI device description object + * @si: scanning information + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) +{ + int i, j, err, num_volumes; + struct ubi_scan_volume *sv; + struct ubi_volume *vol; + struct ubi_scan_leb *seb; + struct rb_node *rb; + + dbg_eba("initialize EBA unit"); + + spin_lock_init(&ubi->ltree_lock); + ubi->ltree = RB_ROOT; + + if (ubi_devices_cnt == 0) { + ltree_slab = kmem_cache_create("ubi_ltree_slab", + sizeof(struct ltree_entry), 0, + 0, <ree_entry_ctor, NULL); + if (!ltree_slab) + return -ENOMEM; + } + + ubi->global_sqnum = si->max_sqnum + 1; + num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; + + for (i = 0; i < num_volumes; i++) { + vol = ubi->volumes[i]; + if (!vol) + continue; + + cond_resched(); + + vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), + GFP_KERNEL); + if (!vol->eba_tbl) { + err = -ENOMEM; + goto out_free; + } + + for (j = 0; j < vol->reserved_pebs; j++) + vol->eba_tbl[j] = UBI_LEB_UNMAPPED; + + sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i)); + if (!sv) + continue; + + ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { + if (seb->lnum >= vol->reserved_pebs) + /* + * This may happen in case of an unclean reboot + * during re-size. 
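After writing the copied data, the routine above reads it back from the target PEB into a second buffer and compares the two with memcmp(); any difference cancels the move instead of silently keeping a corrupt copy. The verify step in isolation, modelled against an in-memory "flash" buffer (the flash_write()/flash_read() helpers are stand-ins for the MTD I/O, not real API):

#include <stdio.h>
#include <string.h>

#define PEB_SIZE 64

static char flash_peb[PEB_SIZE];	/* stands in for the target PEB */

static void flash_write(const void *buf, int len)
{
	memcpy(flash_peb, buf, len);
}

static void flash_read(void *buf, int len)
{
	memcpy(buf, flash_peb, len);
}

int main(void)
{
	char buf[PEB_SIZE] = "logical eraseblock payload";
	char buf1[PEB_SIZE];

	flash_write(buf, sizeof(buf));

	/* Read the data back and make sure it really landed intact. */
	flash_read(buf1, sizeof(buf1));
	if (memcmp(buf, buf1, sizeof(buf)) != 0) {
		printf("read data back - it is different, cancel the move\n");
		return 1;
	}
	printf("copy verified\n");
	return 0;
}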
+ */ + ubi_scan_move_to_list(sv, seb, &si->erase); + vol->eba_tbl[seb->lnum] = seb->pnum; + } + } + + if (ubi->bad_allowed) { + ubi_calculate_reserved(ubi); + + if (ubi->avail_pebs < ubi->beb_rsvd_level) { + /* No enough free physical eraseblocks */ + ubi->beb_rsvd_pebs = ubi->avail_pebs; + ubi_warn("cannot reserve enough PEBs for bad PEB " + "handling, reserved %d, need %d", + ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); + } else + ubi->beb_rsvd_pebs = ubi->beb_rsvd_level; + + ubi->avail_pebs -= ubi->beb_rsvd_pebs; + ubi->rsvd_pebs += ubi->beb_rsvd_pebs; + } + + dbg_eba("EBA unit is initialized"); + return 0; + +out_free: + for (i = 0; i < num_volumes; i++) { + if (!ubi->volumes[i]) + continue; + kfree(ubi->volumes[i]->eba_tbl); + } + if (ubi_devices_cnt == 0) + kmem_cache_destroy(ltree_slab); + return err; +} + +/** + * ubi_eba_close - close EBA unit. + * @ubi: UBI device description object + */ +void ubi_eba_close(const struct ubi_device *ubi) +{ + int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; + + dbg_eba("close EBA unit"); + + for (i = 0; i < num_volumes; i++) { + if (!ubi->volumes[i]) + continue; + kfree(ubi->volumes[i]->eba_tbl); + } + if (ubi_devices_cnt == 1) + kmem_cache_destroy(ltree_slab); +} diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c new file mode 100644 index 000000000000..c8bbfd1e67ab --- /dev/null +++ b/drivers/mtd/ubi/gluebi.c @@ -0,0 +1,324 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel + */ + +/* + * This file includes implementation of fake MTD devices for each UBI volume. + * This sounds strange, but it is in fact quite useful to make MTD-oriented + * software (including all the legacy software) to work on top of UBI. + * + * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit + * size (mtd->writesize) is equivalent to the UBI minimal I/O unit. The + * eraseblock size is equivalent to the logical eraseblock size of the volume. + */ + +#include +#include "ubi.h" + +/** + * gluebi_get_device - get MTD device reference. + * @mtd: the MTD device description object + * + * This function is called every time the MTD device is being opened and + * implements the MTD get_device() operation. Returns zero in case of success + * and a negative error code in case of failure. + */ +static int gluebi_get_device(struct mtd_info *mtd) +{ + struct ubi_volume *vol; + + vol = container_of(mtd, struct ubi_volume, gluebi_mtd); + + /* + * We do not introduce locks for gluebi reference count because the + * get_device()/put_device() calls are already serialized at MTD. + */ + if (vol->gluebi_refcount > 0) { + /* + * The MTD device is already referenced and this is just one + * more reference. 
MTD allows many users to open the same + * volume simultaneously and do not distinguish between + * readers/writers/exclusive openers as UBI does. So we do not + * open the UBI volume again - just increase the reference + * counter and return. + */ + vol->gluebi_refcount += 1; + return 0; + } + + /* + * This is the first reference to this UBI volume via the MTD device + * interface. Open the corresponding volume in read-write mode. + */ + vol->gluebi_desc = ubi_open_volume(vol->ubi->ubi_num, vol->vol_id, + UBI_READWRITE); + if (IS_ERR(vol->gluebi_desc)) + return PTR_ERR(vol->gluebi_desc); + vol->gluebi_refcount += 1; + return 0; +} + +/** + * gluebi_put_device - put MTD device reference. + * @mtd: the MTD device description object + * + * This function is called every time the MTD device is being put. Returns + * zero in case of success and a negative error code in case of failure. + */ +static void gluebi_put_device(struct mtd_info *mtd) +{ + struct ubi_volume *vol; + + vol = container_of(mtd, struct ubi_volume, gluebi_mtd); + vol->gluebi_refcount -= 1; + ubi_assert(vol->gluebi_refcount >= 0); + if (vol->gluebi_refcount == 0) + ubi_close_volume(vol->gluebi_desc); +} + +/** + * gluebi_read - read operation of emulated MTD devices. + * @mtd: MTD device description object + * @from: absolute offset from where to read + * @len: how many bytes to read + * @retlen: count of read bytes is returned here + * @buf: buffer to store the read data + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, unsigned char *buf) +{ + int err = 0, lnum, offs, total_read; + struct ubi_volume *vol; + struct ubi_device *ubi; + uint64_t tmp = from; + + dbg_msg("read %zd bytes from offset %lld", len, from); + + if (len < 0 || from < 0 || from + len > mtd->size) + return -EINVAL; + + vol = container_of(mtd, struct ubi_volume, gluebi_mtd); + ubi = vol->ubi; + + offs = do_div(tmp, mtd->erasesize); + lnum = tmp; + + total_read = len; + while (total_read) { + size_t to_read = mtd->erasesize - offs; + + if (to_read > total_read) + to_read = total_read; + + err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs, + to_read, 0); + if (err) + break; + + lnum += 1; + offs = 0; + total_read -= to_read; + buf += to_read; + } + + *retlen = len - total_read; + return err; +} + +/** + * gluebi_write - write operation of emulated MTD devices. + * @mtd: MTD device description object + * @to: absolute offset where to write + * @len: how many bytes to write + * @retlen: count of written bytes is returned here + * @buf: buffer with data to write + * + * This function returns zero in case of success and a negative error code in + * case of failure. 
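gluebi presents a UBI volume as a flat MTD device, so every read or write first converts the linear offset into a (logical eraseblock, offset) pair with do_div() and then walks whole eraseblocks until the request is satisfied, exactly as the loop in gluebi_read() does. The same arithmetic in plain userspace C, with ordinary 64-bit division replacing do_div(); the eraseblock size and the request below are example values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t erasesize = 128 * 1024;	/* one LEB, example value */
	uint64_t from = 300 * 1024;		/* linear MTD offset */
	size_t len = 200 * 1024, total = len;

	/* What do_div(tmp, mtd->erasesize) computes in gluebi_read(). */
	uint32_t offs = (uint32_t)(from % erasesize);
	uint32_t lnum = (uint32_t)(from / erasesize);

	while (total) {
		size_t to_read = erasesize - offs;

		if (to_read > total)
			to_read = total;
		printf("read %zu bytes from LEB %u at offset %u\n",
		       to_read, lnum, offs);

		lnum += 1;
		offs = 0;
		total -= to_read;
	}
	return 0;
}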
+ */ +static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + int err = 0, lnum, offs, total_written; + struct ubi_volume *vol; + struct ubi_device *ubi; + uint64_t tmp = to; + + dbg_msg("write %zd bytes to offset %lld", len, to); + + if (len < 0 || to < 0 || len + to > mtd->size) + return -EINVAL; + + vol = container_of(mtd, struct ubi_volume, gluebi_mtd); + ubi = vol->ubi; + + if (ubi->ro_mode) + return -EROFS; + + offs = do_div(tmp, mtd->erasesize); + lnum = tmp; + + if (len % mtd->writesize || offs % mtd->writesize) + return -EINVAL; + + total_written = len; + while (total_written) { + size_t to_write = mtd->erasesize - offs; + + if (to_write > total_written) + to_write = total_written; + + err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs, + to_write, UBI_UNKNOWN); + if (err) + break; + + lnum += 1; + offs = 0; + total_written -= to_write; + buf += to_write; + } + + *retlen = len - total_written; + return err; +} + +/** + * gluebi_erase - erase operation of emulated MTD devices. + * @mtd: the MTD device description object + * @instr: the erase operation description + * + * This function calls the erase callback when finishes. Returns zero in case + * of success and a negative error code in case of failure. + */ +static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) +{ + int err, i, lnum, count; + struct ubi_volume *vol; + struct ubi_device *ubi; + + dbg_msg("erase %u bytes at offset %u", instr->len, instr->addr); + + if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) + return -EINVAL; + + if (instr->len < 0 || instr->addr + instr->len > mtd->size) + return -EINVAL; + + if (instr->addr % mtd->writesize || instr->len % mtd->writesize) + return -EINVAL; + + lnum = instr->addr / mtd->erasesize; + count = instr->len / mtd->erasesize; + + vol = container_of(mtd, struct ubi_volume, gluebi_mtd); + ubi = vol->ubi; + + if (ubi->ro_mode) + return -EROFS; + + for (i = 0; i < count; i++) { + err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i); + if (err) + goto out_err; + } + + /* + * MTD erase operations are synchronous, so we have to make sure the + * physical eraseblock is wiped out. + */ + err = ubi_wl_flush(ubi); + if (err) + goto out_err; + + instr->state = MTD_ERASE_DONE; + mtd_erase_callback(instr); + return 0; + +out_err: + instr->state = MTD_ERASE_FAILED; + instr->fail_addr = lnum * mtd->erasesize; + return err; +} + +/** + * ubi_create_gluebi - initialize gluebi for an UBI volume. + * @ubi: UBI device description object + * @vol: volume description object + * + * This function is called when an UBI volume is created in order to create + * corresponding fake MTD device. Returns zero in case of success and a + * negative error code in case of failure. 
+ */ +int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) +{ + int err; + struct mtd_info *mtd = &vol->gluebi_mtd; + + mtd->name = kmemdup(vol->name, vol->name_len + 1, GFP_KERNEL); + if (!mtd->name) + return -ENOMEM; + + mtd->type = MTD_UBIVOLUME; + if (!ubi->ro_mode) + mtd->flags = MTD_WRITEABLE; + mtd->writesize = ubi->min_io_size; + mtd->owner = THIS_MODULE; + mtd->size = vol->usable_leb_size * vol->reserved_pebs; + mtd->erasesize = vol->usable_leb_size; + mtd->read = gluebi_read; + mtd->write = gluebi_write; + mtd->erase = gluebi_erase; + mtd->get_device = gluebi_get_device; + mtd->put_device = gluebi_put_device; + + if (add_mtd_device(mtd)) { + ubi_err("cannot not add MTD device\n"); + kfree(mtd->name); + return -ENFILE; + } + + dbg_msg("added mtd%d (\"%s\"), size %u, EB size %u", + mtd->index, mtd->name, mtd->size, mtd->erasesize); + return 0; +} + +/** + * ubi_destroy_gluebi - close gluebi for an UBI volume. + * @vol: volume description object + * + * This function is called when an UBI volume is removed in order to remove + * corresponding fake MTD device. Returns zero in case of success and a + * negative error code in case of failure. + */ +int ubi_destroy_gluebi(struct ubi_volume *vol) +{ + int err; + struct mtd_info *mtd = &vol->gluebi_mtd; + + dbg_msg("remove mtd%d", mtd->index); + err = del_mtd_device(mtd); + if (err) + return err; + kfree(mtd->name); + return 0; +} diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c new file mode 100644 index 000000000000..438914d05151 --- /dev/null +++ b/drivers/mtd/ubi/io.c @@ -0,0 +1,1259 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2006, 2007 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * UBI input/output unit. + * + * This unit provides a uniform way to work with all kinds of the underlying + * MTD devices. It also implements handy functions for reading and writing UBI + * headers. + * + * We are trying to have a paranoid mindset and not to trust to what we read + * from the flash media in order to be more secure and robust. So this unit + * validates every single header it reads from the flash media. + * + * Some words about how the eraseblock headers are stored. + * + * The erase counter header is always stored at offset zero. By default, the + * VID header is stored after the EC header at the closest aligned offset + * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID + * header at the closest aligned offset. But this default layout may be + * changed. For example, for different reasons (e.g., optimization) UBI may be + * asked to put the VID header at further offset, and even at an unaligned + * offset. 
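With the default layout the EC header sits at offset zero, the VID header starts at the first minimal-I/O-unit boundary after it, and data starts at the next boundary after the VID header. The sketch below shows roughly how those default offsets fall out for a few example I/O-unit sizes; the 64-byte header sizes are my reading of UBI_EC_HDR_SIZE and UBI_VID_HDR_SIZE, and the whole calculation is illustrative rather than the exact attach-time logic.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

#define EC_HDR_SIZE  64		/* assumed sizeof(struct ubi_ec_hdr) */
#define VID_HDR_SIZE 64		/* assumed sizeof(struct ubi_vid_hdr) */

int main(void)
{
	int io_sizes[] = { 1, 512, 2048 };	/* NOR, sub-page, NAND page */
	int i;

	for (i = 0; i < 3; i++) {
		int min_io = io_sizes[i];
		int vid_hdr_offset = ALIGN(EC_HDR_SIZE, min_io);
		int data_offset = ALIGN(vid_hdr_offset + VID_HDR_SIZE, min_io);

		printf("min I/O %4d: EC @0, VID @%4d, data @%4d\n",
		       min_io, vid_hdr_offset, data_offset);
	}
	return 0;
}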
Of course, if the offset of the VID header is unaligned, UBI adds + * proper padding in front of it. Data offset may also be changed but it has to + * be aligned. + * + * About minimal I/O units. In general, UBI assumes flash device model where + * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1, + * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the + * @ubi->mtd->writesize field. But as an exception, UBI admits of using another + * (smaller) minimal I/O unit size for EC and VID headers to make it possible + * to do different optimizations. + * + * This is extremely useful in case of NAND flashes which admit of several + * write operations to one NAND page. In this case UBI can fit EC and VID + * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal + * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still + * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI + * users. + * + * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so + * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID + * headers. + * + * Q: why not just to treat sub-page as a minimal I/O unit of this flash + * device, e.g., make @ubi->min_io_size = 512 in the example above? + * + * A: because when writing a sub-page, MTD still writes a full 2K page but the + * bytes which are no relevant to the sub-page are 0xFF. So, basically, writing + * 4x512 sub-pages is 4 times slower then writing one 2KiB NAND page. Thus, we + * prefer to use sub-pages only for EV and VID headers. + * + * As it was noted above, the VID header may start at a non-aligned offset. + * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page, + * the VID header may reside at offset 1984 which is the last 64 bytes of the + * last sub-page (EC header is always at offset zero). This causes some + * difficulties when reading and writing VID headers. + * + * Suppose we have a 64-byte buffer and we read a VID header at it. We change + * the data and want to write this VID header out. As we can only write in + * 512-byte chunks, we have to allocate one more buffer and copy our VID header + * to offset 448 of this buffer. + * + * The I/O unit does the following trick in order to avoid this extra copy. + * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header + * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the + * VID header is being written out, it shifts the VID header pointer back and + * writes the whole sub-page. 
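The "shift" trick means the VID header buffer is allocated at the full sub-page size (@ubi->vid_hdr_alsize) while callers are handed a pointer @ubi->vid_hdr_shift bytes into it, so writing the header only requires stepping the pointer back to the start of the buffer and writing one whole sub-page, with no extra copy. A userspace sketch of the pointer arithmetic, using the 2KiB-page / 512-byte-sub-page numbers from the comment above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define VID_HDR_SIZE 64

int main(void)
{
	int vid_hdr_offset = 1984;	/* where the header lives in the PEB */
	int vid_hdr_alsize = 512;	/* one sub-page */
	int vid_hdr_shift = vid_hdr_offset % vid_hdr_alsize;	/* 448 */
	char *sub_page, *vid_hdr;

	/* Allocate a whole sub-page, hand out a pointer into it. */
	sub_page = calloc(1, vid_hdr_alsize);
	if (!sub_page)
		return 1;
	vid_hdr = sub_page + vid_hdr_shift;

	/* Callers fill the 64-byte header through vid_hdr ... */
	memset(vid_hdr, 0xAB, VID_HDR_SIZE);

	/*
	 * ... and the write path simply steps back to sub_page and writes
	 * the whole 512 bytes at offset vid_hdr_offset - vid_hdr_shift
	 * (1536 here), with no intermediate copy.
	 */
	printf("write %d bytes at PEB offset %d\n",
	       vid_hdr_alsize, vid_hdr_offset - vid_hdr_shift);
	free(sub_page);
	return 0;
}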
+ */ + +#include +#include +#include "ubi.h" + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum); +static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum); +static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, + const struct ubi_ec_hdr *ec_hdr); +static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); +static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, + const struct ubi_vid_hdr *vid_hdr); +static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum, + int offset, int len); +#else +#define paranoid_check_not_bad(ubi, pnum) 0 +#define paranoid_check_peb_ec_hdr(ubi, pnum) 0 +#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0 +#define paranoid_check_peb_vid_hdr(ubi, pnum) 0 +#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 +#define paranoid_check_all_ff(ubi, pnum, offset, len) 0 +#endif + +/** + * ubi_io_read - read data from a physical eraseblock. + * @ubi: UBI device description object + * @buf: buffer where to store the read data + * @pnum: physical eraseblock number to read from + * @offset: offset within the physical eraseblock from where to read + * @len: how many bytes to read + * + * This function reads data from offset @offset of physical eraseblock @pnum + * and stores the read data in the @buf buffer. The following return codes are + * possible: + * + * o %0 if all the requested data were successfully read; + * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but + * correctable bit-flips were detected; this is harmless but may indicate + * that this eraseblock may become bad soon (but do not have to); + * o %-EBADMSG if the MTD subsystem reported about data data integrity + * problems, for example it can me an ECC error in case of NAND; this most + * probably means that the data is corrupted; + * o %-EIO if some I/O error occurred; + * o other negative error codes in case of other errors. + */ +int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, + int len) +{ + int err, retries = 0; + size_t read; + loff_t addr; + + dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset); + + ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + ubi_assert(offset >= 0 && offset + len <= ubi->peb_size); + ubi_assert(len > 0); + + err = paranoid_check_not_bad(ubi, pnum); + if (err) + return err > 0 ? -EINVAL : err; + + addr = (loff_t)pnum * ubi->peb_size + offset; +retry: + err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); + if (err) { + if (err == -EUCLEAN) { + /* + * -EUCLEAN is reported if there was a bit-flip which + * was corrected, so this is harmless. + */ + ubi_msg("fixable bit-flip detected at PEB %d", pnum); + ubi_assert(len == read); + return UBI_IO_BITFLIPS; + } + + if (read != len && retries++ < UBI_IO_RETRIES) { + dbg_io("error %d while reading %d bytes from PEB %d:%d, " + "read only %zd bytes, retry", + err, len, pnum, offset, read); + yield(); + goto retry; + } + + ubi_err("error %d while reading %d bytes from PEB %d:%d, " + "read %zd bytes", err, len, pnum, offset, read); + ubi_dbg_dump_stack(); + } else { + ubi_assert(len == read); + + if (ubi_dbg_is_bitflip()) { + dbg_msg("bit-flip (emulated)"); + err = UBI_IO_BITFLIPS; + } + } + + return err; +} + +/** + * ubi_io_write - write data to a physical eraseblock. 
+ * @ubi: UBI device description object + * @buf: buffer with the data to write + * @pnum: physical eraseblock number to write to + * @offset: offset within the physical eraseblock where to write + * @len: how many bytes to write + * + * This function writes @len bytes of data from buffer @buf to offset @offset + * of physical eraseblock @pnum. If all the data were successfully written, + * zero is returned. If an error occurred, this function returns a negative + * error code. If %-EIO is returned, the physical eraseblock most probably went + * bad. + * + * Note, in case of an error, it is possible that something was still written + * to the flash media, but may be some garbage. + */ +int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum, + int offset, int len) +{ + int err; + size_t written; + loff_t addr; + + dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset); + + ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + ubi_assert(offset >= 0 && offset + len <= ubi->peb_size); + ubi_assert(offset % ubi->hdrs_min_io_size == 0); + ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0); + + if (ubi->ro_mode) { + ubi_err("read-only mode"); + return -EROFS; + } + + /* The below has to be compiled out if paranoid checks are disabled */ + + err = paranoid_check_not_bad(ubi, pnum); + if (err) + return err > 0 ? -EINVAL : err; + + /* The area we are writing to has to contain all 0xFF bytes */ + err = paranoid_check_all_ff(ubi, pnum, offset, len); + if (err) + return err > 0 ? -EINVAL : err; + + if (offset >= ubi->leb_start) { + /* + * We write to the data area of the physical eraseblock. Make + * sure it has valid EC and VID headers. + */ + err = paranoid_check_peb_ec_hdr(ubi, pnum); + if (err) + return err > 0 ? -EINVAL : err; + err = paranoid_check_peb_vid_hdr(ubi, pnum); + if (err) + return err > 0 ? -EINVAL : err; + } + + if (ubi_dbg_is_write_failure()) { + dbg_err("cannot write %d bytes to PEB %d:%d " + "(emulated)", len, pnum, offset); + ubi_dbg_dump_stack(); + return -EIO; + } + + addr = (loff_t)pnum * ubi->peb_size + offset; + err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf); + if (err) { + ubi_err("error %d while writing %d bytes to PEB %d:%d, written" + " %zd bytes", err, len, pnum, offset, written); + ubi_dbg_dump_stack(); + } else + ubi_assert(written == len); + + return err; +} + +/** + * erase_callback - MTD erasure call-back. + * @ei: MTD erase information object. + * + * Note, even though MTD erase interface is asynchronous, all the current + * implementations are synchronous anyway. + */ +static void erase_callback(struct erase_info *ei) +{ + wake_up_interruptible((wait_queue_head_t *)ei->priv); +} + +/** + * do_sync_erase - synchronously erase a physical eraseblock. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to erase + * + * This function synchronously erases physical eraseblock @pnum and returns + * zero in case of success and a negative error code in case of failure. If + * %-EIO is returned, the physical eraseblock most probably went bad. 
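+ *
+ * As a hedged illustration only (the decision to retire an eraseblock is
+ * taken by the UBI units which call into this file, not here), an %-EIO
+ * result from the erase path usually ends up in something like:
+ *
+ *	err = ubi_io_sync_erase(ubi, pnum, 0);
+ *	if (err == -EIO)
+ *		err = ubi_io_mark_bad(ubi, pnum);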
+ */ +static int do_sync_erase(const struct ubi_device *ubi, int pnum) +{ + int err, retries = 0; + struct erase_info ei; + wait_queue_head_t wq; + + dbg_io("erase PEB %d", pnum); + +retry: + init_waitqueue_head(&wq); + memset(&ei, 0, sizeof(struct erase_info)); + + ei.mtd = ubi->mtd; + ei.addr = pnum * ubi->peb_size; + ei.len = ubi->peb_size; + ei.callback = erase_callback; + ei.priv = (unsigned long)&wq; + + err = ubi->mtd->erase(ubi->mtd, &ei); + if (err) { + if (retries++ < UBI_IO_RETRIES) { + dbg_io("error %d while erasing PEB %d, retry", + err, pnum); + yield(); + goto retry; + } + ubi_err("cannot erase PEB %d, error %d", pnum, err); + ubi_dbg_dump_stack(); + return err; + } + + err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE || + ei.state == MTD_ERASE_FAILED); + if (err) { + ubi_err("interrupted PEB %d erasure", pnum); + return -EINTR; + } + + if (ei.state == MTD_ERASE_FAILED) { + if (retries++ < UBI_IO_RETRIES) { + dbg_io("error while erasing PEB %d, retry", pnum); + yield(); + goto retry; + } + ubi_err("cannot erase PEB %d", pnum); + ubi_dbg_dump_stack(); + return -EIO; + } + + err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size); + if (err) + return err > 0 ? -EINVAL : err; + + if (ubi_dbg_is_erase_failure() && !err) { + dbg_err("cannot erase PEB %d (emulated)", pnum); + return -EIO; + } + + return 0; +} + +/** + * check_pattern - check if buffer contains only a certain byte pattern. + * @buf: buffer to check + * @patt: the pattern to check + * @size: buffer size in bytes + * + * This function returns %1 in there are only @patt bytes in @buf, and %0 if + * something else was also found. + */ +static int check_pattern(const void *buf, uint8_t patt, int size) +{ + int i; + + for (i = 0; i < size; i++) + if (((const uint8_t *)buf)[i] != patt) + return 0; + return 1; +} + +/* Patterns to write to a physical eraseblock when torturing it */ +static uint8_t patterns[] = {0xa5, 0x5a, 0x0}; + +/** + * torture_peb - test a supposedly bad physical eraseblock. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to test + * + * This function returns %-EIO if the physical eraseblock did not pass the + * test, a positive number of erase operations done if the test was + * successfully passed, and other negative error codes in case of other errors. 
+ */ +static int torture_peb(const struct ubi_device *ubi, int pnum) +{ + void *buf; + int err, i, patt_count; + + buf = kmalloc(ubi->peb_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + patt_count = ARRAY_SIZE(patterns); + ubi_assert(patt_count > 0); + + for (i = 0; i < patt_count; i++) { + err = do_sync_erase(ubi, pnum); + if (err) + goto out; + + /* Make sure the PEB contains only 0xFF bytes */ + err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size); + if (err) + goto out; + + err = check_pattern(buf, 0xFF, ubi->peb_size); + if (err == 0) { + ubi_err("erased PEB %d, but a non-0xFF byte found", + pnum); + err = -EIO; + goto out; + } + + /* Write a pattern and check it */ + memset(buf, patterns[i], ubi->peb_size); + err = ubi_io_write(ubi, buf, pnum, 0, ubi->peb_size); + if (err) + goto out; + + memset(buf, ~patterns[i], ubi->peb_size); + err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size); + if (err) + goto out; + + err = check_pattern(buf, patterns[i], ubi->peb_size); + if (err == 0) { + ubi_err("pattern %x checking failed for PEB %d", + patterns[i], pnum); + err = -EIO; + goto out; + } + } + + err = patt_count; + +out: + if (err == UBI_IO_BITFLIPS || err == -EBADMSG) + /* + * If a bit-flip or data integrity error was detected, the test + * has not passed because it happened on a freshly erased + * physical eraseblock which means something is wrong with it. + */ + err = -EIO; + kfree(buf); + return err; +} + +/** + * ubi_io_sync_erase - synchronously erase a physical eraseblock. + * @ubi: UBI device description object + * @pnum: physical eraseblock number to erase + * @torture: if this physical eraseblock has to be tortured + * + * This function synchronously erases physical eraseblock @pnum. If @torture + * flag is not zero, the physical eraseblock is checked by means of writing + * different patterns to it and reading them back. If the torturing is enabled, + * the physical eraseblock is erased more then once. + * + * This function returns the number of erasures made in case of success, %-EIO + * if the erasure failed or the torturing test failed, and other negative error + * codes in case of other errors. Note, %-EIO means that the physical + * eraseblock is bad. + */ +int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture) +{ + int err, ret = 0; + + ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + + err = paranoid_check_not_bad(ubi, pnum); + if (err != 0) + return err > 0 ? -EINVAL : err; + + if (ubi->ro_mode) { + ubi_err("read-only mode"); + return -EROFS; + } + + if (torture) { + ret = torture_peb(ubi, pnum); + if (ret < 0) + return ret; + } + + err = do_sync_erase(ubi, pnum); + if (err) + return err; + + return ret + 1; +} + +/** + * ubi_io_is_bad - check if a physical eraseblock is bad. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to check + * + * This function returns a positive number if the physical eraseblock is bad, + * zero if not, and a negative error code if an error occurred. + */ +int ubi_io_is_bad(const struct ubi_device *ubi, int pnum) +{ + struct mtd_info *mtd = ubi->mtd; + + ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + + if (ubi->bad_allowed) { + int ret; + + ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size); + if (ret < 0) + ubi_err("error %d while checking if PEB %d is bad", + ret, pnum); + else if (ret) + dbg_io("PEB %d is bad", pnum); + return ret; + } + + return 0; +} + +/** + * ubi_io_mark_bad - mark a physical eraseblock as bad. 
+ * @ubi: UBI device description object + * @pnum: the physical eraseblock number to mark + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum) +{ + int err; + struct mtd_info *mtd = ubi->mtd; + + ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + + if (ubi->ro_mode) { + ubi_err("read-only mode"); + return -EROFS; + } + + if (!ubi->bad_allowed) + return 0; + + err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size); + if (err) + ubi_err("cannot mark PEB %d bad, error %d", pnum, err); + return err; +} + +/** + * validate_ec_hdr - validate an erase counter header. + * @ubi: UBI device description object + * @ec_hdr: the erase counter header to check + * + * This function returns zero if the erase counter header is OK, and %1 if + * not. + */ +static int validate_ec_hdr(const struct ubi_device *ubi, + const struct ubi_ec_hdr *ec_hdr) +{ + long long ec; + int vid_hdr_offset, leb_start; + + ec = ubi64_to_cpu(ec_hdr->ec); + vid_hdr_offset = ubi32_to_cpu(ec_hdr->vid_hdr_offset); + leb_start = ubi32_to_cpu(ec_hdr->data_offset); + + if (ec_hdr->version != UBI_VERSION) { + ubi_err("node with incompatible UBI version found: " + "this UBI version is %d, image version is %d", + UBI_VERSION, (int)ec_hdr->version); + goto bad; + } + + if (vid_hdr_offset != ubi->vid_hdr_offset) { + ubi_err("bad VID header offset %d, expected %d", + vid_hdr_offset, ubi->vid_hdr_offset); + goto bad; + } + + if (leb_start != ubi->leb_start) { + ubi_err("bad data offset %d, expected %d", + leb_start, ubi->leb_start); + goto bad; + } + + if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) { + ubi_err("bad erase counter %lld", ec); + goto bad; + } + + return 0; + +bad: + ubi_err("bad EC header"); + ubi_dbg_dump_ec_hdr(ec_hdr); + ubi_dbg_dump_stack(); + return 1; +} + +/** + * ubi_io_read_ec_hdr - read and check an erase counter header. + * @ubi: UBI device description object + * @pnum: physical eraseblock to read from + * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter + * header + * @verbose: be verbose if the header is corrupted or was not found + * + * This function reads erase counter header from physical eraseblock @pnum and + * stores it in @ec_hdr. This function also checks CRC checksum of the read + * erase counter header. The following codes may be returned: + * + * o %0 if the CRC checksum is correct and the header was successfully read; + * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected + * and corrected by the flash driver; this is harmless but may indicate that + * this eraseblock may become bad soon (but may be not); + * o %UBI_IO_BAD_EC_HDR if the erase counter header is corrupted (a CRC error); + * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty; + * o a negative error code in case of failure. + */ +int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum, + struct ubi_ec_hdr *ec_hdr, int verbose) +{ + int err, read_err = 0; + uint32_t crc, magic, hdr_crc; + + dbg_io("read EC header from PEB %d", pnum); + ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + + err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); + if (err) { + if (err != UBI_IO_BITFLIPS && err != -EBADMSG) + return err; + + /* + * We read all the data, but either a correctable bit-flip + * occurred, or MTD reported about some data integrity error, + * like an ECC error in case of NAND. The former is harmless, + * the later may mean that the read data is corrupted. 
But we + * have a CRC check-sum and we will detect this. If the EC + * header is still OK, we just report this as there was a + * bit-flip. + */ + read_err = err; + } + + magic = ubi32_to_cpu(ec_hdr->magic); + if (magic != UBI_EC_HDR_MAGIC) { + /* + * The magic field is wrong. Let's check if we have read all + * 0xFF. If yes, this physical eraseblock is assumed to be + * empty. + * + * But if there was a read error, we do not test it for all + * 0xFFs. Even if it does contain all 0xFFs, this error + * indicates that something is still wrong with this physical + * eraseblock and we anyway cannot treat it as empty. + */ + if (read_err != -EBADMSG && + check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { + /* The physical eraseblock is supposedly empty */ + + /* + * The below is just a paranoid check, it has to be + * compiled out if paranoid checks are disabled. + */ + err = paranoid_check_all_ff(ubi, pnum, 0, + ubi->peb_size); + if (err) + return err > 0 ? UBI_IO_BAD_EC_HDR : err; + + if (verbose) + ubi_warn("no EC header found at PEB %d, " + "only 0xFF bytes", pnum); + return UBI_IO_PEB_EMPTY; + } + + /* + * This is not a valid erase counter header, and these are not + * 0xFF bytes. Report that the header is corrupted. + */ + if (verbose) { + ubi_warn("bad magic number at PEB %d: %08x instead of " + "%08x", pnum, magic, UBI_EC_HDR_MAGIC); + ubi_dbg_dump_ec_hdr(ec_hdr); + } + return UBI_IO_BAD_EC_HDR; + } + + crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); + hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc); + + if (hdr_crc != crc) { + if (verbose) { + ubi_warn("bad EC header CRC at PEB %d, calculated %#08x," + " read %#08x", pnum, crc, hdr_crc); + ubi_dbg_dump_ec_hdr(ec_hdr); + } + return UBI_IO_BAD_EC_HDR; + } + + /* And of course validate what has just been read from the media */ + err = validate_ec_hdr(ubi, ec_hdr); + if (err) { + ubi_err("validation failed for PEB %d", pnum); + return -EINVAL; + } + + return read_err ? UBI_IO_BITFLIPS : 0; +} + +/** + * ubi_io_write_ec_hdr - write an erase counter header. + * @ubi: UBI device description object + * @pnum: physical eraseblock to write to + * @ec_hdr: the erase counter header to write + * + * This function writes erase counter header described by @ec_hdr to physical + * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so + * the caller do not have to fill them. Callers must only fill the @ec_hdr->ec + * field. + * + * This function returns zero in case of success and a negative error code in + * case of failure. If %-EIO is returned, the physical eraseblock most probably + * went bad. + */ +int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum, + struct ubi_ec_hdr *ec_hdr) +{ + int err; + uint32_t crc; + + dbg_io("write EC header to PEB %d", pnum); + ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + + ec_hdr->magic = cpu_to_ubi32(UBI_EC_HDR_MAGIC); + ec_hdr->version = UBI_VERSION; + ec_hdr->vid_hdr_offset = cpu_to_ubi32(ubi->vid_hdr_offset); + ec_hdr->data_offset = cpu_to_ubi32(ubi->leb_start); + crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); + ec_hdr->hdr_crc = cpu_to_ubi32(crc); + + err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr); + if (err) + return -EINVAL; + + err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize); + return err; +} + +/** + * validate_vid_hdr - validate a volume identifier header. + * @ubi: UBI device description object + * @vid_hdr: the volume identifier header to check + * + * This function checks that data stored in the volume identifier header + * @vid_hdr. 
Returns zero if the VID header is OK and %1 if not. + */ +static int validate_vid_hdr(const struct ubi_device *ubi, + const struct ubi_vid_hdr *vid_hdr) +{ + int vol_type = vid_hdr->vol_type; + int copy_flag = vid_hdr->copy_flag; + int vol_id = ubi32_to_cpu(vid_hdr->vol_id); + int lnum = ubi32_to_cpu(vid_hdr->lnum); + int compat = vid_hdr->compat; + int data_size = ubi32_to_cpu(vid_hdr->data_size); + int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs); + int data_pad = ubi32_to_cpu(vid_hdr->data_pad); + int data_crc = ubi32_to_cpu(vid_hdr->data_crc); + int usable_leb_size = ubi->leb_size - data_pad; + + if (copy_flag != 0 && copy_flag != 1) { + dbg_err("bad copy_flag"); + goto bad; + } + + if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 || + data_pad < 0) { + dbg_err("negative values"); + goto bad; + } + + if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) { + dbg_err("bad vol_id"); + goto bad; + } + + if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) { + dbg_err("bad compat"); + goto bad; + } + + if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE && + compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE && + compat != UBI_COMPAT_REJECT) { + dbg_err("bad compat"); + goto bad; + } + + if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { + dbg_err("bad vol_type"); + goto bad; + } + + if (data_pad >= ubi->leb_size / 2) { + dbg_err("bad data_pad"); + goto bad; + } + + if (vol_type == UBI_VID_STATIC) { + /* + * Although from high-level point of view static volumes may + * contain zero bytes of data, but no VID headers can contain + * zero at these fields, because they empty volumes do not have + * mapped logical eraseblocks. + */ + if (used_ebs == 0) { + dbg_err("zero used_ebs"); + goto bad; + } + if (data_size == 0) { + dbg_err("zero data_size"); + goto bad; + } + if (lnum < used_ebs - 1) { + if (data_size != usable_leb_size) { + dbg_err("bad data_size"); + goto bad; + } + } else if (lnum == used_ebs - 1) { + if (data_size == 0) { + dbg_err("bad data_size at last LEB"); + goto bad; + } + } else { + dbg_err("too high lnum"); + goto bad; + } + } else { + if (copy_flag == 0) { + if (data_crc != 0) { + dbg_err("non-zero data CRC"); + goto bad; + } + if (data_size != 0) { + dbg_err("non-zero data_size"); + goto bad; + } + } else { + if (data_size == 0) { + dbg_err("zero data_size of copy"); + goto bad; + } + } + if (used_ebs != 0) { + dbg_err("bad used_ebs"); + goto bad; + } + } + + return 0; + +bad: + ubi_err("bad VID header"); + ubi_dbg_dump_vid_hdr(vid_hdr); + ubi_dbg_dump_stack(); + return 1; +} + +/** + * ubi_io_read_vid_hdr - read and check a volume identifier header. + * @ubi: UBI device description object + * @pnum: physical eraseblock number to read from + * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume + * identifier header + * @verbose: be verbose if the header is corrupted or wasn't found + * + * This function reads the volume identifier header from physical eraseblock + * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read + * volume identifier header. 
The following codes may be returned: + * + * o %0 if the CRC checksum is correct and the header was successfully read; + * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected + * and corrected by the flash driver; this is harmless but may indicate that + * this eraseblock may become bad soon; + * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC + * error detected); + * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID + * header there); + * o a negative error code in case of failure. + */ +int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum, + struct ubi_vid_hdr *vid_hdr, int verbose) +{ + int err, read_err = 0; + uint32_t crc, magic, hdr_crc; + void *p; + + dbg_io("read VID header from PEB %d", pnum); + ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + + p = (char *)vid_hdr - ubi->vid_hdr_shift; + err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, + ubi->vid_hdr_alsize); + if (err) { + if (err != UBI_IO_BITFLIPS && err != -EBADMSG) + return err; + + /* + * We read all the data, but either a correctable bit-flip + * occurred, or MTD reported about some data integrity error, + * like an ECC error in case of NAND. The former is harmless, + * the later may mean the read data is corrupted. But we have a + * CRC check-sum and we will identify this. If the VID header is + * still OK, we just report this as there was a bit-flip. + */ + read_err = err; + } + + magic = ubi32_to_cpu(vid_hdr->magic); + if (magic != UBI_VID_HDR_MAGIC) { + /* + * If we have read all 0xFF bytes, the VID header probably does + * not exist and the physical eraseblock is assumed to be free. + * + * But if there was a read error, we do not test the data for + * 0xFFs. Even if it does contain all 0xFFs, this error + * indicates that something is still wrong with this physical + * eraseblock and it cannot be regarded as free. + */ + if (read_err != -EBADMSG && + check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { + /* The physical eraseblock is supposedly free */ + + /* + * The below is just a paranoid check, it has to be + * compiled out if paranoid checks are disabled. + */ + err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start, + ubi->leb_size); + if (err) + return err > 0 ? UBI_IO_BAD_VID_HDR : err; + + if (verbose) + ubi_warn("no VID header found at PEB %d, " + "only 0xFF bytes", pnum); + return UBI_IO_PEB_FREE; + } + + /* + * This is not a valid VID header, and these are not 0xFF + * bytes. Report that the header is corrupted. + */ + if (verbose) { + ubi_warn("bad magic number at PEB %d: %08x instead of " + "%08x", pnum, magic, UBI_VID_HDR_MAGIC); + ubi_dbg_dump_vid_hdr(vid_hdr); + } + return UBI_IO_BAD_VID_HDR; + } + + crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); + hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc); + + if (hdr_crc != crc) { + if (verbose) { + ubi_warn("bad CRC at PEB %d, calculated %#08x, " + "read %#08x", pnum, crc, hdr_crc); + ubi_dbg_dump_vid_hdr(vid_hdr); + } + return UBI_IO_BAD_VID_HDR; + } + + /* Validate the VID header that we have just read */ + err = validate_vid_hdr(ubi, vid_hdr); + if (err) { + ubi_err("validation failed for PEB %d", pnum); + return -EINVAL; + } + + return read_err ? UBI_IO_BITFLIPS : 0; +} + +/** + * ubi_io_write_vid_hdr - write a volume identifier header. 
+ * @ubi: UBI device description object + * @pnum: the physical eraseblock number to write to + * @vid_hdr: the volume identifier header to write + * + * This function writes the volume identifier header described by @vid_hdr to + * physical eraseblock @pnum. This function automatically fills the + * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates + * header CRC checksum and stores it at vid_hdr->hdr_crc. + * + * This function returns zero in case of success and a negative error code in + * case of failure. If %-EIO is returned, the physical eraseblock probably went + * bad. + */ +int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum, + struct ubi_vid_hdr *vid_hdr) +{ + int err; + uint32_t crc; + void *p; + + dbg_io("write VID header to PEB %d", pnum); + ubi_assert(pnum >= 0 && pnum < ubi->peb_count); + + err = paranoid_check_peb_ec_hdr(ubi, pnum); + if (err) + return err > 0 ? -EINVAL: err; + + vid_hdr->magic = cpu_to_ubi32(UBI_VID_HDR_MAGIC); + vid_hdr->version = UBI_VERSION; + crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); + vid_hdr->hdr_crc = cpu_to_ubi32(crc); + + err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr); + if (err) + return -EINVAL; + + p = (char *)vid_hdr - ubi->vid_hdr_shift; + err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, + ubi->vid_hdr_alsize); + return err; +} + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID + +/** + * paranoid_check_not_bad - ensure that a physical eraseblock is not bad. + * @ubi: UBI device description object + * @pnum: physical eraseblock number to check + * + * This function returns zero if the physical eraseblock is good, a positive + * number if it is bad and a negative error code if an error occurred. + */ +static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum) +{ + int err; + + err = ubi_io_is_bad(ubi, pnum); + if (!err) + return err; + + ubi_err("paranoid check failed for PEB %d", pnum); + ubi_dbg_dump_stack(); + return err; +} + +/** + * paranoid_check_ec_hdr - check if an erase counter header is all right. + * @ubi: UBI device description object + * @pnum: physical eraseblock number the erase counter header belongs to + * @ec_hdr: the erase counter header to check + * + * This function returns zero if the erase counter header contains valid + * values, and %1 if not. + */ +static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, + const struct ubi_ec_hdr *ec_hdr) +{ + int err; + uint32_t magic; + + magic = ubi32_to_cpu(ec_hdr->magic); + if (magic != UBI_EC_HDR_MAGIC) { + ubi_err("bad magic %#08x, must be %#08x", + magic, UBI_EC_HDR_MAGIC); + goto fail; + } + + err = validate_ec_hdr(ubi, ec_hdr); + if (err) { + ubi_err("paranoid check failed for PEB %d", pnum); + goto fail; + } + + return 0; + +fail: + ubi_dbg_dump_ec_hdr(ec_hdr); + ubi_dbg_dump_stack(); + return 1; +} + +/** + * paranoid_check_peb_ec_hdr - check that the erase counter header of a + * physical eraseblock is in-place and is all right. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to check + * + * This function returns zero if the erase counter header is all right, %1 if + * not, and a negative error code if an error occurred. 
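+ *
+ * Note that when CONFIG_MTD_UBI_DEBUG_PARANOID is not set, the stub macros
+ * at the top of this file turn such calls into the constant 0, so the usual
+ * calling pattern, sketched here for illustration,
+ *
+ *	err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ *	if (err)
+ *		return err > 0 ? -EINVAL : err;
+ *
+ * costs nothing in non-debug builds, while in debug builds the positive
+ * "check failed" result is folded into %-EINVAL.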
+ */ +static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) +{ + int err; + uint32_t crc, hdr_crc; + struct ubi_ec_hdr *ec_hdr; + + ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); + if (!ec_hdr) + return -ENOMEM; + + err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); + if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) + goto exit; + + crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); + hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc); + if (hdr_crc != crc) { + ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc); + ubi_err("paranoid check failed for PEB %d", pnum); + ubi_dbg_dump_ec_hdr(ec_hdr); + ubi_dbg_dump_stack(); + err = 1; + goto exit; + } + + err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr); + +exit: + kfree(ec_hdr); + return err; +} + +/** + * paranoid_check_vid_hdr - check that a volume identifier header is all right. + * @ubi: UBI device description object + * @pnum: physical eraseblock number the volume identifier header belongs to + * @vid_hdr: the volume identifier header to check + * + * This function returns zero if the volume identifier header is all right, and + * %1 if not. + */ +static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, + const struct ubi_vid_hdr *vid_hdr) +{ + int err; + uint32_t magic; + + magic = ubi32_to_cpu(vid_hdr->magic); + if (magic != UBI_VID_HDR_MAGIC) { + ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x", + magic, pnum, UBI_VID_HDR_MAGIC); + goto fail; + } + + err = validate_vid_hdr(ubi, vid_hdr); + if (err) { + ubi_err("paranoid check failed for PEB %d", pnum); + goto fail; + } + + return err; + +fail: + ubi_err("paranoid check failed for PEB %d", pnum); + ubi_dbg_dump_vid_hdr(vid_hdr); + ubi_dbg_dump_stack(); + return 1; + +} + +/** + * paranoid_check_peb_vid_hdr - check that the volume identifier header of a + * physical eraseblock is in-place and is all right. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to check + * + * This function returns zero if the volume identifier header is all right, + * %1 if not, and a negative error code if an error occurred. + */ +static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) +{ + int err; + uint32_t crc, hdr_crc; + struct ubi_vid_hdr *vid_hdr; + void *p; + + vid_hdr = ubi_zalloc_vid_hdr(ubi); + if (!vid_hdr) + return -ENOMEM; + + p = (char *)vid_hdr - ubi->vid_hdr_shift; + err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, + ubi->vid_hdr_alsize); + if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) + goto exit; + + crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); + hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc); + if (hdr_crc != crc) { + ubi_err("bad VID header CRC at PEB %d, calculated %#08x, " + "read %#08x", pnum, crc, hdr_crc); + ubi_err("paranoid check failed for PEB %d", pnum); + ubi_dbg_dump_vid_hdr(vid_hdr); + ubi_dbg_dump_stack(); + err = 1; + goto exit; + } + + err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr); + +exit: + ubi_free_vid_hdr(ubi, vid_hdr); + return err; +} + +/** + * paranoid_check_all_ff - check that a region of flash is empty. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to check + * @offset: the starting offset within the physical eraseblock to check + * @len: the length of the region to check + * + * This function returns zero if only 0xFF bytes are present at offset + * @offset of the physical eraseblock @pnum, %1 if not, and a negative error + * code if an error occurred. 
+ */ +static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum, + int offset, int len) +{ + size_t read; + int err; + void *buf; + loff_t addr = (loff_t)pnum * ubi->peb_size + offset; + + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); + if (err && err != -EUCLEAN) { + ubi_err("error %d while reading %d bytes from PEB %d:%d, " + "read %zd bytes", err, len, pnum, offset, read); + goto error; + } + + err = check_pattern(buf, 0xFF, len); + if (err == 0) { + ubi_err("flash region at PEB %d:%d, length %d does not " + "contain all 0xFF bytes", pnum, offset, len); + goto fail; + } + + kfree(buf); + return 0; + +fail: + ubi_err("paranoid check failed for PEB %d", pnum); + dbg_msg("hex dump of the %d-%d region", offset, offset + len); + ubi_dbg_hexdump(buf, len); + err = 1; +error: + ubi_dbg_dump_stack(); + kfree(buf); + return err; +} + +#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c new file mode 100644 index 000000000000..d352c4575c3d --- /dev/null +++ b/drivers/mtd/ubi/kapi.c @@ -0,0 +1,575 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* This file mostly implements UBI kernel API functions */ + +#include +#include +#include +#include "ubi.h" + +/** + * ubi_get_device_info - get information about UBI device. + * @ubi_num: UBI device number + * @di: the information is stored here + * + * This function returns %0 in case of success and a %-ENODEV if there is no + * such UBI device. + */ +int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) +{ + const struct ubi_device *ubi; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || + !ubi_devices[ubi_num]) { + module_put(THIS_MODULE); + return -ENODEV; + } + + ubi = ubi_devices[ubi_num]; + di->ubi_num = ubi->ubi_num; + di->leb_size = ubi->leb_size; + di->min_io_size = ubi->min_io_size; + di->ro_mode = ubi->ro_mode; + di->cdev = MKDEV(ubi->major, 0); + module_put(THIS_MODULE); + return 0; +} +EXPORT_SYMBOL_GPL(ubi_get_device_info); + +/** + * ubi_get_volume_info - get information about UBI volume. 
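+ *
+ * A hedged usage sketch; @desc is assumed to have been obtained from
+ * ubi_open_volume() beforehand:
+ *
+ *	struct ubi_volume_info vi;
+ *	int leb_buf_len;
+ *
+ *	ubi_get_volume_info(desc, &vi);
+ *	leb_buf_len = vi.usable_leb_size;
+ *
+ * i.e. callers typically size their per-LEB buffers from the reported
+ * usable LEB size.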
+ * @desc: volume descriptor + * @vi: the information is stored here + */ +void ubi_get_volume_info(struct ubi_volume_desc *desc, + struct ubi_volume_info *vi) +{ + const struct ubi_volume *vol = desc->vol; + const struct ubi_device *ubi = vol->ubi; + + vi->vol_id = vol->vol_id; + vi->ubi_num = ubi->ubi_num; + vi->size = vol->reserved_pebs; + vi->used_bytes = vol->used_bytes; + vi->vol_type = vol->vol_type; + vi->corrupted = vol->corrupted; + vi->upd_marker = vol->upd_marker; + vi->alignment = vol->alignment; + vi->usable_leb_size = vol->usable_leb_size; + vi->name_len = vol->name_len; + vi->name = vol->name; + vi->cdev = MKDEV(ubi->major, vi->vol_id + 1); +} +EXPORT_SYMBOL_GPL(ubi_get_volume_info); + +/** + * ubi_open_volume - open UBI volume. + * @ubi_num: UBI device number + * @vol_id: volume ID + * @mode: open mode + * + * The @mode parameter specifies if the volume should be opened in read-only + * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that + * nobody else will be able to open this volume. UBI allows to have many volume + * readers and one writer at a time. + * + * If a static volume is being opened for the first time since boot, it will be + * checked by this function, which means it will be fully read and the CRC + * checksum of each logical eraseblock will be checked. + * + * This function returns volume descriptor in case of success and a negative + * error code in case of failure. + */ +struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) +{ + int err; + struct ubi_volume_desc *desc; + struct ubi_device *ubi = ubi_devices[ubi_num]; + struct ubi_volume *vol; + + dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); + + err = -ENODEV; + if (!try_module_get(THIS_MODULE)) + return ERR_PTR(err); + + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi) + goto out_put; + + err = -EINVAL; + if (vol_id < 0 || vol_id >= ubi->vtbl_slots) + goto out_put; + if (mode != UBI_READONLY && mode != UBI_READWRITE && + mode != UBI_EXCLUSIVE) + goto out_put; + + desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); + if (!desc) { + err = -ENOMEM; + goto out_put; + } + + spin_lock(&ubi->volumes_lock); + vol = ubi->volumes[vol_id]; + if (!vol) { + err = -ENODEV; + goto out_unlock; + } + + err = -EBUSY; + switch (mode) { + case UBI_READONLY: + if (vol->exclusive) + goto out_unlock; + vol->readers += 1; + break; + + case UBI_READWRITE: + if (vol->exclusive || vol->writers > 0) + goto out_unlock; + vol->writers += 1; + break; + + case UBI_EXCLUSIVE: + if (vol->exclusive || vol->writers || vol->readers) + goto out_unlock; + vol->exclusive = 1; + break; + } + spin_unlock(&ubi->volumes_lock); + + desc->vol = vol; + desc->mode = mode; + + /* + * To prevent simultaneous checks of the same volume we use @vtbl_mutex, + * although it is not the purpose it was introduced for. + */ + mutex_lock(&ubi->vtbl_mutex); + if (!vol->checked) { + /* This is the first open - check the volume */ + err = ubi_check_volume(ubi, vol_id); + if (err < 0) { + mutex_unlock(&ubi->vtbl_mutex); + ubi_close_volume(desc); + return ERR_PTR(err); + } + if (err == 1) { + ubi_warn("volume %d on UBI device %d is corrupted", + vol_id, ubi->ubi_num); + vol->corrupted = 1; + } + vol->checked = 1; + } + mutex_unlock(&ubi->vtbl_mutex); + return desc; + +out_unlock: + spin_unlock(&ubi->volumes_lock); + kfree(desc); +out_put: + module_put(THIS_MODULE); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(ubi_open_volume); + +/** + * ubi_open_volume_nm - open UBI volume by name. 
+ * @ubi_num: UBI device number + * @name: volume name + * @mode: open mode + * + * This function is similar to 'ubi_open_volume()', but opens a volume by name. + */ +struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, + int mode) +{ + int i, vol_id = -1, len; + struct ubi_volume_desc *ret; + struct ubi_device *ubi; + + dbg_msg("open volume %s, mode %d", name, mode); + + if (!name) + return ERR_PTR(-EINVAL); + + len = strnlen(name, UBI_VOL_NAME_MAX + 1); + if (len > UBI_VOL_NAME_MAX) + return ERR_PTR(-EINVAL); + + ret = ERR_PTR(-ENODEV); + if (!try_module_get(THIS_MODULE)) + return ret; + + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num]) + goto out_put; + + ubi = ubi_devices[ubi_num]; + + spin_lock(&ubi->volumes_lock); + /* Walk all volumes of this UBI device */ + for (i = 0; i < ubi->vtbl_slots; i++) { + struct ubi_volume *vol = ubi->volumes[i]; + + if (vol && len == vol->name_len && !strcmp(name, vol->name)) { + vol_id = i; + break; + } + } + spin_unlock(&ubi->volumes_lock); + + if (vol_id < 0) + goto out_put; + + ret = ubi_open_volume(ubi_num, vol_id, mode); + +out_put: + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL_GPL(ubi_open_volume_nm); + +/** + * ubi_close_volume - close UBI volume. + * @desc: volume descriptor + */ +void ubi_close_volume(struct ubi_volume_desc *desc) +{ + struct ubi_volume *vol = desc->vol; + + dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); + + spin_lock(&vol->ubi->volumes_lock); + switch (desc->mode) { + case UBI_READONLY: + vol->readers -= 1; + break; + case UBI_READWRITE: + vol->writers -= 1; + break; + case UBI_EXCLUSIVE: + vol->exclusive = 0; + } + spin_unlock(&vol->ubi->volumes_lock); + + kfree(desc); + module_put(THIS_MODULE); +} +EXPORT_SYMBOL_GPL(ubi_close_volume); + +/** + * ubi_leb_read - read data. + * @desc: volume descriptor + * @lnum: logical eraseblock number to read from + * @buf: buffer where to store the read data + * @offset: offset within the logical eraseblock to read from + * @len: how many bytes to read + * @check: whether UBI has to check the read data's CRC or not. + * + * This function reads data from offset @offset of logical eraseblock @lnum and + * stores the data at @buf. When reading from static volumes, @check specifies + * whether the data has to be checked or not. If yes, the whole logical + * eraseblock will be read and its CRC checksum will be checked (i.e., the CRC + * checksum is per-eraseblock). So checking may substantially slow down the + * read speed. The @check argument is ignored for dynamic volumes. + * + * In case of success, this function returns zero. In case of failure, this + * function returns a negative error code. + * + * %-EBADMSG error code is returned: + * o for both static and dynamic volumes if MTD driver has detected a data + * integrity problem (unrecoverable ECC checksum mismatch in case of NAND); + * o for static volumes in case of data CRC mismatch. + * + * If the volume is damaged because of an interrupted update this function just + * returns immediately with %-EBADF error code. 
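+ *
+ * A minimal, hedged usage sketch (the volume name, device number and buffer
+ * size are purely illustrative):
+ *
+ *	struct ubi_volume_desc *desc;
+ *	char buf[128];
+ *	int err;
+ *
+ *	desc = ubi_open_volume_nm(0, "config", UBI_READONLY);
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ *	err = ubi_leb_read(desc, 0, buf, 0, sizeof(buf), 0);
+ *	ubi_close_volume(desc);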
+ */ +int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, + int len, int check) +{ + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + int err, vol_id = vol->vol_id; + + dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); + + if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || + lnum >= vol->used_ebs || offset < 0 || len < 0 || + offset + len > vol->usable_leb_size) + return -EINVAL; + + if (vol->vol_type == UBI_STATIC_VOLUME && lnum == vol->used_ebs - 1 && + offset + len > vol->last_eb_bytes) + return -EINVAL; + + if (vol->upd_marker) + return -EBADF; + if (len == 0) + return 0; + + err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check); + if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { + ubi_warn("mark volume %d as corrupted", vol_id); + vol->corrupted = 1; + } + + return err; +} +EXPORT_SYMBOL_GPL(ubi_leb_read); + +/** + * ubi_leb_write - write data. + * @desc: volume descriptor + * @lnum: logical eraseblock number to write to + * @buf: data to write + * @offset: offset within the logical eraseblock where to write + * @len: how many bytes to write + * @dtype: expected data type + * + * This function writes @len bytes of data from @buf to offset @offset of + * logical eraseblock @lnum. The @dtype argument describes expected lifetime of + * the data. + * + * This function takes care of physical eraseblock write failures. If write to + * the physical eraseblock write operation fails, the logical eraseblock is + * re-mapped to another physical eraseblock, the data is recovered, and the + * write finishes. UBI has a pool of reserved physical eraseblocks for this. + * + * If all the data were successfully written, zero is returned. If an error + * occurred and UBI has not been able to recover from it, this function returns + * a negative error code. Note, in case of an error, it is possible that + * something was still written to the flash media, but that may be some + * garbage. + * + * If the volume is damaged because of an interrupted update this function just + * returns immediately with %-EBADF code. + */ +int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, + int offset, int len, int dtype) +{ + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + int vol_id = vol->vol_id; + + dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset); + + if (vol_id < 0 || vol_id >= ubi->vtbl_slots) + return -EINVAL; + + if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) + return -EROFS; + + if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || + offset + len > vol->usable_leb_size || offset % ubi->min_io_size || + len % ubi->min_io_size) + return -EINVAL; + + if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && + dtype != UBI_UNKNOWN) + return -EINVAL; + + if (vol->upd_marker) + return -EBADF; + + if (len == 0) + return 0; + + return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype); +} +EXPORT_SYMBOL_GPL(ubi_leb_write); + +/* + * ubi_leb_change - change logical eraseblock atomically. + * @desc: volume descriptor + * @lnum: logical eraseblock number to change + * @buf: data to write + * @len: how many bytes to write + * @dtype: expected data type + * + * This function changes the contents of a logical eraseblock atomically. @buf + * has to contain new logical eraseblock data, and @len - the length of the + * data, which has to be aligned. 
The length may be shorter then the logical + * eraseblock size, ant the logical eraseblock may be appended to more times + * later on. This function guarantees that in case of an unclean reboot the old + * contents is preserved. Returns zero in case of success and a negative error + * code in case of failure. + */ +int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, + int len, int dtype) +{ + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + int vol_id = vol->vol_id; + + dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum); + + if (vol_id < 0 || vol_id >= ubi->vtbl_slots) + return -EINVAL; + + if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) + return -EROFS; + + if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || + len > vol->usable_leb_size || len % ubi->min_io_size) + return -EINVAL; + + if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && + dtype != UBI_UNKNOWN) + return -EINVAL; + + if (vol->upd_marker) + return -EBADF; + + if (len == 0) + return 0; + + return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype); +} +EXPORT_SYMBOL_GPL(ubi_leb_change); + +/** + * ubi_leb_erase - erase logical eraseblock. + * @desc: volume descriptor + * @lnum: logical eraseblock number + * + * This function un-maps logical eraseblock @lnum and synchronously erases the + * correspondent physical eraseblock. Returns zero in case of success and a + * negative error code in case of failure. + * + * If the volume is damaged because of an interrupted update this function just + * returns immediately with %-EBADF code. + */ +int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum) +{ + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + int err, vol_id = vol->vol_id; + + dbg_msg("erase LEB %d:%d", vol_id, lnum); + + if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) + return -EROFS; + + if (lnum < 0 || lnum >= vol->reserved_pebs) + return -EINVAL; + + if (vol->upd_marker) + return -EBADF; + + err = ubi_eba_unmap_leb(ubi, vol_id, lnum); + if (err) + return err; + + return ubi_wl_flush(ubi); +} +EXPORT_SYMBOL_GPL(ubi_leb_erase); + +/** + * ubi_leb_unmap - un-map logical eraseblock. + * @desc: volume descriptor + * @lnum: logical eraseblock number + * + * This function un-maps logical eraseblock @lnum and schedules the + * corresponding physical eraseblock for erasure, so that it will eventually be + * physically erased in background. This operation is much faster then the + * erase operation. + * + * Unlike erase, the un-map operation does not guarantee that the logical + * eraseblock will contain all 0xFF bytes when UBI is initialized again. For + * example, if several logical eraseblocks are un-mapped, and an unclean reboot + * happens after this, the logical eraseblocks will not necessarily be + * un-mapped again when this MTD device is attached. They may actually be + * mapped to the same physical eraseblocks again. So, this function has to be + * used with care. + * + * In other words, when un-mapping a logical eraseblock, UBI does not store + * any information about this on the flash media, it just marks the logical + * eraseblock as "un-mapped" in RAM. If UBI is detached before the physical + * eraseblock is physically erased, it will be mapped again to the same logical + * eraseblock when the MTD device is attached again. + * + * The main and obvious use-case of this function is when the contents of a + * logical eraseblock has to be re-written. 
Then it is much more efficient to + * first un-map it, then write new data, rather then first erase it, then write + * new data. Note, once new data has been written to the logical eraseblock, + * UBI guarantees that the old contents has gone forever. In other words, if an + * unclean reboot happens after the logical eraseblock has been un-mapped and + * then written to, it will contain the last written data. + * + * This function returns zero in case of success and a negative error code in + * case of failure. If the volume is damaged because of an interrupted update + * this function just returns immediately with %-EBADF code. + */ +int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) +{ + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + int vol_id = vol->vol_id; + + dbg_msg("unmap LEB %d:%d", vol_id, lnum); + + if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) + return -EROFS; + + if (lnum < 0 || lnum >= vol->reserved_pebs) + return -EINVAL; + + if (vol->upd_marker) + return -EBADF; + + return ubi_eba_unmap_leb(ubi, vol_id, lnum); +} +EXPORT_SYMBOL_GPL(ubi_leb_unmap); + +/** + * ubi_is_mapped - check if logical eraseblock is mapped. + * @desc: volume descriptor + * @lnum: logical eraseblock number + * + * This function checks if logical eraseblock @lnum is mapped to a physical + * eraseblock. If a logical eraseblock is un-mapped, this does not necessarily + * mean it will still be un-mapped after the UBI device is re-attached. The + * logical eraseblock may become mapped to the physical eraseblock it was last + * mapped to. + * + * This function returns %1 if the LEB is mapped, %0 if not, and a negative + * error code in case of failure. If the volume is damaged because of an + * interrupted update this function just returns immediately with %-EBADF error + * code. + */ +int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum) +{ + struct ubi_volume *vol = desc->vol; + + dbg_msg("test LEB %d:%d", vol->vol_id, lnum); + + if (lnum < 0 || lnum >= vol->reserved_pebs) + return -EINVAL; + + if (vol->upd_marker) + return -EBADF; + + return vol->eba_tbl[lnum] >= 0; +} +EXPORT_SYMBOL_GPL(ubi_is_mapped); diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c new file mode 100644 index 000000000000..38d4e6757dc7 --- /dev/null +++ b/drivers/mtd/ubi/misc.c @@ -0,0 +1,105 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* Here we keep miscellaneous functions which are used all over the UBI code */ + +#include "ubi.h" + +/** + * calc_data_len - calculate how much real data is stored in a buffer. 
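+ *
+ * An illustrative example, assuming a 512-byte minimal I/O unit: for a
+ * 2048-byte buffer whose trailing 700 bytes, and only those, are 0xFF, the
+ * last "real" byte sits at index 1347, so the raw data length is 1348 bytes,
+ * which is then aligned up to 1536 (3 * 512) and returned.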
+ * @ubi: UBI device description object + * @buf: a buffer with the contents of the physical eraseblock + * @length: the buffer length + * + * This function calculates how much "real data" is stored in @buf and returnes + * the length. Continuous 0xFF bytes at the end of the buffer are not + * considered as "real data". + */ +int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, + int length) +{ + int i; + + ubi_assert(length % ubi->min_io_size == 0); + + for (i = length - 1; i >= 0; i--) + if (((const uint8_t *)buf)[i] != 0xFF) + break; + + /* The resulting length must be aligned to the minimum flash I/O size */ + length = ALIGN(i + 1, ubi->min_io_size); + return length; +} + +/** + * ubi_check_volume - check the contents of a static volume. + * @ubi: UBI device description object + * @vol_id: ID of the volume to check + * + * This function checks if static volume @vol_id is corrupted by fully reading + * it and checking data CRC. This function returns %0 if the volume is not + * corrupted, %1 if it is corrupted and a negative error code in case of + * failure. Dynamic volumes are not checked and zero is returned immediately. + */ +int ubi_check_volume(struct ubi_device *ubi, int vol_id) +{ + void *buf; + int err = 0, i; + struct ubi_volume *vol = ubi->volumes[vol_id]; + + if (vol->vol_type != UBI_STATIC_VOLUME) + return 0; + + buf = kmalloc(vol->usable_leb_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < vol->used_ebs; i++) { + int size; + + if (i == vol->used_ebs - 1) + size = vol->last_eb_bytes; + else + size = vol->usable_leb_size; + + err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1); + if (err) { + if (err == -EBADMSG) + err = 1; + break; + } + } + + kfree(buf); + return err; +} + +/** + * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad + * eraseblock handling. + * @ubi: UBI device description object + */ +void ubi_calculate_reserved(struct ubi_device *ubi) +{ + ubi->beb_rsvd_level = ubi->good_peb_count/100; + ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE; + if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS) + ubi->beb_rsvd_level = MIN_RESEVED_PEBS; +} diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c new file mode 100644 index 000000000000..473f3200b868 --- /dev/null +++ b/drivers/mtd/ubi/scan.c @@ -0,0 +1,1368 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * UBI scanning unit. + * + * This unit is responsible for scanning the flash media, checking UBI + * headers and providing complete information about the UBI flash image. + * + * The scanning information is reoresented by a &struct ubi_scan_info' object. 
+ * Information about found volumes is represented by &struct ubi_scan_volume + * objects which are kept in volume RB-tree with root at the @volumes field. + * The RB-tree is indexed by the volume ID. + * + * Found logical eraseblocks are represented by &struct ubi_scan_leb objects. + * These objects are kept in per-volume RB-trees with the root at the + * corresponding &struct ubi_scan_volume object. To put it differently, we keep + * an RB-tree of per-volume objects and each of these objects is the root of + * RB-tree of per-eraseblock objects. + * + * Corrupted physical eraseblocks are put to the @corr list, free physical + * eraseblocks are put to the @free list and the physical eraseblock to be + * erased are put to the @erase list. + */ + +#include +#include +#include "ubi.h" + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +static int paranoid_check_si(const struct ubi_device *ubi, + struct ubi_scan_info *si); +#else +#define paranoid_check_si(ubi, si) 0 +#endif + +/* Temporary variables used during scanning */ +static struct ubi_ec_hdr *ech; +static struct ubi_vid_hdr *vidh; + +int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec, + struct list_head *list) +{ + struct ubi_scan_leb *seb; + + if (list == &si->free) + dbg_bld("add to free: PEB %d, EC %d", pnum, ec); + else if (list == &si->erase) + dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); + else if (list == &si->corr) + dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); + else if (list == &si->alien) + dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); + else + BUG(); + + seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); + if (!seb) + return -ENOMEM; + + seb->pnum = pnum; + seb->ec = ec; + list_add_tail(&seb->u.list, list); + return 0; +} + +/** + * commit_to_mean_value - commit intermediate results to the final mean erase + * counter value. + * @si: scanning information + * + * This is a helper function which calculates partial mean erase counter mean + * value and adds it to the resulting mean value. As we can work only in + * integer arithmetic and we want to calculate the mean value of erase counter + * accurately, we first sum erase counter values in @si->ec_sum variable and + * count these components in @si->ec_count. If this temporary @si->ec_sum is + * going to overflow, we calculate the partial mean value + * (@si->ec_sum/@si->ec_count) and add it to @si->mean_ec. + */ +static void commit_to_mean_value(struct ubi_scan_info *si) +{ + si->ec_sum /= si->ec_count; + if (si->ec_sum % si->ec_count >= si->ec_count / 2) + si->mean_ec += 1; + si->mean_ec += si->ec_sum; +} + +/** + * validate_vid_hdr - check that volume identifier header is correct and + * consistent. + * @vid_hdr: the volume identifier header to check + * @sv: information about the volume this logical eraseblock belongs to + * @pnum: physical eraseblock number the VID header came from + * + * This function checks that data stored in @vid_hdr is consistent. Returns + * non-zero if an inconsistency was found and zero if not. + * + * Note, UBI does sanity check of everything it reads from the flash media. + * Most of the checks are done in the I/O unit. Here we check that the + * information in the VID header is consistent to the information in other VID + * headers of the same volume. 
+ */ +static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, + const struct ubi_scan_volume *sv, int pnum) +{ + int vol_type = vid_hdr->vol_type; + int vol_id = ubi32_to_cpu(vid_hdr->vol_id); + int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs); + int data_pad = ubi32_to_cpu(vid_hdr->data_pad); + + if (sv->leb_count != 0) { + int sv_vol_type; + + /* + * This is not the first logical eraseblock belonging to this + * volume. Ensure that the data in its VID header is consistent + * to the data in previous logical eraseblock headers. + */ + + if (vol_id != sv->vol_id) { + dbg_err("inconsistent vol_id"); + goto bad; + } + + if (sv->vol_type == UBI_STATIC_VOLUME) + sv_vol_type = UBI_VID_STATIC; + else + sv_vol_type = UBI_VID_DYNAMIC; + + if (vol_type != sv_vol_type) { + dbg_err("inconsistent vol_type"); + goto bad; + } + + if (used_ebs != sv->used_ebs) { + dbg_err("inconsistent used_ebs"); + goto bad; + } + + if (data_pad != sv->data_pad) { + dbg_err("inconsistent data_pad"); + goto bad; + } + } + + return 0; + +bad: + ubi_err("inconsistent VID header at PEB %d", pnum); + ubi_dbg_dump_vid_hdr(vid_hdr); + ubi_dbg_dump_sv(sv); + return -EINVAL; +} + +/** + * add_volume - add volume to the scanning information. + * @si: scanning information + * @vol_id: ID of the volume to add + * @pnum: physical eraseblock number + * @vid_hdr: volume identifier header + * + * If the volume corresponding to the @vid_hdr logical eraseblock is already + * present in the scanning information, this function does nothing. Otherwise + * it adds corresponding volume to the scanning information. Returns a pointer + * to the scanning volume object in case of success and a negative error code + * in case of failure. + */ +static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id, + int pnum, + const struct ubi_vid_hdr *vid_hdr) +{ + struct ubi_scan_volume *sv; + struct rb_node **p = &si->volumes.rb_node, *parent = NULL; + + ubi_assert(vol_id == ubi32_to_cpu(vid_hdr->vol_id)); + + /* Walk the volume RB-tree to look if this volume is already present */ + while (*p) { + parent = *p; + sv = rb_entry(parent, struct ubi_scan_volume, rb); + + if (vol_id == sv->vol_id) + return sv; + + if (vol_id > sv->vol_id) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + /* The volume is absent - add it */ + sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL); + if (!sv) + return ERR_PTR(-ENOMEM); + + sv->highest_lnum = sv->leb_count = 0; + si->max_sqnum = 0; + sv->vol_id = vol_id; + sv->root = RB_ROOT; + sv->used_ebs = ubi32_to_cpu(vid_hdr->used_ebs); + sv->data_pad = ubi32_to_cpu(vid_hdr->data_pad); + sv->compat = vid_hdr->compat; + sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME + : UBI_STATIC_VOLUME; + if (vol_id > si->highest_vol_id) + si->highest_vol_id = vol_id; + + rb_link_node(&sv->rb, parent, p); + rb_insert_color(&sv->rb, &si->volumes); + si->vols_found += 1; + dbg_bld("added volume %d", vol_id); + return sv; +} + +/** + * compare_lebs - find out which logical eraseblock is newer. + * @ubi: UBI device description object + * @seb: first logical eraseblock to compare + * @pnum: physical eraseblock number of the second logical eraseblock to + * compare + * @vid_hdr: volume identifier header of the second logical eraseblock + * + * This function compares 2 copies of a LEB and informs which one is newer. In + * case of success this function returns a positive value, in case of failure, a + * negative error code is returned. 
The success return codes use the following
+ * bits:
+ * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
+ * second PEB (described by @pnum and @vid_hdr);
+ * o bit 0 is set: the second PEB is newer;
+ * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
+ * o bit 1 is set: bit-flips were detected in the newer LEB;
+ * o bit 2 is cleared: the older LEB is not corrupted;
+ * o bit 2 is set: the older LEB is corrupted.
+ */
+static int compare_lebs(const struct ubi_device *ubi,
+			const struct ubi_scan_leb *seb, int pnum,
+			const struct ubi_vid_hdr *vid_hdr)
+{
+	void *buf;
+	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
+	uint32_t data_crc, crc;
+	struct ubi_vid_hdr *vidh = NULL;
+	unsigned long long sqnum2 = ubi64_to_cpu(vid_hdr->sqnum);
+
+	if (seb->sqnum == 0 && sqnum2 == 0) {
+		long long abs, v1 = seb->leb_ver, v2 = ubi32_to_cpu(vid_hdr->leb_ver);
+
+		/*
+		 * UBI constantly increases the logical eraseblock version
+		 * number and it can overflow. Thus, we have to bear in mind
+		 * that versions that are close to %0xFFFFFFFF are less than
+		 * versions that are close to %0.
+		 *
+		 * The UBI WL unit guarantees that the number of pending tasks
+		 * is not greater than %0x7FFFFFFF. So, if the difference
+		 * between any two versions is greater than or equal to
+		 * %0x7FFFFFFF, there was an overflow and the logical
+		 * eraseblock with the lower version is actually newer than
+		 * the one with the higher version. For example, if the first
+		 * version is %0xFFFFFFF0 and the second is %4, the difference
+		 * exceeds %0x7FFFFFFF, so the counter must have wrapped and
+		 * the second LEB is in fact the newer one.
+		 *
+		 * FIXME: but this is anyway obsolete and will be removed at
+		 * some point.
+		 */
+
+		dbg_bld("using old crappy leb_ver stuff");
+
+		abs = v1 - v2;
+		if (abs < 0)
+			abs = -abs;
+
+		if (abs < 0x7FFFFFFF)
+			/* Non-overflow situation */
+			second_is_newer = (v2 > v1);
+		else
+			second_is_newer = (v2 < v1);
+	} else
+		/* Obviously the LEB with lower sequence counter is older */
+		second_is_newer = sqnum2 > seb->sqnum;
+
+	/*
+	 * Now we know which copy is newer. If the copy flag of the PEB with
+	 * the newer version is not set, then we just return, otherwise we
+	 * have to check the data CRC. For the second PEB we already have the
+	 * VID header, for the first one we will need to re-read it from
+	 * flash.
+	 *
+	 * FIXME: this may be optimized so that we wouldn't read twice.
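+	 *
+	 * Concretely, the CRC check performed below amounts to (a sketch):
+	 *
+	 *	len = ubi32_to_cpu(vid_hdr->data_size);
+	 *	crc = crc32(UBI_CRC32_INIT, buf, len);
+	 *	corrupted = (crc != ubi32_to_cpu(vid_hdr->data_crc));
+	 *
+	 * and if the newer copy turns out to be corrupted, the verdict is
+	 * reversed and the older copy wins.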
+ */ + + if (second_is_newer) { + if (!vid_hdr->copy_flag) { + /* It is not a copy, so it is newer */ + dbg_bld("second PEB %d is newer, copy_flag is unset", + pnum); + return 1; + } + } else { + pnum = seb->pnum; + + vidh = ubi_zalloc_vid_hdr(ubi); + if (!vidh) + return -ENOMEM; + + err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); + if (err) { + if (err == UBI_IO_BITFLIPS) + bitflips = 1; + else { + dbg_err("VID of PEB %d header is bad, but it " + "was OK earlier", pnum); + if (err > 0) + err = -EIO; + + goto out_free_vidh; + } + } + + if (!vidh->copy_flag) { + /* It is not a copy, so it is newer */ + dbg_bld("first PEB %d is newer, copy_flag is unset", + pnum); + err = bitflips << 1; + goto out_free_vidh; + } + + vid_hdr = vidh; + } + + /* Read the data of the copy and check the CRC */ + + len = ubi32_to_cpu(vid_hdr->data_size); + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto out_free_vidh; + } + + err = ubi_io_read_data(ubi, buf, pnum, 0, len); + if (err && err != UBI_IO_BITFLIPS) + goto out_free_buf; + + data_crc = ubi32_to_cpu(vid_hdr->data_crc); + crc = crc32(UBI_CRC32_INIT, buf, len); + if (crc != data_crc) { + dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x", + pnum, crc, data_crc); + corrupted = 1; + bitflips = 0; + second_is_newer = !second_is_newer; + } else { + dbg_bld("PEB %d CRC is OK", pnum); + bitflips = !!err; + } + + kfree(buf); + ubi_free_vid_hdr(ubi, vidh); + + if (second_is_newer) + dbg_bld("second PEB %d is newer, copy_flag is set", pnum); + else + dbg_bld("first PEB %d is newer, copy_flag is set", pnum); + + return second_is_newer | (bitflips << 1) | (corrupted << 2); + +out_free_buf: + kfree(buf); +out_free_vidh: + ubi_free_vid_hdr(ubi, vidh); + ubi_assert(err < 0); + return err; +} + +/** + * ubi_scan_add_used - add information about a physical eraseblock to the + * scanning information. + * @ubi: UBI device description object + * @si: scanning information + * @pnum: the physical eraseblock number + * @ec: erase counter + * @vid_hdr: the volume identifier header + * @bitflips: if bit-flips were detected when this physical eraseblock was read + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si, + int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, + int bitflips) +{ + int err, vol_id, lnum; + uint32_t leb_ver; + unsigned long long sqnum; + struct ubi_scan_volume *sv; + struct ubi_scan_leb *seb; + struct rb_node **p, *parent = NULL; + + vol_id = ubi32_to_cpu(vid_hdr->vol_id); + lnum = ubi32_to_cpu(vid_hdr->lnum); + sqnum = ubi64_to_cpu(vid_hdr->sqnum); + leb_ver = ubi32_to_cpu(vid_hdr->leb_ver); + + dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d", + pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips); + + sv = add_volume(si, vol_id, pnum, vid_hdr); + if (IS_ERR(sv) < 0) + return PTR_ERR(sv); + + /* + * Walk the RB-tree of logical eraseblocks of volume @vol_id to look + * if this is the first instance of this logical eraseblock or not. + */ + p = &sv->root.rb_node; + while (*p) { + int cmp_res; + + parent = *p; + seb = rb_entry(parent, struct ubi_scan_leb, u.rb); + if (lnum != seb->lnum) { + if (lnum < seb->lnum) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + continue; + } + + /* + * There is already a physical eraseblock describing the same + * logical eraseblock present. 
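+		 *
+		 * How the conflict is resolved below, roughly (a sketch of
+		 * the logic, not a quote of it):
+		 *
+		 *	cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
+		 *	bit 0 set: the new PEB @pnum holds the newer copy, so
+		 *		it replaces @seb and the old PEB is queued for
+		 *		erasure (or put on @corr if bit 2 is set);
+		 *	bit 0 clear: the new PEB is the older copy and is
+		 *		itself queued for erasure (or put on @corr);
+		 *	bit 1 set: the surviving copy saw bit-flips, so it is
+		 *		marked for scrubbing.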
+ */ + + dbg_bld("this LEB already exists: PEB %d, sqnum %llu, " + "LEB ver %u, EC %d", seb->pnum, seb->sqnum, + seb->leb_ver, seb->ec); + + /* + * Make sure that the logical eraseblocks have different + * versions. Otherwise the image is bad. + */ + if (seb->leb_ver == leb_ver && leb_ver != 0) { + ubi_err("two LEBs with same version %u", leb_ver); + ubi_dbg_dump_seb(seb, 0); + ubi_dbg_dump_vid_hdr(vid_hdr); + return -EINVAL; + } + + /* + * Make sure that the logical eraseblocks have different + * sequence numbers. Otherwise the image is bad. + * + * FIXME: remove 'sqnum != 0' check when leb_ver is removed. + */ + if (seb->sqnum == sqnum && sqnum != 0) { + ubi_err("two LEBs with same sequence number %llu", + sqnum); + ubi_dbg_dump_seb(seb, 0); + ubi_dbg_dump_vid_hdr(vid_hdr); + return -EINVAL; + } + + /* + * Now we have to drop the older one and preserve the newer + * one. + */ + cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr); + if (cmp_res < 0) + return cmp_res; + + if (cmp_res & 1) { + /* + * This logical eraseblock is newer then the one + * found earlier. + */ + err = validate_vid_hdr(vid_hdr, sv, pnum); + if (err) + return err; + + if (cmp_res & 4) + err = ubi_scan_add_to_list(si, seb->pnum, + seb->ec, &si->corr); + else + err = ubi_scan_add_to_list(si, seb->pnum, + seb->ec, &si->erase); + if (err) + return err; + + seb->ec = ec; + seb->pnum = pnum; + seb->scrub = ((cmp_res & 2) || bitflips); + seb->sqnum = sqnum; + seb->leb_ver = leb_ver; + + if (sv->highest_lnum == lnum) + sv->last_data_size = + ubi32_to_cpu(vid_hdr->data_size); + + return 0; + } else { + /* + * This logical eraseblock is older then the one found + * previously. + */ + if (cmp_res & 4) + return ubi_scan_add_to_list(si, pnum, ec, + &si->corr); + else + return ubi_scan_add_to_list(si, pnum, ec, + &si->erase); + } + } + + /* + * We've met this logical eraseblock for the first time, add it to the + * scanning information. + */ + + err = validate_vid_hdr(vid_hdr, sv, pnum); + if (err) + return err; + + seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); + if (!seb) + return -ENOMEM; + + seb->ec = ec; + seb->pnum = pnum; + seb->lnum = lnum; + seb->sqnum = sqnum; + seb->scrub = bitflips; + seb->leb_ver = leb_ver; + + if (sv->highest_lnum <= lnum) { + sv->highest_lnum = lnum; + sv->last_data_size = ubi32_to_cpu(vid_hdr->data_size); + } + + if (si->max_sqnum < sqnum) + si->max_sqnum = sqnum; + + sv->leb_count += 1; + rb_link_node(&seb->u.rb, parent, p); + rb_insert_color(&seb->u.rb, &sv->root); + return 0; +} + +/** + * ubi_scan_find_sv - find information about a particular volume in the + * scanning information. + * @si: scanning information + * @vol_id: the requested volume ID + * + * This function returns a pointer to the volume description or %NULL if there + * are no data about this volume in the scanning information. + */ +struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si, + int vol_id) +{ + struct ubi_scan_volume *sv; + struct rb_node *p = si->volumes.rb_node; + + while (p) { + sv = rb_entry(p, struct ubi_scan_volume, rb); + + if (vol_id == sv->vol_id) + return sv; + + if (vol_id > sv->vol_id) + p = p->rb_left; + else + p = p->rb_right; + } + + return NULL; +} + +/** + * ubi_scan_find_seb - find information about a particular logical + * eraseblock in the volume scanning information. 
+ * @sv: a pointer to the volume scanning information + * @lnum: the requested logical eraseblock + * + * This function returns a pointer to the scanning logical eraseblock or %NULL + * if there are no data about it in the scanning volume information. + */ +struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv, + int lnum) +{ + struct ubi_scan_leb *seb; + struct rb_node *p = sv->root.rb_node; + + while (p) { + seb = rb_entry(p, struct ubi_scan_leb, u.rb); + + if (lnum == seb->lnum) + return seb; + + if (lnum > seb->lnum) + p = p->rb_left; + else + p = p->rb_right; + } + + return NULL; +} + +/** + * ubi_scan_rm_volume - delete scanning information about a volume. + * @si: scanning information + * @sv: the volume scanning information to delete + */ +void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv) +{ + struct rb_node *rb; + struct ubi_scan_leb *seb; + + dbg_bld("remove scanning information about volume %d", sv->vol_id); + + while ((rb = rb_first(&sv->root))) { + seb = rb_entry(rb, struct ubi_scan_leb, u.rb); + rb_erase(&seb->u.rb, &sv->root); + list_add_tail(&seb->u.list, &si->erase); + } + + rb_erase(&sv->rb, &si->volumes); + kfree(sv); + si->vols_found -= 1; +} + +/** + * ubi_scan_erase_peb - erase a physical eraseblock. + * @ubi: UBI device description object + * @si: scanning information + * @pnum: physical eraseblock number to erase; + * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown) + * + * This function erases physical eraseblock 'pnum', and writes the erase + * counter header to it. This function should only be used on UBI device + * initialization stages, when the EBA unit had not been yet initialized. This + * function returns zero in case of success and a negative error code in case + * of failure. + */ +int ubi_scan_erase_peb(const struct ubi_device *ubi, + const struct ubi_scan_info *si, int pnum, int ec) +{ + int err; + struct ubi_ec_hdr *ec_hdr; + + ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); + if (!ec_hdr) + return -ENOMEM; + + if ((long long)ec >= UBI_MAX_ERASECOUNTER) { + /* + * Erase counter overflow. Upgrade UBI and use 64-bit + * erase counters internally. + */ + ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec); + return -EINVAL; + } + + ec_hdr->ec = cpu_to_ubi64(ec); + + err = ubi_io_sync_erase(ubi, pnum, 0); + if (err < 0) + goto out_free; + + err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr); + +out_free: + kfree(ec_hdr); + return err; +} + +/** + * ubi_scan_get_free_peb - get a free physical eraseblock. + * @ubi: UBI device description object + * @si: scanning information + * + * This function returns a free physical eraseblock. It is supposed to be + * called on the UBI initialization stages when the wear-leveling unit is not + * initialized yet. This function picks a physical eraseblocks from one of the + * lists, writes the EC header if it is needed, and removes it from the list. + * + * This function returns scanning physical eraseblock information in case of + * success and an error code in case of failure. 
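+ *
+ * A typical early-attach caller (for instance the code which creates the
+ * volume table) would use it roughly as follows; this is a sketch, not a
+ * quote of that code, and @buf/@len are placeholders:
+ *
+ *	seb = ubi_scan_get_free_peb(ubi, si);
+ *	if (IS_ERR(seb))
+ *		return PTR_ERR(seb);
+ *	err = ubi_io_write_data(ubi, buf, seb->pnum, 0, len);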
+ */ +struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi, + struct ubi_scan_info *si) +{ + int err = 0, i; + struct ubi_scan_leb *seb; + + if (!list_empty(&si->free)) { + seb = list_entry(si->free.next, struct ubi_scan_leb, u.list); + list_del(&seb->u.list); + dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec); + return seb; + } + + for (i = 0; i < 2; i++) { + struct list_head *head; + struct ubi_scan_leb *tmp_seb; + + if (i == 0) + head = &si->erase; + else + head = &si->corr; + + /* + * We try to erase the first physical eraseblock from the @head + * list and pick it if we succeed, or try to erase the + * next one if not. And so forth. We don't want to take care + * about bad eraseblocks here - they'll be handled later. + */ + list_for_each_entry_safe(seb, tmp_seb, head, u.list) { + if (seb->ec == UBI_SCAN_UNKNOWN_EC) + seb->ec = si->mean_ec; + + err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1); + if (err) + continue; + + seb->ec += 1; + list_del(&seb->u.list); + dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec); + return seb; + } + } + + ubi_err("no eraseblocks found"); + return ERR_PTR(-ENOSPC); +} + +/** + * process_eb - read UBI headers, check them and add corresponding data + * to the scanning information. + * @ubi: UBI device description object + * @si: scanning information + * @pnum: the physical eraseblock number + * + * This function returns a zero if the physical eraseblock was succesfully + * handled and a negative error code in case of failure. + */ +static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) +{ + long long ec; + int err, bitflips = 0, vol_id, ec_corr = 0; + + dbg_bld("scan PEB %d", pnum); + + /* Skip bad physical eraseblocks */ + err = ubi_io_is_bad(ubi, pnum); + if (err < 0) + return err; + else if (err) { + /* + * FIXME: this is actually duty of the I/O unit to initialize + * this, but MTD does not provide enough information. + */ + si->bad_peb_count += 1; + return 0; + } + + err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); + if (err < 0) + return err; + else if (err == UBI_IO_BITFLIPS) + bitflips = 1; + else if (err == UBI_IO_PEB_EMPTY) + return ubi_scan_add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, + &si->erase); + else if (err == UBI_IO_BAD_EC_HDR) { + /* + * We have to also look at the VID header, possibly it is not + * corrupted. Set %bitflips flag in order to make this PEB be + * moved and EC be re-created. + */ + ec_corr = 1; + ec = UBI_SCAN_UNKNOWN_EC; + bitflips = 1; + } + + si->is_empty = 0; + + if (!ec_corr) { + /* Make sure UBI version is OK */ + if (ech->version != UBI_VERSION) { + ubi_err("this UBI version is %d, image version is %d", + UBI_VERSION, (int)ech->version); + return -EINVAL; + } + + ec = ubi64_to_cpu(ech->ec); + if (ec > UBI_MAX_ERASECOUNTER) { + /* + * Erase counter overflow. The EC headers have 64 bits + * reserved, but we anyway make use of only 31 bit + * values, as this seems to be enough for any existing + * flash. Upgrade UBI and use 64-bit erase counters + * internally. 
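+			 *
+			 * (31 bits already allow 2^31 - 1, i.e. about 2.1
+			 * billion erase cycles per PEB, which is orders of
+			 * magnitude beyond the endurance of real flash, so
+			 * hitting this path almost certainly means the EC
+			 * header contains garbage.)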
+ */ + ubi_err("erase counter overflow, max is %d", + UBI_MAX_ERASECOUNTER); + ubi_dbg_dump_ec_hdr(ech); + return -EINVAL; + } + } + + /* OK, we've done with the EC header, let's look at the VID header */ + + err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); + if (err < 0) + return err; + else if (err == UBI_IO_BITFLIPS) + bitflips = 1; + else if (err == UBI_IO_BAD_VID_HDR || + (err == UBI_IO_PEB_FREE && ec_corr)) { + /* VID header is corrupted */ + err = ubi_scan_add_to_list(si, pnum, ec, &si->corr); + if (err) + return err; + goto adjust_mean_ec; + } else if (err == UBI_IO_PEB_FREE) { + /* No VID header - the physical eraseblock is free */ + err = ubi_scan_add_to_list(si, pnum, ec, &si->free); + if (err) + return err; + goto adjust_mean_ec; + } + + vol_id = ubi32_to_cpu(vidh->vol_id); + if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) { + int lnum = ubi32_to_cpu(vidh->lnum); + + /* Unsupported internal volume */ + switch (vidh->compat) { + case UBI_COMPAT_DELETE: + ubi_msg("\"delete\" compatible internal volume %d:%d" + " found, remove it", vol_id, lnum); + err = ubi_scan_add_to_list(si, pnum, ec, &si->corr); + if (err) + return err; + break; + + case UBI_COMPAT_RO: + ubi_msg("read-only compatible internal volume %d:%d" + " found, switch to read-only mode", + vol_id, lnum); + ubi->ro_mode = 1; + break; + + case UBI_COMPAT_PRESERVE: + ubi_msg("\"preserve\" compatible internal volume %d:%d" + " found", vol_id, lnum); + err = ubi_scan_add_to_list(si, pnum, ec, &si->alien); + if (err) + return err; + si->alien_peb_count += 1; + return 0; + + case UBI_COMPAT_REJECT: + ubi_err("incompatible internal volume %d:%d found", + vol_id, lnum); + return -EINVAL; + } + } + + /* Both UBI headers seem to be fine */ + err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips); + if (err) + return err; + +adjust_mean_ec: + if (!ec_corr) { + if (si->ec_sum + ec < ec) { + commit_to_mean_value(si); + si->ec_sum = 0; + si->ec_count = 0; + } else { + si->ec_sum += ec; + si->ec_count += 1; + } + + if (ec > si->max_ec) + si->max_ec = ec; + if (ec < si->min_ec) + si->min_ec = ec; + } + + return 0; +} + +/** + * ubi_scan - scan an MTD device. + * @ubi: UBI device description object + * + * This function does full scanning of an MTD device and returns complete + * information about it. In case of failure, an error code is returned. + */ +struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) +{ + int err, pnum; + struct rb_node *rb1, *rb2; + struct ubi_scan_volume *sv; + struct ubi_scan_leb *seb; + struct ubi_scan_info *si; + + si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL); + if (!si) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&si->corr); + INIT_LIST_HEAD(&si->free); + INIT_LIST_HEAD(&si->erase); + INIT_LIST_HEAD(&si->alien); + si->volumes = RB_ROOT; + si->is_empty = 1; + + err = -ENOMEM; + ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); + if (!ech) + goto out_si; + + vidh = ubi_zalloc_vid_hdr(ubi); + if (!vidh) + goto out_ech; + + for (pnum = 0; pnum < ubi->peb_count; pnum++) { + cond_resched(); + + dbg_msg("process PEB %d", pnum); + err = process_eb(ubi, si, pnum); + if (err < 0) + goto out_vidh; + } + + dbg_msg("scanning is finished"); + + /* Finish mean erase counter calculations */ + if (si->ec_count) + commit_to_mean_value(si); + + if (si->is_empty) + ubi_msg("empty MTD device detected"); + + /* + * In case of unknown erase counter we use the mean erase counter + * value. 
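+	 *
+	 * For example, if the readable copies had erase counters 3, 5 and 4,
+	 * @mean_ec is 4 and every PEB whose EC header could not be read is
+	 * assumed to have been erased about 4 times as well - a guess that
+	 * keeps wear-leveling reasonable in the absence of better
+	 * information.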
+ */ + ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { + ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) + if (seb->ec == UBI_SCAN_UNKNOWN_EC) + seb->ec = si->mean_ec; + } + + list_for_each_entry(seb, &si->free, u.list) { + if (seb->ec == UBI_SCAN_UNKNOWN_EC) + seb->ec = si->mean_ec; + } + + list_for_each_entry(seb, &si->corr, u.list) + if (seb->ec == UBI_SCAN_UNKNOWN_EC) + seb->ec = si->mean_ec; + + list_for_each_entry(seb, &si->erase, u.list) + if (seb->ec == UBI_SCAN_UNKNOWN_EC) + seb->ec = si->mean_ec; + + err = paranoid_check_si(ubi, si); + if (err) { + if (err > 0) + err = -EINVAL; + goto out_vidh; + } + + ubi_free_vid_hdr(ubi, vidh); + kfree(ech); + + return si; + +out_vidh: + ubi_free_vid_hdr(ubi, vidh); +out_ech: + kfree(ech); +out_si: + ubi_scan_destroy_si(si); + return ERR_PTR(err); +} + +/** + * destroy_sv - free the scanning volume information + * @sv: scanning volume information + * + * This function destroys the volume RB-tree (@sv->root) and the scanning + * volume information. + */ +static void destroy_sv(struct ubi_scan_volume *sv) +{ + struct ubi_scan_leb *seb; + struct rb_node *this = sv->root.rb_node; + + while (this) { + if (this->rb_left) + this = this->rb_left; + else if (this->rb_right) + this = this->rb_right; + else { + seb = rb_entry(this, struct ubi_scan_leb, u.rb); + this = rb_parent(this); + if (this) { + if (this->rb_left == &seb->u.rb) + this->rb_left = NULL; + else + this->rb_right = NULL; + } + + kfree(seb); + } + } + kfree(sv); +} + +/** + * ubi_scan_destroy_si - destroy scanning information. + * @si: scanning information + */ +void ubi_scan_destroy_si(struct ubi_scan_info *si) +{ + struct ubi_scan_leb *seb, *seb_tmp; + struct ubi_scan_volume *sv; + struct rb_node *rb; + + list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) { + list_del(&seb->u.list); + kfree(seb); + } + list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) { + list_del(&seb->u.list); + kfree(seb); + } + list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) { + list_del(&seb->u.list); + kfree(seb); + } + list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) { + list_del(&seb->u.list); + kfree(seb); + } + + /* Destroy the volume RB-tree */ + rb = si->volumes.rb_node; + while (rb) { + if (rb->rb_left) + rb = rb->rb_left; + else if (rb->rb_right) + rb = rb->rb_right; + else { + sv = rb_entry(rb, struct ubi_scan_volume, rb); + + rb = rb_parent(rb); + if (rb) { + if (rb->rb_left == &sv->rb) + rb->rb_left = NULL; + else + rb->rb_right = NULL; + } + + destroy_sv(sv); + } + } + + kfree(si); +} + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID + +/** + * paranoid_check_si - check if the scanning information is correct and + * consistent. + * @ubi: UBI device description object + * @si: scanning information + * + * This function returns zero if the scanning information is all right, %1 if + * not and a negative error code if an error occurred. + */ +static int paranoid_check_si(const struct ubi_device *ubi, + struct ubi_scan_info *si) +{ + int pnum, err, vols_found = 0; + struct rb_node *rb1, *rb2; + struct ubi_scan_volume *sv; + struct ubi_scan_leb *seb, *last_seb; + uint8_t *buf; + + /* + * At first, check that scanning information is ok. 
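+	 *
+	 * The checking is done in three passes: first the in-memory trees,
+	 * counters and limits are verified, then every recorded VID header
+	 * is re-read from flash and compared against the scanning data, and
+	 * finally it is verified that every good PEB is accounted for in one
+	 * of the lists or trees.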
+ */ + ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { + int leb_count = 0; + + cond_resched(); + + vols_found += 1; + + if (si->is_empty) { + ubi_err("bad is_empty flag"); + goto bad_sv; + } + + if (sv->vol_id < 0 || sv->highest_lnum < 0 || + sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 || + sv->data_pad < 0 || sv->last_data_size < 0) { + ubi_err("negative values"); + goto bad_sv; + } + + if (sv->vol_id >= UBI_MAX_VOLUMES && + sv->vol_id < UBI_INTERNAL_VOL_START) { + ubi_err("bad vol_id"); + goto bad_sv; + } + + if (sv->vol_id > si->highest_vol_id) { + ubi_err("highest_vol_id is %d, but vol_id %d is there", + si->highest_vol_id, sv->vol_id); + goto out; + } + + if (sv->vol_type != UBI_DYNAMIC_VOLUME && + sv->vol_type != UBI_STATIC_VOLUME) { + ubi_err("bad vol_type"); + goto bad_sv; + } + + if (sv->data_pad > ubi->leb_size / 2) { + ubi_err("bad data_pad"); + goto bad_sv; + } + + last_seb = NULL; + ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { + cond_resched(); + + last_seb = seb; + leb_count += 1; + + if (seb->pnum < 0 || seb->ec < 0) { + ubi_err("negative values"); + goto bad_seb; + } + + if (seb->ec < si->min_ec) { + ubi_err("bad si->min_ec (%d), %d found", + si->min_ec, seb->ec); + goto bad_seb; + } + + if (seb->ec > si->max_ec) { + ubi_err("bad si->max_ec (%d), %d found", + si->max_ec, seb->ec); + goto bad_seb; + } + + if (seb->pnum >= ubi->peb_count) { + ubi_err("too high PEB number %d, total PEBs %d", + seb->pnum, ubi->peb_count); + goto bad_seb; + } + + if (sv->vol_type == UBI_STATIC_VOLUME) { + if (seb->lnum >= sv->used_ebs) { + ubi_err("bad lnum or used_ebs"); + goto bad_seb; + } + } else { + if (sv->used_ebs != 0) { + ubi_err("non-zero used_ebs"); + goto bad_seb; + } + } + + if (seb->lnum > sv->highest_lnum) { + ubi_err("incorrect highest_lnum or lnum"); + goto bad_seb; + } + } + + if (sv->leb_count != leb_count) { + ubi_err("bad leb_count, %d objects in the tree", + leb_count); + goto bad_sv; + } + + if (!last_seb) + continue; + + seb = last_seb; + + if (seb->lnum != sv->highest_lnum) { + ubi_err("bad highest_lnum"); + goto bad_seb; + } + } + + if (vols_found != si->vols_found) { + ubi_err("bad si->vols_found %d, should be %d", + si->vols_found, vols_found); + goto out; + } + + /* Check that scanning information is correct */ + ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { + last_seb = NULL; + ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { + int vol_type; + + cond_resched(); + + last_seb = seb; + + err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1); + if (err && err != UBI_IO_BITFLIPS) { + ubi_err("VID header is not OK (%d)", err); + if (err > 0) + err = -EIO; + return err; + } + + vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? 
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; + if (sv->vol_type != vol_type) { + ubi_err("bad vol_type"); + goto bad_vid_hdr; + } + + if (seb->sqnum != ubi64_to_cpu(vidh->sqnum)) { + ubi_err("bad sqnum %llu", seb->sqnum); + goto bad_vid_hdr; + } + + if (sv->vol_id != ubi32_to_cpu(vidh->vol_id)) { + ubi_err("bad vol_id %d", sv->vol_id); + goto bad_vid_hdr; + } + + if (sv->compat != vidh->compat) { + ubi_err("bad compat %d", vidh->compat); + goto bad_vid_hdr; + } + + if (seb->lnum != ubi32_to_cpu(vidh->lnum)) { + ubi_err("bad lnum %d", seb->lnum); + goto bad_vid_hdr; + } + + if (sv->used_ebs != ubi32_to_cpu(vidh->used_ebs)) { + ubi_err("bad used_ebs %d", sv->used_ebs); + goto bad_vid_hdr; + } + + if (sv->data_pad != ubi32_to_cpu(vidh->data_pad)) { + ubi_err("bad data_pad %d", sv->data_pad); + goto bad_vid_hdr; + } + + if (seb->leb_ver != ubi32_to_cpu(vidh->leb_ver)) { + ubi_err("bad leb_ver %u", seb->leb_ver); + goto bad_vid_hdr; + } + } + + if (!last_seb) + continue; + + if (sv->highest_lnum != ubi32_to_cpu(vidh->lnum)) { + ubi_err("bad highest_lnum %d", sv->highest_lnum); + goto bad_vid_hdr; + } + + if (sv->last_data_size != ubi32_to_cpu(vidh->data_size)) { + ubi_err("bad last_data_size %d", sv->last_data_size); + goto bad_vid_hdr; + } + } + + /* + * Make sure that all the physical eraseblocks are in one of the lists + * or trees. + */ + buf = kmalloc(ubi->peb_count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memset(buf, 1, ubi->peb_count); + for (pnum = 0; pnum < ubi->peb_count; pnum++) { + err = ubi_io_is_bad(ubi, pnum); + if (err < 0) + return err; + else if (err) + buf[pnum] = 0; + } + + ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) + ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) + buf[seb->pnum] = 0; + + list_for_each_entry(seb, &si->free, u.list) + buf[seb->pnum] = 0; + + list_for_each_entry(seb, &si->corr, u.list) + buf[seb->pnum] = 0; + + list_for_each_entry(seb, &si->erase, u.list) + buf[seb->pnum] = 0; + + list_for_each_entry(seb, &si->alien, u.list) + buf[seb->pnum] = 0; + + err = 0; + for (pnum = 0; pnum < ubi->peb_count; pnum++) + if (buf[pnum]) { + ubi_err("PEB %d is not referred", pnum); + err = 1; + } + + kfree(buf); + if (err) + goto out; + return 0; + +bad_seb: + ubi_err("bad scanning information about LEB %d", seb->lnum); + ubi_dbg_dump_seb(seb, 0); + ubi_dbg_dump_sv(sv); + goto out; + +bad_sv: + ubi_err("bad scanning information about volume %d", sv->vol_id); + ubi_dbg_dump_sv(sv); + goto out; + +bad_vid_hdr: + ubi_err("bad scanning information about volume %d", sv->vol_id); + ubi_dbg_dump_sv(sv); + ubi_dbg_dump_vid_hdr(vidh); + +out: + ubi_dbg_dump_stack(); + return 1; +} + +#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h new file mode 100644 index 000000000000..3949f6192c76 --- /dev/null +++ b/drivers/mtd/ubi/scan.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __UBI_SCAN_H__ +#define __UBI_SCAN_H__ + +/* The erase counter value for this physical eraseblock is unknown */ +#define UBI_SCAN_UNKNOWN_EC (-1) + +/** + * struct ubi_scan_leb - scanning information about a physical eraseblock. + * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown) + * @pnum: physical eraseblock number + * @lnum: logical eraseblock number + * @scrub: if this physical eraseblock needs scrubbing + * @sqnum: sequence number + * @u: unions RB-tree or @list links + * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects + * @u.list: link in one of the eraseblock lists + * @leb_ver: logical eraseblock version (obsolete) + * + * One object of this type is allocated for each physical eraseblock during + * scanning. + */ +struct ubi_scan_leb { + int ec; + int pnum; + int lnum; + int scrub; + unsigned long long sqnum; + union { + struct rb_node rb; + struct list_head list; + } u; + uint32_t leb_ver; +}; + +/** + * struct ubi_scan_volume - scanning information about a volume. + * @vol_id: volume ID + * @highest_lnum: highest logical eraseblock number in this volume + * @leb_count: number of logical eraseblocks in this volume + * @vol_type: volume type + * @used_ebs: number of used logical eraseblocks in this volume (only for + * static volumes) + * @last_data_size: amount of data in the last logical eraseblock of this + * volume (always equivalent to the usable logical eraseblock size in case of + * dynamic volumes) + * @data_pad: how many bytes at the end of logical eraseblocks of this volume + * are not used (due to volume alignment) + * @compat: compatibility flags of this volume + * @rb: link in the volume RB-tree + * @root: root of the RB-tree containing all the eraseblock belonging to this + * volume (&struct ubi_scan_leb objects) + * + * One object of this type is allocated for each volume during scanning. + */ +struct ubi_scan_volume { + int vol_id; + int highest_lnum; + int leb_count; + int vol_type; + int used_ebs; + int last_data_size; + int data_pad; + int compat; + struct rb_node rb; + struct rb_root root; +}; + +/** + * struct ubi_scan_info - UBI scanning information. + * @volumes: root of the volume RB-tree + * @corr: list of corrupted physical eraseblocks + * @free: list of free physical eraseblocks + * @erase: list of physical eraseblocks which have to be erased + * @alien: list of physical eraseblocks which should not be used by UBI (e.g., + * @bad_peb_count: count of bad physical eraseblocks + * those belonging to "preserve"-compatible internal volumes) + * @vols_found: number of volumes found during scanning + * @highest_vol_id: highest volume ID + * @alien_peb_count: count of physical eraseblocks in the @alien list + * @is_empty: flag indicating whether the MTD device is empty or not + * @min_ec: lowest erase counter value + * @max_ec: highest erase counter value + * @max_sqnum: highest sequence number value + * @mean_ec: mean erase counter value + * @ec_sum: a temporary variable used when calculating @mean_ec + * @ec_count: a temporary variable used when calculating @mean_ec + * + * This data structure contains the result of scanning and may be used by other + * UBI units to build final UBI data structures, further error-recovery and so + * on. 
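+ *
+ * A consumer of the scanning results would typically walk them roughly like
+ * this (a sketch; the EBA and wear-leveling attach code do something of this
+ * kind):
+ *
+ *	struct ubi_scan_volume *sv;
+ *	struct ubi_scan_leb *seb;
+ *	struct rb_node *rb1, *rb2;
+ *
+ *	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
+ *		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
+ *			use sv->vol_id, seb->lnum, seb->pnum, seb->ec;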
+ */ +struct ubi_scan_info { + struct rb_root volumes; + struct list_head corr; + struct list_head free; + struct list_head erase; + struct list_head alien; + int bad_peb_count; + int vols_found; + int highest_vol_id; + int alien_peb_count; + int is_empty; + int min_ec; + int max_ec; + unsigned long long max_sqnum; + int mean_ec; + int ec_sum; + int ec_count; +}; + +struct ubi_device; +struct ubi_vid_hdr; + +/* + * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a + * list. + * + * @sv: volume scanning information + * @seb: scanning eraseblock infprmation + * @list: the list to move to + */ +static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv, + struct ubi_scan_leb *seb, + struct list_head *list) +{ + rb_erase(&seb->u.rb, &sv->root); + list_add_tail(&seb->u.list, list); +} + +int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec, + struct list_head *list); +int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si, + int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, + int bitflips); +struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si, + int vol_id); +struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv, + int lnum); +void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv); +struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi, + struct ubi_scan_info *si); +int ubi_scan_erase_peb(const struct ubi_device *ubi, + const struct ubi_scan_info *si, int pnum, int ec); +struct ubi_scan_info *ubi_scan(struct ubi_device *ubi); +void ubi_scan_destroy_si(struct ubi_scan_info *si); + +#endif /* !__UBI_SCAN_H__ */ diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h new file mode 100644 index 000000000000..feb647f108f0 --- /dev/null +++ b/drivers/mtd/ubi/ubi.h @@ -0,0 +1,535 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2006, 2007 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __UBI_UBI_H__ +#define __UBI_UBI_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "scan.h" +#include "debug.h" + +/* Maximum number of supported UBI devices */ +#define UBI_MAX_DEVICES 32 + +/* UBI name used for character devices, sysfs, etc */ +#define UBI_NAME_STR "ubi" + +/* Normal UBI messages */ +#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__) +/* UBI warning messages */ +#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \ + __FUNCTION__, ##__VA_ARGS__) +/* UBI error messages */ +#define ubi_err(fmt, ...) 
printk(KERN_ERR "UBI error: %s: " fmt "\n", \ + __FUNCTION__, ##__VA_ARGS__) + +/* Lowest number PEBs reserved for bad PEB handling */ +#define MIN_RESEVED_PEBS 2 + +/* Background thread name pattern */ +#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" + +/* This marker in the EBA table means that the LEB is um-mapped */ +#define UBI_LEB_UNMAPPED -1 + +/* + * In case of errors, UBI tries to repeat the operation several times before + * returning error. The below constant defines how many times UBI re-tries. + */ +#define UBI_IO_RETRIES 3 + +/* + * Error codes returned by the I/O unit. + * + * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only + * 0xFF bytes + * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a + * valid erase counter header, and the rest are %0xFF bytes + * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC) + * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or + * CRC) + * UBI_IO_BITFLIPS: bit-flips were detected and corrected + */ +enum { + UBI_IO_PEB_EMPTY = 1, + UBI_IO_PEB_FREE, + UBI_IO_BAD_EC_HDR, + UBI_IO_BAD_VID_HDR, + UBI_IO_BITFLIPS +}; + +extern int ubi_devices_cnt; +extern struct ubi_device *ubi_devices[]; + +struct ubi_volume_desc; + +/** + * struct ubi_volume - UBI volume description data structure. + * @dev: device object to make use of the the Linux device model + * @cdev: character device object to create character device + * @ubi: reference to the UBI device description object + * @vol_id: volume ID + * @readers: number of users holding this volume in read-only mode + * @writers: number of users holding this volume in read-write mode + * @exclusive: whether somebody holds this volume in exclusive mode + * @removed: if the volume was removed + * @checked: if this static volume was checked + * + * @reserved_pebs: how many physical eraseblocks are reserved for this volume + * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) + * @usable_leb_size: logical eraseblock size without padding + * @used_ebs: how many logical eraseblocks in this volume contain data + * @last_eb_bytes: how many bytes are stored in the last logical eraseblock + * @used_bytes: how many bytes of data this volume contains + * @upd_marker: non-zero if the update marker is set for this volume + * @corrupted: non-zero if the volume is corrupted (static volumes only) + * @alignment: volume alignment + * @data_pad: how many bytes are not used at the end of physical eraseblocks to + * satisfy the requested alignment + * @name_len: volume name length + * @name: volume name + * + * @updating: whether the volume is being updated + * @upd_ebs: how many eraseblocks are expected to be updated + * @upd_bytes: how many bytes are expected to be received + * @upd_received: how many update bytes were already received + * @upd_buf: update buffer which is used to collect update data + * + * @eba_tbl: EBA table of this volume (LEB->PEB mapping) + * + * @gluebi_desc: gluebi UBI volume descriptor + * @gluebi_refcount: reference count of the gluebi MTD device + * @gluebi_mtd: MTD device description object of the gluebi MTD device + * + * The @corrupted field indicates that the volume's contents is corrupted. + * Since UBI protects only static volumes, this field is not relevant to + * dynamic volumes - it is user's responsibility to assure their data + * integrity. + * + * The @upd_marker flag indicates that this volume is either being updated at + * the moment or is damaged because of an unclean reboot. 
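+ *
+ * For static volumes the size fields are related as follows (a sketch,
+ * assuming the volume is not empty):
+ *
+ *	@used_bytes == (@used_ebs - 1) * @usable_leb_size + @last_eb_bytes
+ *
+ * i.e. all data LEBs but the last one are full, and @last_eb_bytes tells how
+ * much of the last one is actually used.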
+ */ +struct ubi_volume { + struct device dev; + struct cdev cdev; + struct ubi_device *ubi; + int vol_id; + int readers; + int writers; + int exclusive; + int removed; + int checked; + + int reserved_pebs; + int vol_type; + int usable_leb_size; + int used_ebs; + int last_eb_bytes; + long long used_bytes; + int upd_marker; + int corrupted; + int alignment; + int data_pad; + int name_len; + char name[UBI_VOL_NAME_MAX+1]; + + int updating; + int upd_ebs; + long long upd_bytes; + long long upd_received; + void *upd_buf; + + int *eba_tbl; + +#ifdef CONFIG_MTD_UBI_GLUEBI + /* Gluebi-related stuff may be compiled out */ + struct ubi_volume_desc *gluebi_desc; + int gluebi_refcount; + struct mtd_info gluebi_mtd; +#endif +}; + +/** + * struct ubi_volume_desc - descriptor of the UBI volume returned when it is + * opened. + * @vol: reference to the corresponding volume description object + * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) + */ +struct ubi_volume_desc { + struct ubi_volume *vol; + int mode; +}; + +struct ubi_wl_entry; + +/** + * struct ubi_device - UBI device description structure + * @dev: class device object to use the the Linux device model + * @cdev: character device object to create character device + * @ubi_num: UBI device number + * @ubi_name: UBI device name + * @major: character device major number + * @vol_count: number of volumes in this UBI device + * @volumes: volumes of this UBI device + * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, + * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers, + * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and + * @vol->eba_tbl. + * + * @rsvd_pebs: count of reserved physical eraseblocks + * @avail_pebs: count of available physical eraseblocks + * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB + * handling + * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling + * + * @vtbl_slots: how many slots are available in the volume table + * @vtbl_size: size of the volume table in bytes + * @vtbl: in-RAM volume table copy + * + * @max_ec: current highest erase counter value + * @mean_ec: current mean erase counter value + * + * global_sqnum: global sequence number + * @ltree_lock: protects the lock tree and @global_sqnum + * @ltree: the lock tree + * @vtbl_mutex: protects on-flash volume table + * + * @used: RB-tree of used physical eraseblocks + * @free: RB-tree of free physical eraseblocks + * @scrub: RB-tree of physical eraseblocks which need scrubbing + * @prot: protection trees + * @prot.pnum: protection tree indexed by physical eraseblock numbers + * @prot.aec: protection tree indexed by absolute erase counter value + * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, + * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works + * fields + * @wl_scheduled: non-zero if the wear-leveling was scheduled + * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any + * physical eraseblock + * @abs_ec: absolute erase counter + * @move_from: physical eraseblock from where the data is being moved + * @move_to: physical eraseblock where the data is being moved to + * @move_from_put: if the "from" PEB was put + * @move_to_put: if the "to" PEB was put + * @works: list of pending works + * @works_count: count of pending works + * @bgt_thread: background thread description object + * @thread_enabled: if the background thread is enabled + * @bgt_name: background thread name + * 
+ * @flash_size: underlying MTD device size (in bytes) + * @peb_count: count of physical eraseblocks on the MTD device + * @peb_size: physical eraseblock size + * @bad_peb_count: count of bad physical eraseblocks + * @good_peb_count: count of good physical eraseblocks + * @min_io_size: minimal input/output unit size of the underlying MTD device + * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers + * @ro_mode: if the UBI device is in read-only mode + * @leb_size: logical eraseblock size + * @leb_start: starting offset of logical eraseblocks within physical + * eraseblocks + * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size + * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size + * @vid_hdr_offset: starting offset of the volume identifier header (might be + * unaligned) + * @vid_hdr_aloffset: starting offset of the VID header aligned to + * @hdrs_min_io_size + * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset + * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or + * not + * @mtd: MTD device descriptor + */ +struct ubi_device { + struct cdev cdev; + struct device dev; + int ubi_num; + char ubi_name[sizeof(UBI_NAME_STR)+5]; + int major; + int vol_count; + struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; + spinlock_t volumes_lock; + + int rsvd_pebs; + int avail_pebs; + int beb_rsvd_pebs; + int beb_rsvd_level; + + int vtbl_slots; + int vtbl_size; + struct ubi_vtbl_record *vtbl; + struct mutex vtbl_mutex; + + int max_ec; + int mean_ec; + + /* EBA unit's stuff */ + unsigned long long global_sqnum; + spinlock_t ltree_lock; + struct rb_root ltree; + + /* Wear-leveling unit's stuff */ + struct rb_root used; + struct rb_root free; + struct rb_root scrub; + struct { + struct rb_root pnum; + struct rb_root aec; + } prot; + spinlock_t wl_lock; + int wl_scheduled; + struct ubi_wl_entry **lookuptbl; + unsigned long long abs_ec; + struct ubi_wl_entry *move_from; + struct ubi_wl_entry *move_to; + int move_from_put; + int move_to_put; + struct list_head works; + int works_count; + struct task_struct *bgt_thread; + int thread_enabled; + char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; + + /* I/O unit's stuff */ + long long flash_size; + int peb_count; + int peb_size; + int bad_peb_count; + int good_peb_count; + int min_io_size; + int hdrs_min_io_size; + int ro_mode; + int leb_size; + int leb_start; + int ec_hdr_alsize; + int vid_hdr_alsize; + int vid_hdr_offset; + int vid_hdr_aloffset; + int vid_hdr_shift; + int bad_allowed; + struct mtd_info *mtd; +}; + +extern struct file_operations ubi_cdev_operations; +extern struct file_operations ubi_vol_cdev_operations; +extern struct class *ubi_class; + +/* vtbl.c */ +int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, + struct ubi_vtbl_record *vtbl_rec); +int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si); + +/* vmt.c */ +int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); +int ubi_remove_volume(struct ubi_volume_desc *desc); +int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); +int ubi_add_volume(struct ubi_device *ubi, int vol_id); +void ubi_free_volume(struct ubi_device *ubi, int vol_id); + +/* upd.c */ +int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes); +int ubi_more_update_data(struct ubi_device *ubi, int vol_id, + const void __user *buf, int count); + +/* misc.c */ +int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); +int 
ubi_check_volume(struct ubi_device *ubi, int vol_id); +void ubi_calculate_reserved(struct ubi_device *ubi); + +/* gluebi.c */ +#ifdef CONFIG_MTD_UBI_GLUEBI +int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol); +int ubi_destroy_gluebi(struct ubi_volume *vol); +#else +#define ubi_create_gluebi(ubi, vol) 0 +#define ubi_destroy_gluebi(vol) 0 +#endif + +/* eba.c */ +int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum); +int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, + int offset, int len, int check); +int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, + const void *buf, int offset, int len, int dtype); +int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, + const void *buf, int len, int dtype, + int used_ebs); +int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, + const void *buf, int len, int dtype); +int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, + struct ubi_vid_hdr *vid_hdr); +int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); +void ubi_eba_close(const struct ubi_device *ubi); + +/* wl.c */ +int ubi_wl_get_peb(struct ubi_device *ubi, int dtype); +int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture); +int ubi_wl_flush(struct ubi_device *ubi); +int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); +int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); +void ubi_wl_close(struct ubi_device *ubi); + +/* io.c */ +int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, + int len); +int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum, + int offset, int len); +int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture); +int ubi_io_is_bad(const struct ubi_device *ubi, int pnum); +int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum); +int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum, + struct ubi_ec_hdr *ec_hdr, int verbose); +int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum, + struct ubi_ec_hdr *ec_hdr); +int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum, + struct ubi_vid_hdr *vid_hdr, int verbose); +int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum, + struct ubi_vid_hdr *vid_hdr); + +/* + * ubi_rb_for_each_entry - walk an RB-tree. + * @rb: a pointer to type 'struct rb_node' to to use as a loop counter + * @pos: a pointer to RB-tree entry type to use as a loop counter + * @root: RB-tree's root + * @member: the name of the 'struct rb_node' within the RB-tree entry + */ +#define ubi_rb_for_each_entry(rb, pos, root, member) \ + for (rb = rb_first(root), \ + pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \ + rb; \ + rb = rb_next(rb), pos = container_of(rb, typeof(*pos), member)) + +/** + * ubi_zalloc_vid_hdr - allocate a volume identifier header object. + * @ubi: UBI device description object + * + * This function returns a pointer to the newly allocated and zero-filled + * volume identifier header object in case of success and %NULL in case of + * failure. + */ +static inline struct ubi_vid_hdr *ubi_zalloc_vid_hdr(const struct ubi_device *ubi) +{ + void *vid_hdr; + + vid_hdr = kzalloc(ubi->vid_hdr_alsize, GFP_KERNEL); + if (!vid_hdr) + return NULL; + + /* + * VID headers may be stored at un-aligned flash offsets, so we shift + * the pointer. + */ + return vid_hdr + ubi->vid_hdr_shift; +} + +/** + * ubi_free_vid_hdr - free a volume identifier header object. 
+ * @ubi: UBI device description object + * @vid_hdr: the object to free + */ +static inline void ubi_free_vid_hdr(const struct ubi_device *ubi, + struct ubi_vid_hdr *vid_hdr) +{ + void *p = vid_hdr; + + if (!p) + return; + + kfree(p - ubi->vid_hdr_shift); +} + +/* + * This function is equivalent to 'ubi_io_read()', but @offset is relative to + * the beginning of the logical eraseblock, not to the beginning of the + * physical eraseblock. + */ +static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf, + int pnum, int offset, int len) +{ + ubi_assert(offset >= 0); + return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len); +} + +/* + * This function is equivalent to 'ubi_io_write()', but @offset is relative to + * the beginning of the logical eraseblock, not to the beginning of the + * physical eraseblock. + */ +static inline int ubi_io_write_data(const struct ubi_device *ubi, const void *buf, + int pnum, int offset, int len) +{ + ubi_assert(offset >= 0); + return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len); +} + +/** + * ubi_ro_mode - switch to read-only mode. + * @ubi: UBI device description object + */ +static inline void ubi_ro_mode(struct ubi_device *ubi) +{ + ubi->ro_mode = 1; + ubi_warn("switch to read-only mode"); +} + +/** + * vol_id2idx - get table index by volume ID. + * @ubi: UBI device description object + * @vol_id: volume ID + */ +static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id) +{ + if (vol_id >= UBI_INTERNAL_VOL_START) + return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots; + else + return vol_id; +} + +/** + * idx2vol_id - get volume ID by table index. + * @ubi: UBI device description object + * @idx: table index + */ +static inline int idx2vol_id(const struct ubi_device *ubi, int idx) +{ + if (idx >= ubi->vtbl_slots) + return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START; + else + return idx; +} + +#endif /* !__UBI_UBI_H__ */ diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c new file mode 100644 index 000000000000..8925b977e3dc --- /dev/null +++ b/drivers/mtd/ubi/upd.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + * + * Jan 2007: Alexander Schmidt, hacked per-volume update. + */ + +/* + * This file contains implementation of the volume update functionality. + * + * The update operation is based on the per-volume update marker which is + * stored in the volume table. The update marker is set before the update + * starts, and removed after the update has been finished. 
So if the update was + * interrupted by an unclean re-boot or due to some other reasons, the update + * marker stays on the flash media and UBI finds it when it attaches the MTD + * device next time. If the update marker is set for a volume, the volume is + * treated as damaged and most I/O operations are prohibited. Only a new update + * operation is allowed. + * + * Note, in general it is possible to implement the update operation as a + * transaction with a roll-back capability. + */ + +#include +#include +#include +#include "ubi.h" + +/** + * set_update_marker - set update marker. + * @ubi: UBI device description object + * @vol_id: volume ID + * + * This function sets the update marker flag for volume @vol_id. Returns zero + * in case of success and a negative error code in case of failure. + */ +static int set_update_marker(struct ubi_device *ubi, int vol_id) +{ + int err; + struct ubi_vtbl_record vtbl_rec; + struct ubi_volume *vol = ubi->volumes[vol_id]; + + dbg_msg("set update marker for volume %d", vol_id); + + if (vol->upd_marker) { + ubi_assert(ubi->vtbl[vol_id].upd_marker); + dbg_msg("already set"); + return 0; + } + + memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); + vtbl_rec.upd_marker = 1; + + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); + vol->upd_marker = 1; + return err; +} + +/** + * clear_update_marker - clear update marker. + * @ubi: UBI device description object + * @vol_id: volume ID + * @bytes: new data size in bytes + * + * This function clears the update marker for volume @vol_id, sets new volume + * data size and clears the "corrupted" flag (static volumes only). Returns + * zero in case of success and a negative error code in case of failure. + */ +static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes) +{ + int err; + uint64_t tmp; + struct ubi_vtbl_record vtbl_rec; + struct ubi_volume *vol = ubi->volumes[vol_id]; + + dbg_msg("clear update marker for volume %d", vol_id); + + memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); + ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); + vtbl_rec.upd_marker = 0; + + if (vol->vol_type == UBI_STATIC_VOLUME) { + vol->corrupted = 0; + vol->used_bytes = tmp = bytes; + vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size); + vol->used_ebs = tmp; + if (vol->last_eb_bytes) + vol->used_ebs += 1; + else + vol->last_eb_bytes = vol->usable_leb_size; + } + + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); + vol->upd_marker = 0; + return err; +} + +/** + * ubi_start_update - start volume update. + * @ubi: UBI device description object + * @vol_id: volume ID + * @bytes: update bytes + * + * This function starts volume update operation. If @bytes is zero, the volume + * is just wiped out. Returns zero in case of success and a negative error code + * in case of failure. 
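+ *
+ * The volume character device code drives an update roughly like this (a
+ * sketch, not a quote of that code; @bytes and @count come from user-space):
+ *
+ *	err = ubi_start_update(ubi, vol_id, bytes);
+ *	while (update data keeps arriving)
+ *		err = ubi_more_update_data(ubi, vol_id, user_buf, count);
+ *
+ * where a positive return value from 'ubi_more_update_data()' indicates that
+ * the last chunk has been written and the update is finished.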
+ */ +int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes) +{ + int i, err; + uint64_t tmp; + struct ubi_volume *vol = ubi->volumes[vol_id]; + + dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes); + vol->updating = 1; + + err = set_update_marker(ubi, vol_id); + if (err) + return err; + + /* Before updating - wipe out the volume */ + for (i = 0; i < vol->reserved_pebs; i++) { + err = ubi_eba_unmap_leb(ubi, vol_id, i); + if (err) + return err; + } + + if (bytes == 0) { + err = clear_update_marker(ubi, vol_id, 0); + if (err) + return err; + err = ubi_wl_flush(ubi); + if (!err) + vol->updating = 0; + } + + vol->upd_buf = kmalloc(ubi->leb_size, GFP_KERNEL); + if (!vol->upd_buf) + return -ENOMEM; + + tmp = bytes; + vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size); + vol->upd_ebs += tmp; + vol->upd_bytes = bytes; + vol->upd_received = 0; + return 0; +} + +/** + * write_leb - write update data. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * @buf: data to write + * @len: data size + * @used_ebs: how many logical eraseblocks will this volume contain (static + * volumes only) + * + * This function writes update data to corresponding logical eraseblock. In + * case of dynamic volume, this function checks if the data contains 0xFF bytes + * at the end. If yes, the 0xFF bytes are cut and not written. So if the whole + * buffer contains only 0xFF bytes, the LEB is left unmapped. + * + * The reason why we skip the trailing 0xFF bytes in case of dynamic volume is + * that we want to make sure that more data may be appended to the logical + * eraseblock in future. Indeed, writing 0xFF bytes may have side effects and + * this PEB won't be writable anymore. So if one writes the file-system image + * to the UBI volume where 0xFFs mean free space - UBI makes sure this free + * space is writable after the update. + * + * We do not do this for static volumes because they are read-only. But this + * also cannot be done because we have to store per-LEB CRC and the correct + * data length. + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, + int len, int used_ebs) +{ + int err, l; + struct ubi_volume *vol = ubi->volumes[vol_id]; + + if (vol->vol_type == UBI_DYNAMIC_VOLUME) { + l = ALIGN(len, ubi->min_io_size); + memset(buf + len, 0xFF, l - len); + + l = ubi_calc_data_len(ubi, buf, l); + if (l == 0) { + dbg_msg("all %d bytes contain 0xFF - skip", len); + return 0; + } + if (len != l) + dbg_msg("skip last %d bytes (0xFF)", len - l); + + err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l, + UBI_UNKNOWN); + } else { + /* + * When writing static volume, and this is the last logical + * eraseblock, the length (@len) does not have to be aligned to + * the minimal flash I/O unit. The 'ubi_eba_write_leb_st()' + * function accepts exact (unaligned) length and stores it in + * the VID header. And it takes care of proper alignment by + * padding the buffer. Here we just make sure the padding will + * contain zeros, not random trash. + */ + memset(buf + len, 0, vol->usable_leb_size - len); + err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len, + UBI_UNKNOWN, used_ebs); + } + + return err; +} + +/** + * ubi_more_update_data - write more update data. 
+ * @vol: volume description object + * @buf: write data (user-space memory buffer) + * @count: how much bytes to write + * + * This function writes more data to the volume which is being updated. It may + * be called arbitrary number of times until all of the update data arrive. + * This function returns %0 in case of success, number of bytes written during + * the last call if the whole volume update was successfully finished, and a + * negative error code in case of failure. + */ +int ubi_more_update_data(struct ubi_device *ubi, int vol_id, + const void __user *buf, int count) +{ + uint64_t tmp; + struct ubi_volume *vol = ubi->volumes[vol_id]; + int lnum, offs, err = 0, len, to_write = count; + + dbg_msg("write %d of %lld bytes, %lld already passed", + count, vol->upd_bytes, vol->upd_received); + + if (ubi->ro_mode) + return -EROFS; + + tmp = vol->upd_received; + offs = do_div(tmp, vol->usable_leb_size); + lnum = tmp; + + if (vol->upd_received + count > vol->upd_bytes) + to_write = count = vol->upd_bytes - vol->upd_received; + + /* + * When updating volumes, we accumulate whole logical eraseblock of + * data and write it at once. + */ + if (offs != 0) { + /* + * This is a write to the middle of the logical eraseblock. We + * copy the data to our update buffer and wait for more data or + * flush it if the whole eraseblock is written or the update + * is finished. + */ + + len = vol->usable_leb_size - offs; + if (len > count) + len = count; + + err = copy_from_user(vol->upd_buf + offs, buf, len); + if (err) + return -EFAULT; + + if (offs + len == vol->usable_leb_size || + vol->upd_received + len == vol->upd_bytes) { + int flush_len = offs + len; + + /* + * OK, we gathered either the whole eraseblock or this + * is the last chunk, it's time to flush the buffer. + */ + ubi_assert(flush_len <= vol->usable_leb_size); + err = write_leb(ubi, vol_id, lnum, vol->upd_buf, + flush_len, vol->upd_ebs); + if (err) + return err; + } + + vol->upd_received += len; + count -= len; + buf += len; + lnum += 1; + } + + /* + * If we've got more to write, let's continue. At this point we know we + * are starting from the beginning of an eraseblock. + */ + while (count) { + if (count > vol->usable_leb_size) + len = vol->usable_leb_size; + else + len = count; + + err = copy_from_user(vol->upd_buf, buf, len); + if (err) + return -EFAULT; + + if (len == vol->usable_leb_size || + vol->upd_received + len == vol->upd_bytes) { + err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len, + vol->upd_ebs); + if (err) + break; + } + + vol->upd_received += len; + count -= len; + lnum += 1; + buf += len; + } + + ubi_assert(vol->upd_received <= vol->upd_bytes); + if (vol->upd_received == vol->upd_bytes) { + /* The update is finished, clear the update marker */ + err = clear_update_marker(ubi, vol_id, vol->upd_bytes); + if (err) + return err; + err = ubi_wl_flush(ubi); + if (err == 0) { + err = to_write; + kfree(vol->upd_buf); + vol->updating = 0; + } + } + + return err; +} diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c new file mode 100644 index 000000000000..622d0d18952c --- /dev/null +++ b/drivers/mtd/ubi/vmt.c @@ -0,0 +1,809 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * This file contains implementation of volume creation, deletion, updating and + * resizing. + */ + +#include +#include +#include "ubi.h" + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +static void paranoid_check_volumes(struct ubi_device *ubi); +#else +#define paranoid_check_volumes(ubi) +#endif + +static ssize_t vol_attribute_show(struct device *dev, + struct device_attribute *attr, char *buf); + +/* Device attributes corresponding to files in '//class/ubi/ubiX_Y' */ +static struct device_attribute vol_reserved_ebs = + __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute vol_type = + __ATTR(type, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute vol_name = + __ATTR(name, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute vol_corrupted = + __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute vol_alignment = + __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute vol_usable_eb_size = + __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute vol_data_bytes = + __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute vol_upd_marker = + __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL); + +/* + * "Show" method for files in '//class/ubi/ubiX_Y/'. + * + * Consider a situation: + * A. process 1 opens a sysfs file related to volume Y, say + * //class/ubi/ubiX_Y/reserved_ebs; + * B. process 2 removes volume Y; + * C. process 1 starts reading the //class/ubi/ubiX_Y/reserved_ebs file; + * + * What we want to do in a situation like that is to return error when the file + * is read. This is done by means of the 'removed' flag and the 'vol_lock' of + * the UBI volume description object. + */ +static ssize_t vol_attribute_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); + + spin_lock(&vol->ubi->volumes_lock); + if (vol->removed) { + spin_unlock(&vol->ubi->volumes_lock); + return -ENODEV; + } + if (attr == &vol_reserved_ebs) + ret = sprintf(buf, "%d\n", vol->reserved_pebs); + else if (attr == &vol_type) { + const char *tp; + tp = vol->vol_type == UBI_DYNAMIC_VOLUME ? 
"dynamic" : "static"; + ret = sprintf(buf, "%s\n", tp); + } else if (attr == &vol_name) + ret = sprintf(buf, "%s\n", vol->name); + else if (attr == &vol_corrupted) + ret = sprintf(buf, "%d\n", vol->corrupted); + else if (attr == &vol_alignment) + ret = sprintf(buf, "%d\n", vol->alignment); + else if (attr == &vol_usable_eb_size) { + ret = sprintf(buf, "%d\n", vol->usable_leb_size); + } else if (attr == &vol_data_bytes) + ret = sprintf(buf, "%lld\n", vol->used_bytes); + else if (attr == &vol_upd_marker) + ret = sprintf(buf, "%d\n", vol->upd_marker); + else + BUG(); + spin_unlock(&vol->ubi->volumes_lock); + return ret; +} + +/* Release method for volume devices */ +static void vol_release(struct device *dev) +{ + struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); + ubi_assert(vol->removed); + kfree(vol); +} + +/** + * volume_sysfs_init - initialize sysfs for new volume. + * @ubi: UBI device description object + * @vol: volume description object + * + * This function returns zero in case of success and a negative error code in + * case of failure. + * + * Note, this function does not free allocated resources in case of failure - + * the caller does it. This is because this would cause release() here and the + * caller would oops. + */ +static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol) +{ + int err; + + err = device_create_file(&vol->dev, &vol_reserved_ebs); + if (err) + return err; + err = device_create_file(&vol->dev, &vol_type); + if (err) + return err; + err = device_create_file(&vol->dev, &vol_name); + if (err) + return err; + err = device_create_file(&vol->dev, &vol_corrupted); + if (err) + return err; + err = device_create_file(&vol->dev, &vol_alignment); + if (err) + return err; + err = device_create_file(&vol->dev, &vol_usable_eb_size); + if (err) + return err; + err = device_create_file(&vol->dev, &vol_data_bytes); + if (err) + return err; + err = device_create_file(&vol->dev, &vol_upd_marker); + if (err) + return err; + return 0; +} + +/** + * volume_sysfs_close - close sysfs for a volume. + * @vol: volume description object + */ +static void volume_sysfs_close(struct ubi_volume *vol) +{ + device_remove_file(&vol->dev, &vol_upd_marker); + device_remove_file(&vol->dev, &vol_data_bytes); + device_remove_file(&vol->dev, &vol_usable_eb_size); + device_remove_file(&vol->dev, &vol_alignment); + device_remove_file(&vol->dev, &vol_corrupted); + device_remove_file(&vol->dev, &vol_name); + device_remove_file(&vol->dev, &vol_type); + device_remove_file(&vol->dev, &vol_reserved_ebs); + device_unregister(&vol->dev); +} + +/** + * ubi_create_volume - create volume. + * @ubi: UBI device description object + * @req: volume creation request + * + * This function creates volume described by @req. If @req->vol_id id + * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume + * and saves it in @req->vol_id. Returns zero in case of success and a negative + * error code in case of failure. 
+ */ +int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) +{ + int i, err, vol_id = req->vol_id; + struct ubi_volume *vol; + struct ubi_vtbl_record vtbl_rec; + uint64_t bytes; + + if (ubi->ro_mode) + return -EROFS; + + vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); + if (!vol) + return -ENOMEM; + + spin_lock(&ubi->volumes_lock); + + if (vol_id == UBI_VOL_NUM_AUTO) { + /* Find unused volume ID */ + dbg_msg("search for vacant volume ID"); + for (i = 0; i < ubi->vtbl_slots; i++) + if (!ubi->volumes[i]) { + vol_id = i; + break; + } + + if (vol_id == UBI_VOL_NUM_AUTO) { + dbg_err("out of volume IDs"); + err = -ENFILE; + goto out_unlock; + } + req->vol_id = vol_id; + } + + dbg_msg("volume ID %d, %llu bytes, type %d, name %s", + vol_id, (unsigned long long)req->bytes, + (int)req->vol_type, req->name); + + /* Ensure that this volume does not exist */ + err = -EEXIST; + if (ubi->volumes[vol_id]) { + dbg_err("volume %d already exists", vol_id); + goto out_unlock; + } + + /* Ensure that the name is unique */ + for (i = 0; i < ubi->vtbl_slots; i++) + if (ubi->volumes[i] && + ubi->volumes[i]->name_len == req->name_len && + strcmp(ubi->volumes[i]->name, req->name) == 0) { + dbg_err("volume \"%s\" exists (ID %d)", req->name, i); + goto out_unlock; + } + + /* Calculate how many eraseblocks are requested */ + vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; + bytes = req->bytes; + if (do_div(bytes, vol->usable_leb_size)) + vol->reserved_pebs = 1; + vol->reserved_pebs += bytes; + + /* Reserve physical eraseblocks */ + if (vol->reserved_pebs > ubi->avail_pebs) { + dbg_err("not enough PEBs, only %d available", ubi->avail_pebs); + spin_unlock(&ubi->volumes_lock); + err = -ENOSPC; + goto out_unlock; + } + ubi->avail_pebs -= vol->reserved_pebs; + ubi->rsvd_pebs += vol->reserved_pebs; + + vol->vol_id = vol_id; + vol->alignment = req->alignment; + vol->data_pad = ubi->leb_size % vol->alignment; + vol->vol_type = req->vol_type; + vol->name_len = req->name_len; + memcpy(vol->name, req->name, vol->name_len + 1); + vol->exclusive = 1; + vol->ubi = ubi; + ubi->volumes[vol_id] = vol; + spin_unlock(&ubi->volumes_lock); + + /* + * Finish all pending erases because there may be some LEBs belonging + * to the same volume ID. 
+ */ + err = ubi_wl_flush(ubi); + if (err) + goto out_acc; + + vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL); + if (!vol->eba_tbl) { + err = -ENOMEM; + goto out_acc; + } + + for (i = 0; i < vol->reserved_pebs; i++) + vol->eba_tbl[i] = UBI_LEB_UNMAPPED; + + if (vol->vol_type == UBI_DYNAMIC_VOLUME) { + vol->used_ebs = vol->reserved_pebs; + vol->last_eb_bytes = vol->usable_leb_size; + vol->used_bytes = vol->used_ebs * vol->usable_leb_size; + } else { + bytes = vol->used_bytes; + vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size); + vol->used_ebs = bytes; + if (vol->last_eb_bytes) + vol->used_ebs += 1; + else + vol->last_eb_bytes = vol->usable_leb_size; + } + + /* Register character device for the volume */ + cdev_init(&vol->cdev, &ubi_vol_cdev_operations); + vol->cdev.owner = THIS_MODULE; + err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1); + if (err) { + ubi_err("cannot add character device for volume %d", vol_id); + goto out_mapping; + } + + err = ubi_create_gluebi(ubi, vol); + if (err) + goto out_cdev; + + vol->dev.release = vol_release; + vol->dev.parent = &ubi->dev; + vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); + vol->dev.class = ubi_class; + sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); + err = device_register(&vol->dev); + if (err) + goto out_gluebi; + + err = volume_sysfs_init(ubi, vol); + if (err) + goto out_sysfs; + + /* Fill volume table record */ + memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record)); + vtbl_rec.reserved_pebs = cpu_to_ubi32(vol->reserved_pebs); + vtbl_rec.alignment = cpu_to_ubi32(vol->alignment); + vtbl_rec.data_pad = cpu_to_ubi32(vol->data_pad); + vtbl_rec.name_len = cpu_to_ubi16(vol->name_len); + if (vol->vol_type == UBI_DYNAMIC_VOLUME) + vtbl_rec.vol_type = UBI_VID_DYNAMIC; + else + vtbl_rec.vol_type = UBI_VID_STATIC; + memcpy(vtbl_rec.name, vol->name, vol->name_len + 1); + + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); + if (err) + goto out_sysfs; + + spin_lock(&ubi->volumes_lock); + ubi->vol_count += 1; + vol->exclusive = 0; + spin_unlock(&ubi->volumes_lock); + + paranoid_check_volumes(ubi); + return 0; + +out_gluebi: + err = ubi_destroy_gluebi(vol); +out_cdev: + cdev_del(&vol->cdev); +out_mapping: + kfree(vol->eba_tbl); +out_acc: + spin_lock(&ubi->volumes_lock); + ubi->rsvd_pebs -= vol->reserved_pebs; + ubi->avail_pebs += vol->reserved_pebs; +out_unlock: + spin_unlock(&ubi->volumes_lock); + kfree(vol); + return err; + + /* + * We are registered, so @vol is destroyed in the release function and + * we have to de-initialize differently. + */ +out_sysfs: + err = ubi_destroy_gluebi(vol); + cdev_del(&vol->cdev); + kfree(vol->eba_tbl); + spin_lock(&ubi->volumes_lock); + ubi->rsvd_pebs -= vol->reserved_pebs; + ubi->avail_pebs += vol->reserved_pebs; + spin_unlock(&ubi->volumes_lock); + volume_sysfs_close(vol); + return err; +} + +/** + * ubi_remove_volume - remove volume. + * @desc: volume descriptor + * + * This function removes volume described by @desc. The volume has to be opened + * in "exclusive" mode. Returns zero in case of success and a negative error + * code in case of failure. 
+ */ +int ubi_remove_volume(struct ubi_volume_desc *desc) +{ + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; + + dbg_msg("remove UBI volume %d", vol_id); + ubi_assert(desc->mode == UBI_EXCLUSIVE); + ubi_assert(vol == ubi->volumes[vol_id]); + + if (ubi->ro_mode) + return -EROFS; + + err = ubi_destroy_gluebi(vol); + if (err) + return err; + + err = ubi_change_vtbl_record(ubi, vol_id, NULL); + if (err) + return err; + + for (i = 0; i < vol->reserved_pebs; i++) { + err = ubi_eba_unmap_leb(ubi, vol_id, i); + if (err) + return err; + } + + spin_lock(&ubi->volumes_lock); + vol->removed = 1; + ubi->volumes[vol_id] = NULL; + spin_unlock(&ubi->volumes_lock); + + kfree(vol->eba_tbl); + vol->eba_tbl = NULL; + cdev_del(&vol->cdev); + volume_sysfs_close(vol); + kfree(desc); + + spin_lock(&ubi->volumes_lock); + ubi->rsvd_pebs -= reserved_pebs; + ubi->avail_pebs += reserved_pebs; + i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; + if (i > 0) { + i = ubi->avail_pebs >= i ? i : ubi->avail_pebs; + ubi->avail_pebs -= i; + ubi->rsvd_pebs += i; + ubi->beb_rsvd_pebs += i; + if (i > 0) + ubi_msg("reserve more %d PEBs", i); + } + ubi->vol_count -= 1; + spin_unlock(&ubi->volumes_lock); + + paranoid_check_volumes(ubi); + module_put(THIS_MODULE); + return 0; +} + +/** + * ubi_resize_volume - re-size volume. + * @desc: volume descriptor + * @reserved_pebs: new size in physical eraseblocks + * + * This function returns zero in case of success, and a negative error code in + * case of failure. + */ +int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) +{ + int i, err, pebs, *new_mapping; + struct ubi_volume *vol = desc->vol; + struct ubi_device *ubi = vol->ubi; + struct ubi_vtbl_record vtbl_rec; + int vol_id = vol->vol_id; + + if (ubi->ro_mode) + return -EROFS; + + dbg_msg("re-size volume %d to from %d to %d PEBs", + vol_id, vol->reserved_pebs, reserved_pebs); + ubi_assert(desc->mode == UBI_EXCLUSIVE); + ubi_assert(vol == ubi->volumes[vol_id]); + + if (vol->vol_type == UBI_STATIC_VOLUME && + reserved_pebs < vol->used_ebs) { + dbg_err("too small size %d, %d LEBs contain data", + reserved_pebs, vol->used_ebs); + return -EINVAL; + } + + /* If the size is the same, we have nothing to do */ + if (reserved_pebs == vol->reserved_pebs) + return 0; + + new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL); + if (!new_mapping) + return -ENOMEM; + + for (i = 0; i < reserved_pebs; i++) + new_mapping[i] = UBI_LEB_UNMAPPED; + + /* Reserve physical eraseblocks */ + pebs = reserved_pebs - vol->reserved_pebs; + if (pebs > 0) { + spin_lock(&ubi->volumes_lock); + if (pebs > ubi->avail_pebs) { + dbg_err("not enough PEBs: requested %d, available %d", + pebs, ubi->avail_pebs); + spin_unlock(&ubi->volumes_lock); + err = -ENOSPC; + goto out_free; + } + ubi->avail_pebs -= pebs; + ubi->rsvd_pebs += pebs; + for (i = 0; i < vol->reserved_pebs; i++) + new_mapping[i] = vol->eba_tbl[i]; + kfree(vol->eba_tbl); + vol->eba_tbl = new_mapping; + spin_unlock(&ubi->volumes_lock); + } + + /* Change volume table record */ + memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); + vtbl_rec.reserved_pebs = cpu_to_ubi32(reserved_pebs); + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); + if (err) + goto out_acc; + + if (pebs < 0) { + for (i = 0; i < -pebs; i++) { + err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i); + if (err) + goto out_acc; + } + spin_lock(&ubi->volumes_lock); + ubi->rsvd_pebs += pebs; + 
ubi->avail_pebs -= pebs; + pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; + if (pebs > 0) { + pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs; + ubi->avail_pebs -= pebs; + ubi->rsvd_pebs += pebs; + ubi->beb_rsvd_pebs += pebs; + if (pebs > 0) + ubi_msg("reserve more %d PEBs", pebs); + } + for (i = 0; i < reserved_pebs; i++) + new_mapping[i] = vol->eba_tbl[i]; + kfree(vol->eba_tbl); + vol->eba_tbl = new_mapping; + spin_unlock(&ubi->volumes_lock); + } + + vol->reserved_pebs = reserved_pebs; + if (vol->vol_type == UBI_DYNAMIC_VOLUME) { + vol->used_ebs = reserved_pebs; + vol->last_eb_bytes = vol->usable_leb_size; + vol->used_bytes = vol->used_ebs * vol->usable_leb_size; + } + + paranoid_check_volumes(ubi); + return 0; + +out_acc: + if (pebs > 0) { + spin_lock(&ubi->volumes_lock); + ubi->rsvd_pebs -= pebs; + ubi->avail_pebs += pebs; + spin_unlock(&ubi->volumes_lock); + } +out_free: + kfree(new_mapping); + return err; +} + +/** + * ubi_add_volume - add volume. + * @ubi: UBI device description object + * @vol_id: volume ID + * + * This function adds an existin volume and initializes all its data + * structures. Returnes zero in case of success and a negative error code in + * case of failure. + */ +int ubi_add_volume(struct ubi_device *ubi, int vol_id) +{ + int err; + struct ubi_volume *vol = ubi->volumes[vol_id]; + + dbg_msg("add volume %d", vol_id); + ubi_dbg_dump_vol_info(vol); + ubi_assert(vol); + + /* Register character device for the volume */ + cdev_init(&vol->cdev, &ubi_vol_cdev_operations); + vol->cdev.owner = THIS_MODULE; + err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1); + if (err) { + ubi_err("cannot add character device for volume %d", vol_id); + return err; + } + + err = ubi_create_gluebi(ubi, vol); + if (err) + goto out_cdev; + + vol->dev.release = vol_release; + vol->dev.parent = &ubi->dev; + vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); + vol->dev.class = ubi_class; + sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); + err = device_register(&vol->dev); + if (err) + goto out_gluebi; + + err = volume_sysfs_init(ubi, vol); + if (err) { + cdev_del(&vol->cdev); + err = ubi_destroy_gluebi(vol); + volume_sysfs_close(vol); + return err; + } + + paranoid_check_volumes(ubi); + return 0; + +out_gluebi: + err = ubi_destroy_gluebi(vol); +out_cdev: + cdev_del(&vol->cdev); + return err; +} + +/** + * ubi_free_volume - free volume. + * @ubi: UBI device description object + * @vol_id: volume ID + * + * This function frees all resources for volume @vol_id but does not remove it. + * Used only when the UBI device is detached. + */ +void ubi_free_volume(struct ubi_device *ubi, int vol_id) +{ + int err; + struct ubi_volume *vol = ubi->volumes[vol_id]; + + dbg_msg("free volume %d", vol_id); + ubi_assert(vol); + + vol->removed = 1; + err = ubi_destroy_gluebi(vol); + ubi->volumes[vol_id] = NULL; + cdev_del(&vol->cdev); + volume_sysfs_close(vol); +} + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID + +/** + * paranoid_check_volume - check volume information. 
+ * @ubi: UBI device description object + * @vol_id: volume ID + */ +static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id) +{ + int idx = vol_id2idx(ubi, vol_id); + int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker; + const struct ubi_volume *vol = ubi->volumes[idx]; + long long n; + const char *name; + + reserved_pebs = ubi32_to_cpu(ubi->vtbl[vol_id].reserved_pebs); + + if (!vol) { + if (reserved_pebs) { + ubi_err("no volume info, but volume exists"); + goto fail; + } + return; + } + + if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || + vol->name_len < 0) { + ubi_err("negative values"); + goto fail; + } + if (vol->alignment > ubi->leb_size || vol->alignment == 0) { + ubi_err("bad alignment"); + goto fail; + } + + n = vol->alignment % ubi->min_io_size; + if (vol->alignment != 1 && n) { + ubi_err("alignment is not multiple of min I/O unit"); + goto fail; + } + + n = ubi->leb_size % vol->alignment; + if (vol->data_pad != n) { + ubi_err("bad data_pad, has to be %lld", n); + goto fail; + } + + if (vol->vol_type != UBI_DYNAMIC_VOLUME && + vol->vol_type != UBI_STATIC_VOLUME) { + ubi_err("bad vol_type"); + goto fail; + } + + if (vol->upd_marker != 0 && vol->upd_marker != 1) { + ubi_err("bad upd_marker"); + goto fail; + } + + if (vol->upd_marker && vol->corrupted) { + dbg_err("update marker and corrupted simultaneously"); + goto fail; + } + + if (vol->reserved_pebs > ubi->good_peb_count) { + ubi_err("too large reserved_pebs"); + goto fail; + } + + n = ubi->leb_size - vol->data_pad; + if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) { + ubi_err("bad usable_leb_size, has to be %lld", n); + goto fail; + } + + if (vol->name_len > UBI_VOL_NAME_MAX) { + ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX); + goto fail; + } + + if (!vol->name) { + ubi_err("NULL volume name"); + goto fail; + } + + n = strnlen(vol->name, vol->name_len + 1); + if (n != vol->name_len) { + ubi_err("bad name_len %lld", n); + goto fail; + } + + n = vol->used_ebs * vol->usable_leb_size; + if (vol->vol_type == UBI_DYNAMIC_VOLUME) { + if (vol->corrupted != 0) { + ubi_err("corrupted dynamic volume"); + goto fail; + } + if (vol->used_ebs != vol->reserved_pebs) { + ubi_err("bad used_ebs"); + goto fail; + } + if (vol->last_eb_bytes != vol->usable_leb_size) { + ubi_err("bad last_eb_bytes"); + goto fail; + } + if (vol->used_bytes != n) { + ubi_err("bad used_bytes"); + goto fail; + } + } else { + if (vol->corrupted != 0 && vol->corrupted != 1) { + ubi_err("bad corrupted"); + goto fail; + } + if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { + ubi_err("bad used_ebs"); + goto fail; + } + if (vol->last_eb_bytes < 0 || + vol->last_eb_bytes > vol->usable_leb_size) { + ubi_err("bad last_eb_bytes"); + goto fail; + } + if (vol->used_bytes < 0 || vol->used_bytes > n || + vol->used_bytes < n - vol->usable_leb_size) { + ubi_err("bad used_bytes"); + goto fail; + } + } + + alignment = ubi32_to_cpu(ubi->vtbl[vol_id].alignment); + data_pad = ubi32_to_cpu(ubi->vtbl[vol_id].data_pad); + name_len = ubi16_to_cpu(ubi->vtbl[vol_id].name_len); + upd_marker = ubi->vtbl[vol_id].upd_marker; + name = &ubi->vtbl[vol_id].name[0]; + if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC) + vol_type = UBI_DYNAMIC_VOLUME; + else + vol_type = UBI_STATIC_VOLUME; + + if (alignment != vol->alignment || data_pad != vol->data_pad || + upd_marker != vol->upd_marker || vol_type != vol->vol_type || + name_len!= vol->name_len || strncmp(name, vol->name, name_len)) { + ubi_err("volume 
info is different"); + goto fail; + } + + return; + +fail: + ubi_err("paranoid check failed"); + ubi_dbg_dump_vol_info(vol); + ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); + BUG(); +} + +/** + * paranoid_check_volumes - check information about all volumes. + * @ubi: UBI device description object + */ +static void paranoid_check_volumes(struct ubi_device *ubi) +{ + int i; + + mutex_lock(&ubi->vtbl_mutex); + spin_lock(&ubi->volumes_lock); + for (i = 0; i < ubi->vtbl_slots; i++) + paranoid_check_volume(ubi, i); + spin_unlock(&ubi->volumes_lock); + mutex_unlock(&ubi->vtbl_mutex); +} +#endif diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c new file mode 100644 index 000000000000..b6fd6bbd941e --- /dev/null +++ b/drivers/mtd/ubi/vtbl.c @@ -0,0 +1,809 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2006, 2007 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * This file includes volume table manipulation code. The volume table is an + * on-flash table containing volume meta-data like name, number of reserved + * physical eraseblocks, type, etc. The volume table is stored in the so-called + * "layout volume". + * + * The layout volume is an internal volume which is organized as follows. It + * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical + * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each + * other. This redundancy guarantees robustness to unclean reboots. The volume + * table is basically an array of volume table records. Each record contains + * full information about the volume and protected by a CRC checksum. + * + * The volume table is changed, it is first changed in RAM. Then LEB 0 is + * erased, and the updated volume table is written back to LEB 0. Then same for + * LEB 1. This scheme guarantees recoverability from unclean reboots. + * + * In this UBI implementation the on-flash volume table does not contain any + * information about how many data static volumes contain. This information may + * be found from the scanning data. + * + * But it would still be beneficial to store this information in the volume + * table. For example, suppose we have a static volume X, and all its physical + * eraseblocks became bad for some reasons. Suppose we are attaching the + * corresponding MTD device, the scanning has found no logical eraseblocks + * corresponding to the volume X. According to the volume table volume X does + * exist. So we don't know whether it is just empty or all its physical + * eraseblocks went bad. So we cannot alarm the user about this corruption. + * + * The volume table also stores so-called "update marker", which is used for + * volume updates. 
Before updating the volume, the update marker is set, and + * after the update operation is finished, the update marker is cleared. So if + * the update operation was interrupted (e.g. by an unclean reboot) - the + * update marker is still there and we know that the volume's contents is + * damaged. + */ + +#include +#include +#include +#include "ubi.h" + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +static void paranoid_vtbl_check(const struct ubi_device *ubi); +#else +#define paranoid_vtbl_check(ubi) +#endif + +/* Empty volume table record */ +static struct ubi_vtbl_record empty_vtbl_record; + +/** + * ubi_change_vtbl_record - change volume table record. + * @ubi: UBI device description object + * @idx: table index to change + * @vtbl_rec: new volume table record + * + * This function changes volume table record @idx. If @vtbl_rec is %NULL, empty + * volume table record is written. The caller does not have to calculate CRC of + * the record as it is done by this function. Returns zero in case of success + * and a negative error code in case of failure. + */ +int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, + struct ubi_vtbl_record *vtbl_rec) +{ + int i, err; + uint32_t crc; + + ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); + + if (!vtbl_rec) + vtbl_rec = &empty_vtbl_record; + else { + crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC); + vtbl_rec->crc = cpu_to_ubi32(crc); + } + + dbg_msg("change record %d", idx); + ubi_dbg_dump_vtbl_record(vtbl_rec, idx); + + mutex_lock(&ubi->vtbl_mutex); + memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); + for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { + err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i); + if (err) { + mutex_unlock(&ubi->vtbl_mutex); + return err; + } + err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0, + ubi->vtbl_size, UBI_LONGTERM); + if (err) { + mutex_unlock(&ubi->vtbl_mutex); + return err; + } + } + + paranoid_vtbl_check(ubi); + mutex_unlock(&ubi->vtbl_mutex); + return ubi_wl_flush(ubi); +} + +/** + * vol_til_check - check if volume table is not corrupted and contains sensible + * data. + * + * @ubi: UBI device description object + * @vtbl: volume table + * + * This function returns zero if @vtbl is all right, %1 if CRC is incorrect, + * and %-EINVAL if it contains inconsistent data. 
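Every record, whether written by ubi_change_vtbl_record() above or verified by the checking code below, is protected by a CRC computed over the record minus the CRC field itself. Condensed into one sketch (the helper name is illustrative; the identifiers are the ones used in this patch):

static int vtbl_record_crc_ok(const struct ubi_vtbl_record *r)
{
	uint32_t crc;

	/* The CRC covers the first UBI_VTBL_RECORD_SIZE_CRC bytes only */
	crc = crc32(UBI_CRC32_INIT, r, UBI_VTBL_RECORD_SIZE_CRC);
	return ubi32_to_cpu(r->crc) == crc;
}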
+ */ +static int vtbl_check(const struct ubi_device *ubi, + const struct ubi_vtbl_record *vtbl) +{ + int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; + int upd_marker; + uint32_t crc; + const char *name; + + for (i = 0; i < ubi->vtbl_slots; i++) { + cond_resched(); + + reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs); + alignment = ubi32_to_cpu(vtbl[i].alignment); + data_pad = ubi32_to_cpu(vtbl[i].data_pad); + upd_marker = vtbl[i].upd_marker; + vol_type = vtbl[i].vol_type; + name_len = ubi16_to_cpu(vtbl[i].name_len); + name = &vtbl[i].name[0]; + + crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC); + if (ubi32_to_cpu(vtbl[i].crc) != crc) { + ubi_err("bad CRC at record %u: %#08x, not %#08x", + i, crc, ubi32_to_cpu(vtbl[i].crc)); + ubi_dbg_dump_vtbl_record(&vtbl[i], i); + return 1; + } + + if (reserved_pebs == 0) { + if (memcmp(&vtbl[i], &empty_vtbl_record, + UBI_VTBL_RECORD_SIZE)) { + dbg_err("bad empty record"); + goto bad; + } + continue; + } + + if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || + name_len < 0) { + dbg_err("negative values"); + goto bad; + } + + if (alignment > ubi->leb_size || alignment == 0) { + dbg_err("bad alignment"); + goto bad; + } + + n = alignment % ubi->min_io_size; + if (alignment != 1 && n) { + dbg_err("alignment is not multiple of min I/O unit"); + goto bad; + } + + n = ubi->leb_size % alignment; + if (data_pad != n) { + dbg_err("bad data_pad, has to be %d", n); + goto bad; + } + + if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { + dbg_err("bad vol_type"); + goto bad; + } + + if (upd_marker != 0 && upd_marker != 1) { + dbg_err("bad upd_marker"); + goto bad; + } + + if (reserved_pebs > ubi->good_peb_count) { + dbg_err("too large reserved_pebs, good PEBs %d", + ubi->good_peb_count); + goto bad; + } + + if (name_len > UBI_VOL_NAME_MAX) { + dbg_err("too long volume name, max %d", + UBI_VOL_NAME_MAX); + goto bad; + } + + if (name[0] == '\0') { + dbg_err("NULL volume name"); + goto bad; + } + + if (name_len != strnlen(name, name_len + 1)) { + dbg_err("bad name_len"); + goto bad; + } + } + + /* Checks that all names are unique */ + for (i = 0; i < ubi->vtbl_slots - 1; i++) { + for (n = i + 1; n < ubi->vtbl_slots; n++) { + int len1 = ubi16_to_cpu(vtbl[i].name_len); + int len2 = ubi16_to_cpu(vtbl[n].name_len); + + if (len1 > 0 && len1 == len2 && + !strncmp(vtbl[i].name, vtbl[n].name, len1)) { + ubi_err("volumes %d and %d have the same name" + " \"%s\"", i, n, vtbl[i].name); + ubi_dbg_dump_vtbl_record(&vtbl[i], i); + ubi_dbg_dump_vtbl_record(&vtbl[n], n); + return -EINVAL; + } + } + } + + return 0; + +bad: + ubi_err("volume table check failed, record %d", i); + ubi_dbg_dump_vtbl_record(&vtbl[i], i); + return -EINVAL; +} + +/** + * create_vtbl - create a copy of volume table. + * @ubi: UBI device description object + * @si: scanning information + * @copy: number of the volume table copy + * @vtbl: contents of the volume table + * + * This function returns zero in case of success and a negative error code in + * case of failure. 
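The alignment/data_pad consistency enforced by vtbl_check() above works out as in the following illustrative calculation (made-up numbers, assuming a 2048-byte minimal I/O unit so that the alignment is a valid multiple):

/*
 * leb_size = 126976, requested alignment = 6144:
 *
 *   data_pad        = 126976 % 6144 = 4096
 *   usable_leb_size = 126976 - 4096 = 122880  (an exact multiple of 6144)
 */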
+ */ +static int create_vtbl(const struct ubi_device *ubi, struct ubi_scan_info *si, + int copy, void *vtbl) +{ + int err, tries = 0; + static struct ubi_vid_hdr *vid_hdr; + struct ubi_scan_volume *sv; + struct ubi_scan_leb *new_seb, *old_seb = NULL; + + ubi_msg("create volume table (copy #%d)", copy + 1); + + vid_hdr = ubi_zalloc_vid_hdr(ubi); + if (!vid_hdr) + return -ENOMEM; + + /* + * Check if there is a logical eraseblock which would have to contain + * this volume table copy was found during scanning. It has to be wiped + * out. + */ + sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); + if (sv) + old_seb = ubi_scan_find_seb(sv, copy); + +retry: + new_seb = ubi_scan_get_free_peb(ubi, si); + if (IS_ERR(new_seb)) { + err = PTR_ERR(new_seb); + goto out_free; + } + + vid_hdr->vol_type = UBI_VID_DYNAMIC; + vid_hdr->vol_id = cpu_to_ubi32(UBI_LAYOUT_VOL_ID); + vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; + vid_hdr->data_size = vid_hdr->used_ebs = + vid_hdr->data_pad = cpu_to_ubi32(0); + vid_hdr->lnum = cpu_to_ubi32(copy); + vid_hdr->sqnum = cpu_to_ubi64(++si->max_sqnum); + vid_hdr->leb_ver = cpu_to_ubi32(old_seb ? old_seb->leb_ver + 1: 0); + + /* The EC header is already there, write the VID header */ + err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr); + if (err) + goto write_error; + + /* Write the layout volume contents */ + err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size); + if (err) + goto write_error; + + /* + * And add it to the scanning information. Don't delete the old + * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'. + */ + err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec, + vid_hdr, 0); + kfree(new_seb); + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + +write_error: + kfree(new_seb); + /* May be this physical eraseblock went bad, try to pick another one */ + if (++tries <= 5) { + err = ubi_scan_add_to_list(si, new_seb->pnum, new_seb->ec, + &si->corr); + if (!err) + goto retry; + } +out_free: + ubi_free_vid_hdr(ubi, vid_hdr); + return err; + +} + +/** + * process_lvol - process the layout volume. + * @ubi: UBI device description object + * @si: scanning information + * @sv: layout volume scanning information + * + * This function is responsible for reading the layout volume, ensuring it is + * not corrupted, and recovering from corruptions if needed. Returns volume + * table in case of success and a negative error code in case of failure. + */ +static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi, + struct ubi_scan_info *si, + struct ubi_scan_volume *sv) +{ + int err; + struct rb_node *rb; + struct ubi_scan_leb *seb; + struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL }; + int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1}; + + /* + * UBI goes through the following steps when it changes the layout + * volume: + * a. erase LEB 0; + * b. write new data to LEB 0; + * c. erase LEB 1; + * d. write new data to LEB 1. + * + * Before the change, both LEBs contain the same data. + * + * Due to unclean reboots, the contents of LEB 0 may be lost, but there + * should LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is not. + * Similarly, LEB 1 may be lost, but there should be LEB 0. And + * finally, unclean reboots may result in a situation when neither LEB + * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB + * 0 contains more recent information. + * + * So the plan is to first check LEB 0. Then + * a. 
if LEB 0 is OK, it must be containing the most resent data; then + * we compare it with LEB 1, and if they are different, we copy LEB + * 0 to LEB 1; + * b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1 + * to LEB 0. + */ + + dbg_msg("check layout volume"); + + /* Read both LEB 0 and LEB 1 into memory */ + ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { + leb[seb->lnum] = kzalloc(ubi->vtbl_size, GFP_KERNEL); + if (!leb[seb->lnum]) { + err = -ENOMEM; + goto out_free; + } + + err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, + ubi->vtbl_size); + if (err == UBI_IO_BITFLIPS || err == -EBADMSG) + /* Scrub the PEB later */ + seb->scrub = 1; + else if (err) + goto out_free; + } + + err = -EINVAL; + if (leb[0]) { + leb_corrupted[0] = vtbl_check(ubi, leb[0]); + if (leb_corrupted[0] < 0) + goto out_free; + } + + if (!leb_corrupted[0]) { + /* LEB 0 is OK */ + if (leb[1]) + leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size); + if (leb_corrupted[1]) { + ubi_warn("volume table copy #2 is corrupted"); + err = create_vtbl(ubi, si, 1, leb[0]); + if (err) + goto out_free; + ubi_msg("volume table was restored"); + } + + /* Both LEB 1 and LEB 2 are OK and consistent */ + kfree(leb[1]); + return leb[0]; + } else { + /* LEB 0 is corrupted or does not exist */ + if (leb[1]) { + leb_corrupted[1] = vtbl_check(ubi, leb[1]); + if (leb_corrupted[1] < 0) + goto out_free; + } + if (leb_corrupted[1]) { + /* Both LEB 0 and LEB 1 are corrupted */ + ubi_err("both volume tables are corrupted"); + goto out_free; + } + + ubi_warn("volume table copy #1 is corrupted"); + err = create_vtbl(ubi, si, 0, leb[1]); + if (err) + goto out_free; + ubi_msg("volume table was restored"); + + kfree(leb[0]); + return leb[1]; + } + +out_free: + kfree(leb[0]); + kfree(leb[1]); + return ERR_PTR(err); +} + +/** + * create_empty_lvol - create empty layout volume. + * @ubi: UBI device description object + * @si: scanning information + * + * This function returns volume table contents in case of success and a + * negative error code in case of failure. + */ +static struct ubi_vtbl_record *create_empty_lvol(const struct ubi_device *ubi, + struct ubi_scan_info *si) +{ + int i; + struct ubi_vtbl_record *vtbl; + + vtbl = kzalloc(ubi->vtbl_size, GFP_KERNEL); + if (!vtbl) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < ubi->vtbl_slots; i++) + memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE); + + for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { + int err; + + err = create_vtbl(ubi, si, i, vtbl); + if (err) { + kfree(vtbl); + return ERR_PTR(err); + } + } + + return vtbl; +} + +/** + * init_volumes - initialize volume information for existing volumes. + * @ubi: UBI device description object + * @si: scanning information + * @vtbl: volume table + * + * This function allocates volume description objects for existing volumes. + * Returns zero in case of success and a negative error code in case of + * failure. 
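The recovery policy implemented by process_lvol() above condenses to the following table; it only restates the code, for orientation:

/*
 *   LEB 0       LEB 1                  action
 *   ---------   --------------------   -----------------------------
 *   valid       identical to LEB 0     use LEB 0
 *   valid       missing or different   rewrite copy #2 from LEB 0
 *   corrupted   valid                  rewrite copy #1 from LEB 1
 *   corrupted   missing or corrupted   give up (-EINVAL)
 */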
+ */ +static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, + const struct ubi_vtbl_record *vtbl) +{ + int i, reserved_pebs = 0; + struct ubi_scan_volume *sv; + struct ubi_volume *vol; + + for (i = 0; i < ubi->vtbl_slots; i++) { + cond_resched(); + + if (ubi32_to_cpu(vtbl[i].reserved_pebs) == 0) + continue; /* Empty record */ + + vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); + if (!vol) + return -ENOMEM; + + vol->reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs); + vol->alignment = ubi32_to_cpu(vtbl[i].alignment); + vol->data_pad = ubi32_to_cpu(vtbl[i].data_pad); + vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? + UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; + vol->name_len = ubi16_to_cpu(vtbl[i].name_len); + vol->usable_leb_size = ubi->leb_size - vol->data_pad; + memcpy(vol->name, vtbl[i].name, vol->name_len); + vol->name[vol->name_len] = '\0'; + vol->vol_id = i; + + ubi_assert(!ubi->volumes[i]); + ubi->volumes[i] = vol; + ubi->vol_count += 1; + vol->ubi = ubi; + reserved_pebs += vol->reserved_pebs; + + /* + * In case of dynamic volume UBI knows nothing about how many + * data is stored there. So assume the whole volume is used. + */ + if (vol->vol_type == UBI_DYNAMIC_VOLUME) { + vol->used_ebs = vol->reserved_pebs; + vol->last_eb_bytes = vol->usable_leb_size; + vol->used_bytes = vol->used_ebs * vol->usable_leb_size; + continue; + } + + /* Static volumes only */ + sv = ubi_scan_find_sv(si, i); + if (!sv) { + /* + * No eraseblocks belonging to this volume found. We + * don't actually know whether this static volume is + * completely corrupted or just contains no data. And + * we cannot know this as long as data size is not + * stored on flash. So we just assume the volume is + * empty. FIXME: this should be handled. + */ + continue; + } + + if (sv->leb_count != sv->used_ebs) { + /* + * We found a static volume which misses several + * eraseblocks. Treat it as corrupted. + */ + ubi_warn("static volume %d misses %d LEBs - corrupted", + sv->vol_id, sv->used_ebs - sv->leb_count); + vol->corrupted = 1; + continue; + } + + vol->used_ebs = sv->used_ebs; + vol->used_bytes = (vol->used_ebs - 1) * vol->usable_leb_size; + vol->used_bytes += sv->last_data_size; + vol->last_eb_bytes = sv->last_data_size; + } + + vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); + if (!vol) + return -ENOMEM; + + vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS; + vol->alignment = 1; + vol->vol_type = UBI_DYNAMIC_VOLUME; + vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1; + memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1); + vol->usable_leb_size = ubi->leb_size; + vol->used_ebs = vol->reserved_pebs; + vol->last_eb_bytes = vol->reserved_pebs; + vol->used_bytes = vol->used_ebs * (ubi->leb_size - vol->data_pad); + vol->vol_id = UBI_LAYOUT_VOL_ID; + + ubi_assert(!ubi->volumes[i]); + ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; + reserved_pebs += vol->reserved_pebs; + ubi->vol_count += 1; + vol->ubi = ubi; + + if (reserved_pebs > ubi->avail_pebs) + ubi_err("not enough PEBs, required %d, available %d", + reserved_pebs, ubi->avail_pebs); + ubi->rsvd_pebs += reserved_pebs; + ubi->avail_pebs -= reserved_pebs; + + return 0; +} + +/** + * check_sv - check volume scanning information. + * @vol: UBI volume description object + * @sv: volume scanning information + * + * This function returns zero if the volume scanning information is consistent + * to the data read from the volume tabla, and %-EINVAL if not. 
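A worked example of how init_volumes() above sizes a static volume from the scanning results (illustrative numbers only):

/*
 * usable_leb_size = 122880, scanning found used_ebs = 5 eraseblocks,
 * last_data_size = 2000 bytes:
 *
 *   used_bytes    = (5 - 1) * 122880 + 2000 = 493520
 *   last_eb_bytes = 2000
 */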
+ */ +static int check_sv(const struct ubi_volume *vol, + const struct ubi_scan_volume *sv) +{ + if (sv->highest_lnum >= vol->reserved_pebs) { + dbg_err("bad highest_lnum"); + goto bad; + } + if (sv->leb_count > vol->reserved_pebs) { + dbg_err("bad leb_count"); + goto bad; + } + if (sv->vol_type != vol->vol_type) { + dbg_err("bad vol_type"); + goto bad; + } + if (sv->used_ebs > vol->reserved_pebs) { + dbg_err("bad used_ebs"); + goto bad; + } + if (sv->data_pad != vol->data_pad) { + dbg_err("bad data_pad"); + goto bad; + } + return 0; + +bad: + ubi_err("bad scanning information"); + ubi_dbg_dump_sv(sv); + ubi_dbg_dump_vol_info(vol); + return -EINVAL; +} + +/** + * check_scanning_info - check that scanning information. + * @ubi: UBI device description object + * @si: scanning information + * + * Even though we protect on-flash data by CRC checksums, we still don't trust + * the media. This function ensures that scanning information is consistent to + * the information read from the volume table. Returns zero if the scanning + * information is OK and %-EINVAL if it is not. + */ +static int check_scanning_info(const struct ubi_device *ubi, + struct ubi_scan_info *si) +{ + int err, i; + struct ubi_scan_volume *sv; + struct ubi_volume *vol; + + if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) { + ubi_err("scanning found %d volumes, maximum is %d + %d", + si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots); + return -EINVAL; + } + + if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT&& + si->highest_vol_id < UBI_INTERNAL_VOL_START) { + ubi_err("too large volume ID %d found by scanning", + si->highest_vol_id); + return -EINVAL; + } + + + for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { + cond_resched(); + + sv = ubi_scan_find_sv(si, i); + vol = ubi->volumes[i]; + if (!vol) { + if (sv) + ubi_scan_rm_volume(si, sv); + continue; + } + + if (vol->reserved_pebs == 0) { + ubi_assert(i < ubi->vtbl_slots); + + if (!sv) + continue; + + /* + * During scanning we found a volume which does not + * exist according to the information in the volume + * table. This must have happened due to an unclean + * reboot while the volume was being removed. Discard + * these eraseblocks. + */ + ubi_msg("finish volume %d removal", sv->vol_id); + ubi_scan_rm_volume(si, sv); + } else if (sv) { + err = check_sv(vol, sv); + if (err) + return err; + } + } + + return 0; +} + +/** + * ubi_read_volume_table - read volume table. + * information. + * @ubi: UBI device description object + * @si: scanning information + * + * This function reads volume table, checks it, recover from errors if needed, + * or creates it if needed. Returns zero in case of success and a negative + * error code in case of failure. + */ +int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si) +{ + int i, err; + struct ubi_scan_volume *sv; + + empty_vtbl_record.crc = cpu_to_ubi32(0xf116c36b); + + /* + * The number of supported volumes is limited by the eraseblock size + * and by the UBI_MAX_VOLUMES constant. + */ + ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE; + if (ubi->vtbl_slots > UBI_MAX_VOLUMES) + ubi->vtbl_slots = UBI_MAX_VOLUMES; + + ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; + ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); + + sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); + if (!sv) { + /* + * No logical eraseblocks belonging to the layout volume were + * found. This could mean that the flash is just empty. In + * this case we create empty layout volume. 
+ * + * But if flash is not empty this must be a corruption or the + * MTD device just contains garbage. + */ + if (si->is_empty) { + ubi->vtbl = create_empty_lvol(ubi, si); + if (IS_ERR(ubi->vtbl)) + return PTR_ERR(ubi->vtbl); + } else { + ubi_err("the layout volume was not found"); + return -EINVAL; + } + } else { + if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) { + /* This must not happen with proper UBI images */ + dbg_err("too many LEBs (%d) in layout volume", + sv->leb_count); + return -EINVAL; + } + + ubi->vtbl = process_lvol(ubi, si, sv); + if (IS_ERR(ubi->vtbl)) + return PTR_ERR(ubi->vtbl); + } + + ubi->avail_pebs = ubi->good_peb_count; + + /* + * The layout volume is OK, initialize the corresponding in-RAM data + * structures. + */ + err = init_volumes(ubi, si, ubi->vtbl); + if (err) + goto out_free; + + /* + * Get sure that the scanning information is consistent to the + * information stored in the volume table. + */ + err = check_scanning_info(ubi, si); + if (err) + goto out_free; + + return 0; + +out_free: + kfree(ubi->vtbl); + for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) + if (ubi->volumes[i]) { + kfree(ubi->volumes[i]); + ubi->volumes[i] = NULL; + } + return err; +} + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID + +/** + * paranoid_vtbl_check - check volume table. + * @ubi: UBI device description object + */ +static void paranoid_vtbl_check(const struct ubi_device *ubi) +{ + if (vtbl_check(ubi, ubi->vtbl)) { + ubi_err("paranoid check failed"); + BUG(); + } +} + +#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c new file mode 100644 index 000000000000..9ecaf77eca9e --- /dev/null +++ b/drivers/mtd/ubi/wl.c @@ -0,0 +1,1671 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner + */ + +/* + * UBI wear-leveling unit. + * + * This unit is responsible for wear-leveling. It works in terms of physical + * eraseblocks and erase counters and knows nothing about logical eraseblocks, + * volumes, etc. From this unit's perspective all physical eraseblocks are of + * two types - used and free. Used physical eraseblocks are those that were + * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are + * those that were put by the 'ubi_wl_put_peb()' function. + * + * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter + * header. The rest of the physical eraseblock contains only 0xFF bytes. + * + * When physical eraseblocks are returned to the WL unit by means of the + * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is + * done asynchronously in context of the per-UBI device background thread, + * which is also managed by the WL unit. 
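The get/put life cycle sketched in the paragraphs above, condensed into one line (informational only):

/*
 *   free --ubi_wl_get_peb()--> used --ubi_wl_put_peb()--> queued for
 *   erasure --(erased by the background thread)--> free again
 */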
+ * + * The wear-leveling is ensured by means of moving the contents of used + * physical eraseblocks with low erase counter to free physical eraseblocks + * with high erase counter. + * + * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick + * an "optimal" physical eraseblock. For example, when it is known that the + * physical eraseblock will be "put" soon because it contains short-term data, + * the WL unit may pick a free physical eraseblock with low erase counter, and + * so forth. + * + * If the WL unit fails to erase a physical eraseblock, it marks it as bad. + * + * This unit is also responsible for scrubbing. If a bit-flip is detected in a + * physical eraseblock, it has to be moved. Technically this is the same as + * moving it for wear-leveling reasons. + * + * As it was said, for the UBI unit all physical eraseblocks are either "free" + * or "used". Free eraseblock are kept in the @wl->free RB-tree, while used + * eraseblocks are kept in a set of different RB-trees: @wl->used, + * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. + * + * Note, in this implementation, we keep a small in-RAM object for each physical + * eraseblock. This is surely not a scalable solution. But it appears to be good + * enough for moderately large flashes and it is simple. In future, one may + * re-work this unit and make it more scalable. + * + * At the moment this unit does not utilize the sequence number, which was + * introduced relatively recently. But it would be wise to do this because the + * sequence number of a logical eraseblock characterizes how old is it. For + * example, when we move a PEB with low erase counter, and we need to pick the + * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we + * pick target PEB with an average EC if our PEB is not very "old". This is a + * room for future re-works of the WL unit. + * + * FIXME: looks too complex, should be simplified (later). + */ + +#include +#include +#include +#include +#include "ubi.h" + +/* Number of physical eraseblocks reserved for wear-leveling purposes */ +#define WL_RESERVED_PEBS 1 + +/* + * How many erase cycles are short term, unknown, and long term physical + * eraseblocks protected. + */ +#define ST_PROTECTION 16 +#define U_PROTECTION 10 +#define LT_PROTECTION 4 + +/* + * Maximum difference between two erase counters. If this threshold is + * exceeded, the WL unit starts moving data from used physical eraseblocks with + * low erase counter to free physical eraseblocks with high erase counter. + */ +#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD + +/* + * When a physical eraseblock is moved, the WL unit has to pick the target + * physical eraseblock to move to. The simplest way would be just to pick the + * one with the highest erase counter. But in certain workloads this could lead + * to an unlimited wear of one or few physical eraseblock. Indeed, imagine a + * situation when the picked physical eraseblock is constantly erased after the + * data is written to it. So, we have a constant which limits the highest erase + * counter of the free physical eraseblock to pick. Namely, the WL unit does + * not pick eraseblocks with erase counter greater then the lowest erase + * counter plus %WL_FREE_MAX_DIFF. + */ +#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) + +/* + * Maximum number of consecutive background thread failures which is enough to + * switch to read-only mode. + */ +#define WL_MAX_FAILURES 32 + +/** + * struct ubi_wl_entry - wear-leveling entry. 
+ * @rb: link in the corresponding RB-tree + * @ec: erase counter + * @pnum: physical eraseblock number + * + * Each physical eraseblock has a corresponding &struct wl_entry object which + * may be kept in different RB-trees. + */ +struct ubi_wl_entry { + struct rb_node rb; + int ec; + int pnum; +}; + +/** + * struct ubi_wl_prot_entry - PEB protection entry. + * @rb_pnum: link in the @wl->prot.pnum RB-tree + * @rb_aec: link in the @wl->prot.aec RB-tree + * @abs_ec: the absolute erase counter value when the protection ends + * @e: the wear-leveling entry of the physical eraseblock under protection + * + * When the WL unit returns a physical eraseblock, the physical eraseblock is + * protected from being moved for some "time". For this reason, the physical + * eraseblock is not directly moved from the @wl->free tree to the @wl->used + * tree. There is one more tree in between where this physical eraseblock is + * temporarily stored (@wl->prot). + * + * All this protection stuff is needed because: + * o we don't want to move physical eraseblocks just after we have given them + * to the user; instead, we first want to let users fill them up with data; + * + * o there is a chance that the user will put the physical eraseblock very + * soon, so it makes sense not to move it for some time, but wait; this is + * especially important in case of "short term" physical eraseblocks. + * + * Physical eraseblocks stay protected only for limited time. But the "time" is + * measured in erase cycles in this case. This is implemented with help of the + * absolute erase counter (@wl->abs_ec). When it reaches certain value, the + * physical eraseblocks are moved from the protection trees (@wl->prot.*) to + * the @wl->used tree. + * + * Protected physical eraseblocks are searched by physical eraseblock number + * (when they are put) and by the absolute erase counter (to check if it is + * time to move them to the @wl->used tree). So there are actually 2 RB-trees + * storing the protected physical eraseblocks: @wl->prot.pnum and + * @wl->prot.aec. They are referred to as the "protection" trees. The + * first one is indexed by the physical eraseblock number. The second one is + * indexed by the absolute erase counter. Both trees store + * &struct ubi_wl_prot_entry objects. + * + * Each physical eraseblock has 2 main states: free and used. The former state + * corresponds to the @wl->free tree. The latter state is split up on several + * sub-states: + * o the WL movement is allowed (@wl->used tree); + * o the WL movement is temporarily prohibited (@wl->prot.pnum and + * @wl->prot.aec trees); + * o scrubbing is needed (@wl->scrub tree). + * + * Depending on the sub-state, wear-leveling entries of the used physical + * eraseblocks may be kept in one of those trees. + */ +struct ubi_wl_prot_entry { + struct rb_node rb_pnum; + struct rb_node rb_aec; + unsigned long long abs_ec; + struct ubi_wl_entry *e; +}; + +/** + * struct ubi_work - UBI work description data structure. + * @list: a link in the list of pending works + * @func: worker function + * @priv: private data of the worker function + * + * @e: physical eraseblock to erase + * @torture: if the physical eraseblock has to be tortured + * + * The @func pointer points to the worker function. If the @cancel argument is + * not zero, the worker has to free the resources and exit immediately. The + * worker has to return zero in case of success and a negative error code in + * case of failure. 
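A schematic of the threshold rule described in the unit overview above; this is not the driver's actual decision helper, just the policy restated as code:

static int wear_leveling_needed(int lowest_used_ec, int highest_free_ec)
{
	/* Move data once the erase-counter spread reaches the threshold */
	return highest_free_ec - lowest_used_ec >= UBI_WL_THRESHOLD;
}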
+ */ +struct ubi_work { + struct list_head list; + int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel); + /* The below fields are only relevant to erasure works */ + struct ubi_wl_entry *e; + int torture; +}; + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec); +static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, + struct rb_root *root); +#else +#define paranoid_check_ec(ubi, pnum, ec) 0 +#define paranoid_check_in_wl_tree(e, root) +#endif + +/* Slab cache for wear-leveling entries */ +static struct kmem_cache *wl_entries_slab; + +/** + * tree_empty - a helper function to check if an RB-tree is empty. + * @root: the root of the tree + * + * This function returns non-zero if the RB-tree is empty and zero if not. + */ +static inline int tree_empty(struct rb_root *root) +{ + return root->rb_node == NULL; +} + +/** + * wl_tree_add - add a wear-leveling entry to a WL RB-tree. + * @e: the wear-leveling entry to add + * @root: the root of the tree + * + * Note, we use (erase counter, physical eraseblock number) pairs as keys in + * the @ubi->used and @ubi->free RB-trees. + */ +static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) +{ + struct rb_node **p, *parent = NULL; + + p = &root->rb_node; + while (*p) { + struct ubi_wl_entry *e1; + + parent = *p; + e1 = rb_entry(parent, struct ubi_wl_entry, rb); + + if (e->ec < e1->ec) + p = &(*p)->rb_left; + else if (e->ec > e1->ec) + p = &(*p)->rb_right; + else { + ubi_assert(e->pnum != e1->pnum); + if (e->pnum < e1->pnum) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + } + + rb_link_node(&e->rb, parent, p); + rb_insert_color(&e->rb, root); +} + + +/* + * Helper functions to add and delete wear-leveling entries from different + * trees. + */ + +static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e) +{ + wl_tree_add(e, &ubi->free); +} +static inline void used_tree_add(struct ubi_device *ubi, + struct ubi_wl_entry *e) +{ + wl_tree_add(e, &ubi->used); +} +static inline void scrub_tree_add(struct ubi_device *ubi, + struct ubi_wl_entry *e) +{ + wl_tree_add(e, &ubi->scrub); +} +static inline void free_tree_del(struct ubi_device *ubi, + struct ubi_wl_entry *e) +{ + paranoid_check_in_wl_tree(e, &ubi->free); + rb_erase(&e->rb, &ubi->free); +} +static inline void used_tree_del(struct ubi_device *ubi, + struct ubi_wl_entry *e) +{ + paranoid_check_in_wl_tree(e, &ubi->used); + rb_erase(&e->rb, &ubi->used); +} +static inline void scrub_tree_del(struct ubi_device *ubi, + struct ubi_wl_entry *e) +{ + paranoid_check_in_wl_tree(e, &ubi->scrub); + rb_erase(&e->rb, &ubi->scrub); +} + +/** + * do_work - do one pending work. + * @ubi: UBI device description object + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +static int do_work(struct ubi_device *ubi) +{ + int err; + struct ubi_work *wrk; + + spin_lock(&ubi->wl_lock); + + if (list_empty(&ubi->works)) { + spin_unlock(&ubi->wl_lock); + return 0; + } + + wrk = list_entry(ubi->works.next, struct ubi_work, list); + list_del(&wrk->list); + spin_unlock(&ubi->wl_lock); + + /* + * Call the worker function. Do not touch the work structure + * after this call as it will have been freed or reused by that + * time by the worker function. 
+ */ + err = wrk->func(ubi, wrk, 0); + if (err) + ubi_err("work failed with error code %d", err); + + spin_lock(&ubi->wl_lock); + ubi->works_count -= 1; + ubi_assert(ubi->works_count >= 0); + spin_unlock(&ubi->wl_lock); + return err; +} + +/** + * produce_free_peb - produce a free physical eraseblock. + * @ubi: UBI device description object + * + * This function tries to make a free PEB by means of synchronous execution of + * pending works. This may be needed if, for example the background thread is + * disabled. Returns zero in case of success and a negative error code in case + * of failure. + */ +static int produce_free_peb(struct ubi_device *ubi) +{ + int err; + + spin_lock(&ubi->wl_lock); + while (tree_empty(&ubi->free)) { + spin_unlock(&ubi->wl_lock); + + dbg_wl("do one work synchronously"); + err = do_work(ubi); + if (err) + return err; + + spin_lock(&ubi->wl_lock); + } + spin_unlock(&ubi->wl_lock); + + return 0; +} + +/** + * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree. + * @e: the wear-leveling entry to check + * @root: the root of the tree + * + * This function returns non-zero if @e is in the @root RB-tree and zero if it + * is not. + */ +static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) +{ + struct rb_node *p; + + p = root->rb_node; + while (p) { + struct ubi_wl_entry *e1; + + e1 = rb_entry(p, struct ubi_wl_entry, rb); + + if (e->pnum == e1->pnum) { + ubi_assert(e == e1); + return 1; + } + + if (e->ec < e1->ec) + p = p->rb_left; + else if (e->ec > e1->ec) + p = p->rb_right; + else { + ubi_assert(e->pnum != e1->pnum); + if (e->pnum < e1->pnum) + p = p->rb_left; + else + p = p->rb_right; + } + } + + return 0; +} + +/** + * prot_tree_add - add physical eraseblock to protection trees. + * @ubi: UBI device description object + * @e: the physical eraseblock to add + * @pe: protection entry object to use + * @abs_ec: absolute erase counter value when this physical eraseblock has + * to be removed from the protection trees. + * + * @wl->lock has to be locked. + */ +static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e, + struct ubi_wl_prot_entry *pe, int abs_ec) +{ + struct rb_node **p, *parent = NULL; + struct ubi_wl_prot_entry *pe1; + + pe->e = e; + pe->abs_ec = ubi->abs_ec + abs_ec; + + p = &ubi->prot.pnum.rb_node; + while (*p) { + parent = *p; + pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum); + + if (e->pnum < pe1->e->pnum) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&pe->rb_pnum, parent, p); + rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum); + + p = &ubi->prot.aec.rb_node; + parent = NULL; + while (*p) { + parent = *p; + pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec); + + if (pe->abs_ec < pe1->abs_ec) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&pe->rb_aec, parent, p); + rb_insert_color(&pe->rb_aec, &ubi->prot.aec); +} + +/** + * find_wl_entry - find wear-leveling entry closest to certain erase counter. + * @root: the RB-tree where to look for + * @max: highest possible erase counter + * + * This function looks for a wear leveling entry with erase counter closest to + * @max and less then @max. 
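+ *
+ * Note that @max is added to the lowest erase counter in @root, so it is
+ * effectively a difference. For example (illustrative numbers): if the
+ * lowest erase counter in @root is 10 and @max is 40, the entry with the
+ * largest erase counter below 10 + 40 = 50 is returned.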
+ */ +static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) +{ + struct rb_node *p; + struct ubi_wl_entry *e; + + e = rb_entry(rb_first(root), struct ubi_wl_entry, rb); + max += e->ec; + + p = root->rb_node; + while (p) { + struct ubi_wl_entry *e1; + + e1 = rb_entry(p, struct ubi_wl_entry, rb); + if (e1->ec >= max) + p = p->rb_left; + else { + p = p->rb_right; + e = e1; + } + } + + return e; +} + +/** + * ubi_wl_get_peb - get a physical eraseblock. + * @ubi: UBI device description object + * @dtype: type of data which will be stored in this physical eraseblock + * + * This function returns a physical eraseblock in case of success and a + * negative error code in case of failure. Might sleep. + */ +int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) +{ + int err, protect, medium_ec; + struct ubi_wl_entry *e, *first, *last; + struct ubi_wl_prot_entry *pe; + + ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || + dtype == UBI_UNKNOWN); + + pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL); + if (!pe) + return -ENOMEM; + +retry: + spin_lock(&ubi->wl_lock); + if (tree_empty(&ubi->free)) { + if (ubi->works_count == 0) { + ubi_assert(list_empty(&ubi->works)); + ubi_err("no free eraseblocks"); + spin_unlock(&ubi->wl_lock); + kfree(pe); + return -ENOSPC; + } + spin_unlock(&ubi->wl_lock); + + err = produce_free_peb(ubi); + if (err < 0) { + kfree(pe); + return err; + } + goto retry; + } + + switch (dtype) { + case UBI_LONGTERM: + /* + * For long term data we pick a physical eraseblock + * with high erase counter. But the highest erase + * counter we can pick is bounded by the the lowest + * erase counter plus %WL_FREE_MAX_DIFF. + */ + e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); + protect = LT_PROTECTION; + break; + case UBI_UNKNOWN: + /* + * For unknown data we pick a physical eraseblock with + * medium erase counter. But we by no means can pick a + * physical eraseblock with erase counter greater or + * equivalent than the lowest erase counter plus + * %WL_FREE_MAX_DIFF. + */ + first = rb_entry(rb_first(&ubi->free), + struct ubi_wl_entry, rb); + last = rb_entry(rb_last(&ubi->free), + struct ubi_wl_entry, rb); + + if (last->ec - first->ec < WL_FREE_MAX_DIFF) + e = rb_entry(ubi->free.rb_node, + struct ubi_wl_entry, rb); + else { + medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; + e = find_wl_entry(&ubi->free, medium_ec); + } + protect = U_PROTECTION; + break; + case UBI_SHORTTERM: + /* + * For short term data we pick a physical eraseblock + * with the lowest erase counter as we expect it will + * be erased soon. + */ + e = rb_entry(rb_first(&ubi->free), + struct ubi_wl_entry, rb); + protect = ST_PROTECTION; + break; + default: + protect = 0; + e = NULL; + BUG(); + } + + /* + * Move the physical eraseblock to the protection trees where it will + * be protected from being moved for some time. 
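+ *
+ * For example (illustrative numbers): a short-term PEB taken while
+ * @ubi->abs_ec is 100 gets an expiry value of 100 + %ST_PROTECTION = 116
+ * and is moved back to the @ubi->used tree by check_protection_over()
+ * once the absolute erase counter reaches that value.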
+ */ + free_tree_del(ubi, e); + prot_tree_add(ubi, e, pe, protect); + + dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect); + spin_unlock(&ubi->wl_lock); + + return e->pnum; +} + +/** + * prot_tree_del - remove a physical eraseblock from the protection trees + * @ubi: UBI device description object + * @pnum: the physical eraseblock to remove + */ +static void prot_tree_del(struct ubi_device *ubi, int pnum) +{ + struct rb_node *p; + struct ubi_wl_prot_entry *pe = NULL; + + p = ubi->prot.pnum.rb_node; + while (p) { + + pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); + + if (pnum == pe->e->pnum) + break; + + if (pnum < pe->e->pnum) + p = p->rb_left; + else + p = p->rb_right; + } + + ubi_assert(pe->e->pnum == pnum); + rb_erase(&pe->rb_aec, &ubi->prot.aec); + rb_erase(&pe->rb_pnum, &ubi->prot.pnum); + kfree(pe); +} + +/** + * sync_erase - synchronously erase a physical eraseblock. + * @ubi: UBI device description object + * @e: the the physical eraseblock to erase + * @torture: if the physical eraseblock has to be tortured + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture) +{ + int err; + struct ubi_ec_hdr *ec_hdr; + unsigned long long ec = e->ec; + + dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); + + err = paranoid_check_ec(ubi, e->pnum, e->ec); + if (err > 0) + return -EINVAL; + + ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); + if (!ec_hdr) + return -ENOMEM; + + err = ubi_io_sync_erase(ubi, e->pnum, torture); + if (err < 0) + goto out_free; + + ec += err; + if (ec > UBI_MAX_ERASECOUNTER) { + /* + * Erase counter overflow. Upgrade UBI and use 64-bit + * erase counters internally. + */ + ubi_err("erase counter overflow at PEB %d, EC %llu", + e->pnum, ec); + err = -EINVAL; + goto out_free; + } + + dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec); + + ec_hdr->ec = cpu_to_ubi64(ec); + + err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); + if (err) + goto out_free; + + e->ec = ec; + spin_lock(&ubi->wl_lock); + if (e->ec > ubi->max_ec) + ubi->max_ec = e->ec; + spin_unlock(&ubi->wl_lock); + +out_free: + kfree(ec_hdr); + return err; +} + +/** + * check_protection_over - check if it is time to stop protecting some + * physical eraseblocks. + * @ubi: UBI device description object + * + * This function is called after each erase operation, when the absolute erase + * counter is incremented, to check if some physical eraseblock have not to be + * protected any longer. These physical eraseblocks are moved from the + * protection trees to the used tree. + */ +static void check_protection_over(struct ubi_device *ubi) +{ + struct ubi_wl_prot_entry *pe; + + /* + * There may be several protected physical eraseblock to remove, + * process them all. + */ + while (1) { + spin_lock(&ubi->wl_lock); + if (tree_empty(&ubi->prot.aec)) { + spin_unlock(&ubi->wl_lock); + break; + } + + pe = rb_entry(rb_first(&ubi->prot.aec), + struct ubi_wl_prot_entry, rb_aec); + + if (pe->abs_ec > ubi->abs_ec) { + spin_unlock(&ubi->wl_lock); + break; + } + + dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu", + pe->e->pnum, ubi->abs_ec, pe->abs_ec); + rb_erase(&pe->rb_aec, &ubi->prot.aec); + rb_erase(&pe->rb_pnum, &ubi->prot.pnum); + used_tree_add(ubi, pe->e); + spin_unlock(&ubi->wl_lock); + + kfree(pe); + cond_resched(); + } +} + +/** + * schedule_ubi_work - schedule a work. 
+ * @ubi: UBI device description object + * @wrk: the work to schedule + * + * This function enqueues a work defined by @wrk to the tail of the pending + * works list. + */ +static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) +{ + spin_lock(&ubi->wl_lock); + list_add_tail(&wrk->list, &ubi->works); + ubi_assert(ubi->works_count >= 0); + ubi->works_count += 1; + if (ubi->thread_enabled) + wake_up_process(ubi->bgt_thread); + spin_unlock(&ubi->wl_lock); +} + +static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, + int cancel); + +/** + * schedule_erase - schedule an erase work. + * @ubi: UBI device description object + * @e: the WL entry of the physical eraseblock to erase + * @torture: if the physical eraseblock has to be tortured + * + * This function returns zero in case of success and a %-ENOMEM in case of + * failure. + */ +static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, + int torture) +{ + struct ubi_work *wl_wrk; + + dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", + e->pnum, e->ec, torture); + + wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL); + if (!wl_wrk) + return -ENOMEM; + + wl_wrk->func = &erase_worker; + wl_wrk->e = e; + wl_wrk->torture = torture; + + schedule_ubi_work(ubi, wl_wrk); + return 0; +} + +/** + * wear_leveling_worker - wear-leveling worker function. + * @ubi: UBI device description object + * @wrk: the work object + * @cancel: non-zero if the worker has to free memory and exit + * + * This function copies a more worn out physical eraseblock to a less worn out + * one. Returns zero in case of success and a negative error code in case of + * failure. + */ +static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, + int cancel) +{ + int err, put = 0; + struct ubi_wl_entry *e1, *e2; + struct ubi_vid_hdr *vid_hdr; + + kfree(wrk); + + if (cancel) + return 0; + + vid_hdr = ubi_zalloc_vid_hdr(ubi); + if (!vid_hdr) + return -ENOMEM; + + spin_lock(&ubi->wl_lock); + + /* + * Only one WL worker at a time is supported at this implementation, so + * make sure a PEB is not being moved already. + */ + if (ubi->move_to || tree_empty(&ubi->free) || + (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) { + /* + * Only one WL worker at a time is supported at this + * implementation, so if a LEB is already being moved, cancel. + * + * No free physical eraseblocks? Well, we cancel wear-leveling + * then. It will be triggered again when a free physical + * eraseblock appears. + * + * No used physical eraseblocks? They must be temporarily + * protected from being moved. They will be moved to the + * @ubi->used tree later and the wear-leveling will be + * triggered again. + */ + dbg_wl("cancel WL, a list is empty: free %d, used %d", + tree_empty(&ubi->free), tree_empty(&ubi->used)); + ubi->wl_scheduled = 0; + spin_unlock(&ubi->wl_lock); + ubi_free_vid_hdr(ubi, vid_hdr); + return 0; + } + + if (tree_empty(&ubi->scrub)) { + /* + * Now pick the least worn-out used physical eraseblock and a + * highly worn-out free physical eraseblock. If the erase + * counters differ much enough, start wear-leveling. 
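+ *
+ * For example (illustrative numbers, assuming %UBI_WL_THRESHOLD is
+ * 4096): a used PEB with EC 300 and a free PEB with EC 5000 differ by
+ * 4700, so the used PEB is moved; if the free PEB had EC 4000, the
+ * difference of 3700 would be below the threshold and nothing is done.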
+ */ + e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); + e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); + + if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { + dbg_wl("no WL needed: min used EC %d, max free EC %d", + e1->ec, e2->ec); + ubi->wl_scheduled = 0; + spin_unlock(&ubi->wl_lock); + ubi_free_vid_hdr(ubi, vid_hdr); + return 0; + } + used_tree_del(ubi, e1); + dbg_wl("move PEB %d EC %d to PEB %d EC %d", + e1->pnum, e1->ec, e2->pnum, e2->ec); + } else { + e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); + e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); + scrub_tree_del(ubi, e1); + dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); + } + + free_tree_del(ubi, e2); + ubi_assert(!ubi->move_from && !ubi->move_to); + ubi_assert(!ubi->move_to_put && !ubi->move_from_put); + ubi->move_from = e1; + ubi->move_to = e2; + spin_unlock(&ubi->wl_lock); + + /* + * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum. + * We so far do not know which logical eraseblock our physical + * eraseblock (@e1) belongs to. We have to read the volume identifier + * header first. + */ + + err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); + if (err && err != UBI_IO_BITFLIPS) { + if (err == UBI_IO_PEB_FREE) { + /* + * We are trying to move PEB without a VID header. UBI + * always write VID headers shortly after the PEB was + * given, so we have a situation when it did not have + * chance to write it down because it was preempted. + * Just re-schedule the work, so that next time it will + * likely have the VID header in place. + */ + dbg_wl("PEB %d has no VID header", e1->pnum); + err = 0; + } else { + ubi_err("error %d while reading VID header from PEB %d", + err, e1->pnum); + if (err > 0) + err = -EIO; + } + goto error; + } + + err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); + if (err) { + if (err == UBI_IO_BITFLIPS) + err = 0; + goto error; + } + + ubi_free_vid_hdr(ubi, vid_hdr); + spin_lock(&ubi->wl_lock); + if (!ubi->move_to_put) + used_tree_add(ubi, e2); + else + put = 1; + ubi->move_from = ubi->move_to = NULL; + ubi->move_from_put = ubi->move_to_put = 0; + ubi->wl_scheduled = 0; + spin_unlock(&ubi->wl_lock); + + if (put) { + /* + * Well, the target PEB was put meanwhile, schedule it for + * erasure. + */ + dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); + err = schedule_erase(ubi, e2, 0); + if (err) { + kmem_cache_free(wl_entries_slab, e2); + ubi_ro_mode(ubi); + } + } + + err = schedule_erase(ubi, e1, 0); + if (err) { + kmem_cache_free(wl_entries_slab, e1); + ubi_ro_mode(ubi); + } + + dbg_wl("done"); + return err; + + /* + * Some error occurred. @e1 was not changed, so return it back. @e2 + * might be changed, schedule it for erasure. + */ +error: + if (err) + dbg_wl("error %d occurred, cancel operation", err); + ubi_assert(err <= 0); + + ubi_free_vid_hdr(ubi, vid_hdr); + spin_lock(&ubi->wl_lock); + ubi->wl_scheduled = 0; + if (ubi->move_from_put) + put = 1; + else + used_tree_add(ubi, e1); + ubi->move_from = ubi->move_to = NULL; + ubi->move_from_put = ubi->move_to_put = 0; + spin_unlock(&ubi->wl_lock); + + if (put) { + /* + * Well, the target PEB was put meanwhile, schedule it for + * erasure. 
+ */ + dbg_wl("PEB %d was put meanwhile, erase", e1->pnum); + err = schedule_erase(ubi, e1, 0); + if (err) { + kmem_cache_free(wl_entries_slab, e1); + ubi_ro_mode(ubi); + } + } + + err = schedule_erase(ubi, e2, 0); + if (err) { + kmem_cache_free(wl_entries_slab, e2); + ubi_ro_mode(ubi); + } + + yield(); + return err; +} + +/** + * ensure_wear_leveling - schedule wear-leveling if it is needed. + * @ubi: UBI device description object + * + * This function checks if it is time to start wear-leveling and schedules it + * if yes. This function returns zero in case of success and a negative error + * code in case of failure. + */ +static int ensure_wear_leveling(struct ubi_device *ubi) +{ + int err = 0; + struct ubi_wl_entry *e1; + struct ubi_wl_entry *e2; + struct ubi_work *wrk; + + spin_lock(&ubi->wl_lock); + if (ubi->wl_scheduled) + /* Wear-leveling is already in the work queue */ + goto out_unlock; + + /* + * If the ubi->scrub tree is not empty, scrubbing is needed, and the + * the WL worker has to be scheduled anyway. + */ + if (tree_empty(&ubi->scrub)) { + if (tree_empty(&ubi->used) || tree_empty(&ubi->free)) + /* No physical eraseblocks - no deal */ + goto out_unlock; + + /* + * We schedule wear-leveling only if the difference between the + * lowest erase counter of used physical eraseblocks and a high + * erase counter of free physical eraseblocks is greater then + * %UBI_WL_THRESHOLD. + */ + e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); + e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); + + if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) + goto out_unlock; + dbg_wl("schedule wear-leveling"); + } else + dbg_wl("schedule scrubbing"); + + ubi->wl_scheduled = 1; + spin_unlock(&ubi->wl_lock); + + wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL); + if (!wrk) { + err = -ENOMEM; + goto out_cancel; + } + + wrk->func = &wear_leveling_worker; + schedule_ubi_work(ubi, wrk); + return err; + +out_cancel: + spin_lock(&ubi->wl_lock); + ubi->wl_scheduled = 0; +out_unlock: + spin_unlock(&ubi->wl_lock); + return err; +} + +/** + * erase_worker - physical eraseblock erase worker function. + * @ubi: UBI device description object + * @wl_wrk: the work object + * @cancel: non-zero if the worker has to free memory and exit + * + * This function erases a physical eraseblock and perform torture testing if + * needed. It also takes care about marking the physical eraseblock bad if + * needed. Returns zero in case of success and a negative error code in case of + * failure. + */ +static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, + int cancel) +{ + int err; + struct ubi_wl_entry *e = wl_wrk->e; + int pnum = e->pnum; + + if (cancel) { + dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); + kfree(wl_wrk); + kmem_cache_free(wl_entries_slab, e); + return 0; + } + + dbg_wl("erase PEB %d EC %d", pnum, e->ec); + + err = sync_erase(ubi, e, wl_wrk->torture); + if (!err) { + /* Fine, we've erased it successfully */ + kfree(wl_wrk); + + spin_lock(&ubi->wl_lock); + ubi->abs_ec += 1; + free_tree_add(ubi, e); + spin_unlock(&ubi->wl_lock); + + /* + * One more erase operation has happened, take care about protected + * physical eraseblocks. + */ + check_protection_over(ubi); + + /* And take care about wear-leveling */ + err = ensure_wear_leveling(ubi); + return err; + } + + kfree(wl_wrk); + kmem_cache_free(wl_entries_slab, e); + + if (err != -EIO) { + /* + * If this is not %-EIO, we have no idea what to do. 
Scheduling + * this physical eraseblock for erasure again would cause + * errors again and again. Well, lets switch to RO mode. + */ + ubi_ro_mode(ubi); + return err; + } + + /* It is %-EIO, the PEB went bad */ + + if (!ubi->bad_allowed) { + ubi_err("bad physical eraseblock %d detected", pnum); + ubi_ro_mode(ubi); + err = -EIO; + } else { + int need; + + spin_lock(&ubi->volumes_lock); + need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1; + if (need > 0) { + need = ubi->avail_pebs >= need ? need : ubi->avail_pebs; + ubi->avail_pebs -= need; + ubi->rsvd_pebs += need; + ubi->beb_rsvd_pebs += need; + if (need > 0) + ubi_msg("reserve more %d PEBs", need); + } + + if (ubi->beb_rsvd_pebs == 0) { + spin_unlock(&ubi->volumes_lock); + ubi_err("no reserved physical eraseblocks"); + ubi_ro_mode(ubi); + return -EIO; + } + + spin_unlock(&ubi->volumes_lock); + ubi_msg("mark PEB %d as bad", pnum); + + err = ubi_io_mark_bad(ubi, pnum); + if (err) { + ubi_ro_mode(ubi); + return err; + } + + spin_lock(&ubi->volumes_lock); + ubi->beb_rsvd_pebs -= 1; + ubi->bad_peb_count += 1; + ubi->good_peb_count -= 1; + ubi_calculate_reserved(ubi); + if (ubi->beb_rsvd_pebs == 0) + ubi_warn("last PEB from the reserved pool was used"); + spin_unlock(&ubi->volumes_lock); + } + + return err; +} + +/** + * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling + * unit. + * @ubi: UBI device description object + * @pnum: physical eraseblock to return + * @torture: if this physical eraseblock has to be tortured + * + * This function is called to return physical eraseblock @pnum to the pool of + * free physical eraseblocks. The @torture flag has to be set if an I/O error + * occurred to this @pnum and it has to be tested. This function returns zero + * in case of success and a negative error code in case of failure. + */ +int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) +{ + int err; + struct ubi_wl_entry *e; + + dbg_wl("PEB %d", pnum); + ubi_assert(pnum >= 0); + ubi_assert(pnum < ubi->peb_count); + + spin_lock(&ubi->wl_lock); + + e = ubi->lookuptbl[pnum]; + if (e == ubi->move_from) { + /* + * User is putting the physical eraseblock which was selected to + * be moved. It will be scheduled for erasure in the + * wear-leveling worker. + */ + dbg_wl("PEB %d is being moved", pnum); + ubi_assert(!ubi->move_from_put); + ubi->move_from_put = 1; + spin_unlock(&ubi->wl_lock); + return 0; + } else if (e == ubi->move_to) { + /* + * User is putting the physical eraseblock which was selected + * as the target the data is moved to. It may happen if the EBA + * unit already re-mapped the LEB but the WL unit did has not + * put the PEB to the "used" tree. + */ + dbg_wl("PEB %d is the target of data moving", pnum); + ubi_assert(!ubi->move_to_put); + ubi->move_to_put = 1; + spin_unlock(&ubi->wl_lock); + return 0; + } else { + if (in_wl_tree(e, &ubi->used)) + used_tree_del(ubi, e); + else if (in_wl_tree(e, &ubi->scrub)) + scrub_tree_del(ubi, e); + else + prot_tree_del(ubi, e->pnum); + } + spin_unlock(&ubi->wl_lock); + + err = schedule_erase(ubi, e, torture); + if (err) { + spin_lock(&ubi->wl_lock); + used_tree_add(ubi, e); + spin_unlock(&ubi->wl_lock); + } + + return err; +} + +/** + * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing. + * @ubi: UBI device description object + * @pnum: the physical eraseblock to schedule + * + * If a bit-flip in a physical eraseblock is detected, this physical eraseblock + * needs scrubbing. 
This function schedules a physical eraseblock for + * scrubbing which is done in background. This function returns zero in case of + * success and a negative error code in case of failure. + */ +int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) +{ + struct ubi_wl_entry *e; + + ubi_msg("schedule PEB %d for scrubbing", pnum); + +retry: + spin_lock(&ubi->wl_lock); + e = ubi->lookuptbl[pnum]; + if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) { + spin_unlock(&ubi->wl_lock); + return 0; + } + + if (e == ubi->move_to) { + /* + * This physical eraseblock was used to move data to. The data + * was moved but the PEB was not yet inserted to the proper + * tree. We should just wait a little and let the WL worker + * proceed. + */ + spin_unlock(&ubi->wl_lock); + dbg_wl("the PEB %d is not in proper tree, retry", pnum); + yield(); + goto retry; + } + + if (in_wl_tree(e, &ubi->used)) + used_tree_del(ubi, e); + else + prot_tree_del(ubi, pnum); + + scrub_tree_add(ubi, e); + spin_unlock(&ubi->wl_lock); + + /* + * Technically scrubbing is the same as wear-leveling, so it is done + * by the WL worker. + */ + return ensure_wear_leveling(ubi); +} + +/** + * ubi_wl_flush - flush all pending works. + * @ubi: UBI device description object + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +int ubi_wl_flush(struct ubi_device *ubi) +{ + int err, pending_count; + + pending_count = ubi->works_count; + + dbg_wl("flush (%d pending works)", pending_count); + + /* + * Erase while the pending works queue is not empty, but not more then + * the number of currently pending works. + */ + while (pending_count-- > 0) { + err = do_work(ubi); + if (err) + return err; + } + + return 0; +} + +/** + * tree_destroy - destroy an RB-tree. + * @root: the root of the tree to destroy + */ +static void tree_destroy(struct rb_root *root) +{ + struct rb_node *rb; + struct ubi_wl_entry *e; + + rb = root->rb_node; + while (rb) { + if (rb->rb_left) + rb = rb->rb_left; + else if (rb->rb_right) + rb = rb->rb_right; + else { + e = rb_entry(rb, struct ubi_wl_entry, rb); + + rb = rb_parent(rb); + if (rb) { + if (rb->rb_left == &e->rb) + rb->rb_left = NULL; + else + rb->rb_right = NULL; + } + + kmem_cache_free(wl_entries_slab, e); + } + } +} + +/** + * ubi_thread - UBI background thread. + * @u: the UBI device description object pointer + */ +static int ubi_thread(void *u) +{ + int failures = 0; + struct ubi_device *ubi = u; + + ubi_msg("background thread \"%s\" started, PID %d", + ubi->bgt_name, current->pid); + + for (;;) { + int err; + + if (kthread_should_stop()) + goto out; + + if (try_to_freeze()) + continue; + + spin_lock(&ubi->wl_lock); + if (list_empty(&ubi->works) || ubi->ro_mode || + !ubi->thread_enabled) { + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock(&ubi->wl_lock); + schedule(); + continue; + } + spin_unlock(&ubi->wl_lock); + + err = do_work(ubi); + if (err) { + ubi_err("%s: work failed with error code %d", + ubi->bgt_name, err); + if (failures++ > WL_MAX_FAILURES) { + /* + * Too many failures, disable the thread and + * switch to read-only mode. + */ + ubi_msg("%s: %d consecutive failures", + ubi->bgt_name, WL_MAX_FAILURES); + ubi_ro_mode(ubi); + break; + } + } else + failures = 0; + + cond_resched(); + } + +out: + dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); + return 0; +} + +/** + * cancel_pending - cancel all pending works. 
+ * @ubi: UBI device description object + */ +static void cancel_pending(struct ubi_device *ubi) +{ + while (!list_empty(&ubi->works)) { + struct ubi_work *wrk; + + wrk = list_entry(ubi->works.next, struct ubi_work, list); + list_del(&wrk->list); + wrk->func(ubi, wrk, 1); + ubi->works_count -= 1; + ubi_assert(ubi->works_count >= 0); + } +} + +/** + * ubi_wl_init_scan - initialize the wear-leveling unit using scanning + * information. + * @ubi: UBI device description object + * @si: scanning information + * + * This function returns zero in case of success, and a negative error code in + * case of failure. + */ +int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) +{ + int err; + struct rb_node *rb1, *rb2; + struct ubi_scan_volume *sv; + struct ubi_scan_leb *seb, *tmp; + struct ubi_wl_entry *e; + + + ubi->used = ubi->free = ubi->scrub = RB_ROOT; + ubi->prot.pnum = ubi->prot.aec = RB_ROOT; + spin_lock_init(&ubi->wl_lock); + ubi->max_ec = si->max_ec; + INIT_LIST_HEAD(&ubi->works); + + sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); + + ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); + if (IS_ERR(ubi->bgt_thread)) { + err = PTR_ERR(ubi->bgt_thread); + ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, + err); + return err; + } + + if (ubi_devices_cnt == 0) { + wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab", + sizeof(struct ubi_wl_entry), + 0, 0, NULL, NULL); + if (!wl_entries_slab) + return -ENOMEM; + } + + err = -ENOMEM; + ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); + if (!ubi->lookuptbl) + goto out_free; + + list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { + cond_resched(); + + e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); + if (!e) + goto out_free; + + e->pnum = seb->pnum; + e->ec = seb->ec; + ubi->lookuptbl[e->pnum] = e; + if (schedule_erase(ubi, e, 0)) { + kmem_cache_free(wl_entries_slab, e); + goto out_free; + } + } + + list_for_each_entry(seb, &si->free, u.list) { + cond_resched(); + + e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); + if (!e) + goto out_free; + + e->pnum = seb->pnum; + e->ec = seb->ec; + ubi_assert(e->ec >= 0); + free_tree_add(ubi, e); + ubi->lookuptbl[e->pnum] = e; + } + + list_for_each_entry(seb, &si->corr, u.list) { + cond_resched(); + + e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); + if (!e) + goto out_free; + + e->pnum = seb->pnum; + e->ec = seb->ec; + ubi->lookuptbl[e->pnum] = e; + if (schedule_erase(ubi, e, 0)) { + kmem_cache_free(wl_entries_slab, e); + goto out_free; + } + } + + ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { + ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { + cond_resched(); + + e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); + if (!e) + goto out_free; + + e->pnum = seb->pnum; + e->ec = seb->ec; + ubi->lookuptbl[e->pnum] = e; + if (!seb->scrub) { + dbg_wl("add PEB %d EC %d to the used tree", + e->pnum, e->ec); + used_tree_add(ubi, e); + } else { + dbg_wl("add PEB %d EC %d to the scrub tree", + e->pnum, e->ec); + scrub_tree_add(ubi, e); + } + } + } + + if (WL_RESERVED_PEBS > ubi->avail_pebs) { + ubi_err("no enough physical eraseblocks (%d, need %d)", + ubi->avail_pebs, WL_RESERVED_PEBS); + goto out_free; + } + ubi->avail_pebs -= WL_RESERVED_PEBS; + ubi->rsvd_pebs += WL_RESERVED_PEBS; + + /* Schedule wear-leveling if needed */ + err = ensure_wear_leveling(ubi); + if (err) + goto out_free; + + return 0; + +out_free: + cancel_pending(ubi); + tree_destroy(&ubi->used); + tree_destroy(&ubi->free); + tree_destroy(&ubi->scrub); + 
kfree(ubi->lookuptbl); + if (ubi_devices_cnt == 0) + kmem_cache_destroy(wl_entries_slab); + return err; +} + +/** + * protection_trees_destroy - destroy the protection RB-trees. + * @ubi: UBI device description object + */ +static void protection_trees_destroy(struct ubi_device *ubi) +{ + struct rb_node *rb; + struct ubi_wl_prot_entry *pe; + + rb = ubi->prot.aec.rb_node; + while (rb) { + if (rb->rb_left) + rb = rb->rb_left; + else if (rb->rb_right) + rb = rb->rb_right; + else { + pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec); + + rb = rb_parent(rb); + if (rb) { + if (rb->rb_left == &pe->rb_aec) + rb->rb_left = NULL; + else + rb->rb_right = NULL; + } + + kmem_cache_free(wl_entries_slab, pe->e); + kfree(pe); + } + } +} + +/** + * ubi_wl_close - close the wear-leveling unit. + * @ubi: UBI device description object + */ +void ubi_wl_close(struct ubi_device *ubi) +{ + dbg_wl("disable \"%s\"", ubi->bgt_name); + if (ubi->bgt_thread) + kthread_stop(ubi->bgt_thread); + + dbg_wl("close the UBI wear-leveling unit"); + + cancel_pending(ubi); + protection_trees_destroy(ubi); + tree_destroy(&ubi->used); + tree_destroy(&ubi->free); + tree_destroy(&ubi->scrub); + kfree(ubi->lookuptbl); + if (ubi_devices_cnt == 1) + kmem_cache_destroy(wl_entries_slab); +} + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID + +/** + * paranoid_check_ec - make sure that the erase counter of a physical eraseblock + * is correct. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to check + * @ec: the erase counter to check + * + * This function returns zero if the erase counter of physical eraseblock @pnum + * is equivalent to @ec, %1 if not, and a negative error code if an error + * occurred. + */ +static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec) +{ + int err; + long long read_ec; + struct ubi_ec_hdr *ec_hdr; + + ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); + if (!ec_hdr) + return -ENOMEM; + + err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); + if (err && err != UBI_IO_BITFLIPS) { + /* The header does not have to exist */ + err = 0; + goto out_free; + } + + read_ec = ubi64_to_cpu(ec_hdr->ec); + if (ec != read_ec) { + ubi_err("paranoid check failed for PEB %d", pnum); + ubi_err("read EC is %lld, should be %d", read_ec, ec); + ubi_dbg_dump_stack(); + err = 1; + } else + err = 0; + +out_free: + kfree(ec_hdr); + return err; +} + +/** + * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present + * in a WL RB-tree. + * @e: the wear-leveling entry to check + * @root: the root of the tree + * + * This function returns zero if @e is in the @root RB-tree and %1 if it + * is not. + */ +static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, + struct rb_root *root) +{ + if (in_wl_tree(e, root)) + return 0; + + ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ", + e->pnum, e->ec, root); + ubi_dbg_dump_stack(); + return 1; +} + +#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h new file mode 100644 index 000000000000..3d967b6b120a --- /dev/null +++ b/include/linux/mtd/ubi.h @@ -0,0 +1,202 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __LINUX_UBI_H__ +#define __LINUX_UBI_H__ + +#include +#include +#include + +/* + * UBI data type hint constants. + * + * UBI_LONGTERM: long-term data + * UBI_SHORTTERM: short-term data + * UBI_UNKNOWN: data persistence is unknown + * + * These constants are used when data is written to UBI volumes in order to + * help the UBI wear-leveling unit to find more appropriate physical + * eraseblocks. + */ +enum { + UBI_LONGTERM = 1, + UBI_SHORTTERM, + UBI_UNKNOWN +}; + +/* + * enum ubi_open_mode - UBI volume open mode constants. + * + * UBI_READONLY: read-only mode + * UBI_READWRITE: read-write mode + * UBI_EXCLUSIVE: exclusive mode + */ +enum { + UBI_READONLY = 1, + UBI_READWRITE, + UBI_EXCLUSIVE +}; + +/** + * struct ubi_volume_info - UBI volume description data structure. + * @vol_id: volume ID + * @ubi_num: UBI device number this volume belongs to + * @size: how many physical eraseblocks are reserved for this volume + * @used_bytes: how many bytes of data this volume contains + * @used_ebs: how many physical eraseblocks of this volume actually contain any + * data + * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) + * @corrupted: non-zero if the volume is corrupted (static volumes only) + * @upd_marker: non-zero if the volume has update marker set + * @alignment: volume alignment + * @usable_leb_size: how many bytes are available in logical eraseblocks of + * this volume + * @name_len: volume name length + * @name: volume name + * @cdev: UBI volume character device major and minor numbers + * + * The @corrupted flag is only relevant to static volumes and is always zero + * for dynamic ones. This is because UBI does not care about dynamic volume + * data protection and only cares about protecting static volume data. + * + * The @upd_marker flag is set if the volume update operation was interrupted. + * Before touching the volume data during the update operation, UBI first sets + * the update marker flag for this volume. If the volume update operation was + * further interrupted, the update marker indicates this. If the update marker + * is set, the contents of the volume is certainly damaged and a new volume + * update operation has to be started. + * + * To put it differently, @corrupted and @upd_marker fields have different + * semantics: + * o the @corrupted flag means that this static volume is corrupted for some + * reasons, but not because an interrupted volume update + * o the @upd_marker field means that the volume is damaged because of an + * interrupted update operation. + * + * I.e., the @corrupted flag is never set if the @upd_marker flag is set. + * + * The @used_bytes and @used_ebs fields are only really needed for static + * volumes and contain the number of bytes stored in this static volume and how + * many eraseblock this data occupies. In case of dynamic volumes, the + * @used_bytes field is equivalent to @size*@usable_leb_size, and the @used_ebs + * field is equivalent to @size. 
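+ *
+ * For instance (illustrative numbers), a dynamic volume with @size = 100
+ * and @usable_leb_size = 126976 reports @used_bytes = 12697600 and
+ * @used_ebs = 100.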
+ * + * In general, logical eraseblock size is a property of the UBI device, not + * of the UBI volume. Indeed, the logical eraseblock size depends on the + * physical eraseblock size and on how much bytes UBI headers consume. But + * because of the volume alignment (@alignment), the usable size of logical + * eraseblocks if a volume may be less. The following equation is true: + * @usable_leb_size = LEB size - (LEB size mod @alignment), + * where LEB size is the logical eraseblock size defined by the UBI device. + * + * The alignment is multiple to the minimal flash input/output unit size or %1 + * if all the available space is used. + * + * To put this differently, alignment may be considered is a way to change + * volume logical eraseblock sizes. + */ +struct ubi_volume_info { + int ubi_num; + int vol_id; + int size; + long long used_bytes; + int used_ebs; + int vol_type; + int corrupted; + int upd_marker; + int alignment; + int usable_leb_size; + int name_len; + const char *name; + dev_t cdev; +}; + +/** + * struct ubi_device_info - UBI device description data structure. + * @ubi_num: ubi device number + * @leb_size: logical eraseblock size on this UBI device + * @min_io_size: minimal I/O unit size + * @ro_mode: if this device is in read-only mode + * @cdev: UBI character device major and minor numbers + * + * Note, @leb_size is the logical eraseblock size offered by the UBI device. + * Volumes of this UBI device may have smaller logical eraseblock size if their + * alignment is not equivalent to %1. + */ +struct ubi_device_info { + int ubi_num; + int leb_size; + int min_io_size; + int ro_mode; + dev_t cdev; +}; + +/* UBI descriptor given to users when they open UBI volumes */ +struct ubi_volume_desc; + +int ubi_get_device_info(int ubi_num, struct ubi_device_info *di); +void ubi_get_volume_info(struct ubi_volume_desc *desc, + struct ubi_volume_info *vi); +struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode); +struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, + int mode); +void ubi_close_volume(struct ubi_volume_desc *desc); +int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, + int len, int check); +int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, + int offset, int len, int dtype); +int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, + int len, int dtype); +int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); +int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); +int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); + +/* + * This function is the same as the 'ubi_leb_read()' function, but it does not + * provide the checking capability. + */ +static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf, + int offset, int len) +{ + return ubi_leb_read(desc, lnum, buf, offset, len, 0); +} + +/* + * This function is the same as the 'ubi_leb_write()' functions, but it does + * not have the data type argument. + */ +static inline int ubi_write(struct ubi_volume_desc *desc, int lnum, + const void *buf, int offset, int len) +{ + return ubi_leb_write(desc, lnum, buf, offset, len, UBI_UNKNOWN); +} + +/* + * This function is the same as the 'ubi_leb_change()' functions, but it does + * not have the data type argument. 
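+ *
+ * A minimal in-kernel usage sketch (illustrative only; error handling,
+ * and real UBI device and volume numbers, omitted) could look like:
+ *
+ *	desc = ubi_open_volume(0, 0, UBI_READWRITE);
+ *	err = ubi_change(desc, lnum, buf, len);
+ *	ubi_close_volume(desc);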
+ */ +static inline int ubi_change(struct ubi_volume_desc *desc, int lnum, + const void *buf, int len) +{ + return ubi_leb_change(desc, lnum, buf, len, UBI_UNKNOWN); +} + +#endif /* !__LINUX_UBI_H__ */ diff --git a/include/mtd/Kbuild b/include/mtd/Kbuild index e0fe92b03a4e..4d46b3bdebd8 100644 --- a/include/mtd/Kbuild +++ b/include/mtd/Kbuild @@ -3,3 +3,5 @@ header-y += jffs2-user.h header-y += mtd-abi.h header-y += mtd-user.h header-y += nftl-user.h +header-y += ubi-header.h +header-y += ubi-user.h diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h index 8e501a75a764..f71dac420394 100644 --- a/include/mtd/mtd-abi.h +++ b/include/mtd/mtd-abi.h @@ -24,6 +24,7 @@ struct mtd_oob_buf { #define MTD_NORFLASH 3 #define MTD_NANDFLASH 4 #define MTD_DATAFLASH 6 +#define MTD_UBIVOLUME 7 #define MTD_WRITEABLE 0x400 /* Device is writeable */ #define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */ diff --git a/include/mtd/ubi-header.h b/include/mtd/ubi-header.h new file mode 100644 index 000000000000..fa479c71aa34 --- /dev/null +++ b/include/mtd/ubi-header.h @@ -0,0 +1,360 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Authors: Artem Bityutskiy (Битюцкий Артём) + * Thomas Gleixner + * Frank Haverkamp + * Oliver Lohmann + * Andreas Arnez + */ + +/* + * This file defines the layout of UBI headers and all the other UBI on-flash + * data structures. May be included by user-space. + */ + +#ifndef __UBI_HEADER_H__ +#define __UBI_HEADER_H__ + +#include + +/* The version of UBI images supported by this implementation */ +#define UBI_VERSION 1 + +/* The highest erase counter value supported by this implementation */ +#define UBI_MAX_ERASECOUNTER 0x7FFFFFFF + +/* The initial CRC32 value used when calculating CRC checksums */ +#define UBI_CRC32_INIT 0xFFFFFFFFU + +/* Erase counter header magic number (ASCII "UBI#") */ +#define UBI_EC_HDR_MAGIC 0x55424923 +/* Volume identifier header magic number (ASCII "UBI!") */ +#define UBI_VID_HDR_MAGIC 0x55424921 + +/* + * Volume type constants used in the volume identifier header. + * + * @UBI_VID_DYNAMIC: dynamic volume + * @UBI_VID_STATIC: static volume + */ +enum { + UBI_VID_DYNAMIC = 1, + UBI_VID_STATIC = 2 +}; + +/* + * Compatibility constants used by internal volumes. 
+ * + * @UBI_COMPAT_DELETE: delete this internal volume before anything is written + * to the flash + * @UBI_COMPAT_RO: attach this device in read-only mode + * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its + * physical eraseblocks, don't allow the wear-leveling unit to move them + * @UBI_COMPAT_REJECT: reject this UBI image + */ +enum { + UBI_COMPAT_DELETE = 1, + UBI_COMPAT_RO = 2, + UBI_COMPAT_PRESERVE = 4, + UBI_COMPAT_REJECT = 5 +}; + +/* + * ubi16_t/ubi32_t/ubi64_t - 16, 32, and 64-bit integers used in UBI on-flash + * data structures. + */ +typedef struct { + uint16_t int16; +} __attribute__ ((packed)) ubi16_t; + +typedef struct { + uint32_t int32; +} __attribute__ ((packed)) ubi32_t; + +typedef struct { + uint64_t int64; +} __attribute__ ((packed)) ubi64_t; + +/* + * In this implementation of UBI uses the big-endian format for on-flash + * integers. The below are the corresponding conversion macros. + */ +#define cpu_to_ubi16(x) ((ubi16_t){__cpu_to_be16(x)}) +#define ubi16_to_cpu(x) ((uint16_t)__be16_to_cpu((x).int16)) + +#define cpu_to_ubi32(x) ((ubi32_t){__cpu_to_be32(x)}) +#define ubi32_to_cpu(x) ((uint32_t)__be32_to_cpu((x).int32)) + +#define cpu_to_ubi64(x) ((ubi64_t){__cpu_to_be64(x)}) +#define ubi64_to_cpu(x) ((uint64_t)__be64_to_cpu((x).int64)) + +/* Sizes of UBI headers */ +#define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr) +#define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr) + +/* Sizes of UBI headers without the ending CRC */ +#define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(ubi32_t)) +#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(ubi32_t)) + +/** + * struct ubi_ec_hdr - UBI erase counter header. + * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC) + * @version: version of UBI implementation which is supposed to accept this + * UBI image + * @padding1: reserved for future, zeroes + * @ec: the erase counter + * @vid_hdr_offset: where the VID header starts + * @data_offset: where the user data start + * @padding2: reserved for future, zeroes + * @hdr_crc: erase counter header CRC checksum + * + * The erase counter header takes 64 bytes and has a plenty of unused space for + * future usage. The unused fields are zeroed. The @version field is used to + * indicate the version of UBI implementation which is supposed to be able to + * work with this UBI image. If @version is greater then the current UBI + * version, the image is rejected. This may be useful in future if something + * is changed radically. This field is duplicated in the volume identifier + * header. + * + * The @vid_hdr_offset and @data_offset fields contain the offset of the the + * volume identifier header and user data, relative to the beginning of the + * physical eraseblock. These values have to be the same for all physical + * eraseblocks. + */ +struct ubi_ec_hdr { + ubi32_t magic; + uint8_t version; + uint8_t padding1[3]; + ubi64_t ec; /* Warning: the current limit is 31-bit anyway! */ + ubi32_t vid_hdr_offset; + ubi32_t data_offset; + uint8_t padding2[36]; + ubi32_t hdr_crc; +} __attribute__ ((packed)); + +/** + * struct ubi_vid_hdr - on-flash UBI volume identifier header. 
+ * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC) + * @version: UBI implementation version which is supposed to accept this UBI + * image (%UBI_VERSION) + * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC) + * @copy_flag: if this logical eraseblock was copied from another physical + * eraseblock (for wear-leveling reasons) + * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE, + * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT) + * @vol_id: ID of this volume + * @lnum: logical eraseblock number + * @leb_ver: version of this logical eraseblock (IMPORTANT: obsolete, to be + * removed, kept only for not breaking older UBI users) + * @data_size: how many bytes of data this logical eraseblock contains + * @used_ebs: total number of used logical eraseblocks in this volume + * @data_pad: how many bytes at the end of this physical eraseblock are not + * used + * @data_crc: CRC checksum of the data stored in this logical eraseblock + * @padding1: reserved for future, zeroes + * @sqnum: sequence number + * @padding2: reserved for future, zeroes + * @hdr_crc: volume identifier header CRC checksum + * + * The @sqnum is the value of the global sequence counter at the time when this + * VID header was created. The global sequence counter is incremented each time + * UBI writes a new VID header to the flash, i.e. when it maps a logical + * eraseblock to a new physical eraseblock. The global sequence counter is an + * unsigned 64-bit integer and we assume it never overflows. The @sqnum + * (sequence number) is used to distinguish between older and newer versions of + * logical eraseblocks. + * + * There are 2 situations when there may be more then one physical eraseblock + * corresponding to the same logical eraseblock, i.e., having the same @vol_id + * and @lnum values in the volume identifier header. Suppose we have a logical + * eraseblock L and it is mapped to the physical eraseblock P. + * + * 1. Because UBI may erase physical eraseblocks asynchronously, the following + * situation is possible: L is asynchronously erased, so P is scheduled for + * erasure, then L is written to,i.e. mapped to another physical eraseblock P1, + * so P1 is written to, then an unclean reboot happens. Result - there are 2 + * physical eraseblocks P and P1 corresponding to the same logical eraseblock + * L. But P1 has greater sequence number, so UBI picks P1 when it attaches the + * flash. + * + * 2. From time to time UBI moves logical eraseblocks to other physical + * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P + * to P1, and an unclean reboot happens before P is physically erased, there + * are two physical eraseblocks P and P1 corresponding to L and UBI has to + * select one of them when the flash is attached. The @sqnum field says which + * PEB is the original (obviously P will have lower @sqnum) and the copy. But + * it is not enough to select the physical eraseblock with the higher sequence + * number, because the unclean reboot could have happen in the middle of the + * copying process, so the data in P is corrupted. It is also not enough to + * just select the physical eraseblock with lower sequence number, because the + * data there may be old (consider a case if more data was added to P1 after + * the copying). Moreover, the unclean reboot may happen when the erasure of P + * was just started, so it result in unstable P, which is "mostly" OK, but + * still has unstable bits. 
+ * + * UBI uses the @copy_flag field to indicate that this logical eraseblock is a + * copy. UBI also calculates data CRC when the data is moved and stores it at + * the @data_crc field of the copy (P1). So when UBI needs to pick one physical + * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is + * examined. If it is cleared, the situation* is simple and the newer one is + * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC + * checksum is correct, this physical eraseblock is selected (P1). Otherwise + * the older one (P) is selected. + * + * Note, there is an obsolete @leb_ver field which was used instead of @sqnum + * in the past. But it is not used anymore and we keep it in order to be able + * to deal with old UBI images. It will be removed at some point. + * + * There are 2 sorts of volumes in UBI: user volumes and internal volumes. + * Internal volumes are not seen from outside and are used for various internal + * UBI purposes. In this implementation there is only one internal volume - the + * layout volume. Internal volumes are the main mechanism of UBI extensions. + * For example, in future one may introduce a journal internal volume. Internal + * volumes have their own reserved range of IDs. + * + * The @compat field is only used for internal volumes and contains the "degree + * of their compatibility". It is always zero for user volumes. This field + * provides a mechanism to introduce UBI extensions and to be still compatible + * with older UBI binaries. For example, if someone introduced a journal in + * future, he would probably use %UBI_COMPAT_DELETE compatibility for the + * journal volume. And in this case, older UBI binaries, which know nothing + * about the journal volume, would just delete this volume and work perfectly + * fine. This is similar to what Ext2fs does when it is fed by an Ext3fs image + * - it just ignores the Ext3fs journal. + * + * The @data_crc field contains the CRC checksum of the contents of the logical + * eraseblock if this is a static volume. In case of dynamic volumes, it does + * not contain the CRC checksum as a rule. The only exception is when the + * data of the physical eraseblock was moved by the wear-leveling unit, then + * the wear-leveling unit calculates the data CRC and stores it in the + * @data_crc field. And of course, the @copy_flag is %in this case. + * + * The @data_size field is used only for static volumes because UBI has to know + * how many bytes of data are stored in this eraseblock. For dynamic volumes, + * this field usually contains zero. The only exception is when the data of the + * physical eraseblock was moved to another physical eraseblock for + * wear-leveling reasons. In this case, UBI calculates CRC checksum of the + * contents and uses both @data_crc and @data_size fields. In this case, the + * @data_size field contains data size. + * + * The @used_ebs field is used only for static volumes and indicates how many + * eraseblocks the data of the volume takes. For dynamic volumes this field is + * not used and always contains zero. + * + * The @data_pad is calculated when volumes are created using the alignment + * parameter. So, effectively, the @data_pad field reduces the size of logical + * eraseblocks of this volume. This is very handy when one uses block-oriented + * software (say, cramfs) on top of the UBI volume. 
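+ *
+ * For example (illustrative numbers), with a 129024-byte logical
+ * eraseblock and a volume alignment of 4096, @data_pad is
+ * 129024 mod 4096 = 2048, so 126976 bytes of each LEB are usable.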
+ */ +struct ubi_vid_hdr { + ubi32_t magic; + uint8_t version; + uint8_t vol_type; + uint8_t copy_flag; + uint8_t compat; + ubi32_t vol_id; + ubi32_t lnum; + ubi32_t leb_ver; /* obsolete, to be removed, don't use */ + ubi32_t data_size; + ubi32_t used_ebs; + ubi32_t data_pad; + ubi32_t data_crc; + uint8_t padding1[4]; + ubi64_t sqnum; + uint8_t padding2[12]; + ubi32_t hdr_crc; +} __attribute__ ((packed)); + +/* Internal UBI volumes count */ +#define UBI_INT_VOL_COUNT 1 + +/* + * Starting ID of internal volumes. There is reserved room for 4096 internal + * volumes. + */ +#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096) + +/* The layout volume contains the volume table */ + +#define UBI_LAYOUT_VOL_ID UBI_INTERNAL_VOL_START +#define UBI_LAYOUT_VOLUME_EBS 2 +#define UBI_LAYOUT_VOLUME_NAME "layout volume" +#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT + +/* The maximum number of volumes per one UBI device */ +#define UBI_MAX_VOLUMES 128 + +/* The maximum volume name length */ +#define UBI_VOL_NAME_MAX 127 + +/* Size of the volume table record */ +#define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record) + +/* Size of the volume table record without the ending CRC */ +#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(ubi32_t)) + +/** + * struct ubi_vtbl_record - a record in the volume table. + * @reserved_pebs: how many physical eraseblocks are reserved for this volume + * @alignment: volume alignment + * @data_pad: how many bytes are unused at the end of the each physical + * eraseblock to satisfy the requested alignment + * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) + * @upd_marker: if volume update was started but not finished + * @name_len: volume name length + * @name: the volume name + * @padding2: reserved, zeroes + * @crc: a CRC32 checksum of the record + * + * The volume table records are stored in the volume table, which is stored in + * the layout volume. The layout volume consists of 2 logical eraseblock, each + * of which contains a copy of the volume table (i.e., the volume table is + * duplicated). The volume table is an array of &struct ubi_vtbl_record + * objects indexed by the volume ID. + * + * If the size of the logical eraseblock is large enough to fit + * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES + * records. Otherwise, it contains as many records as it can fit (i.e., size of + * logical eraseblock divided by sizeof(struct ubi_vtbl_record)). + * + * The @upd_marker flag is used to implement volume update. It is set to %1 + * before update and set to %0 after the update. So if the update operation was + * interrupted, UBI knows that the volume is corrupted. + * + * The @alignment field is specified when the volume is created and cannot be + * later changed. It may be useful, for example, when a block-oriented file + * system works on top of UBI. The @data_pad field is calculated using the + * logical eraseblock size and @alignment. The alignment must be multiple to the + * minimal flash I/O unit. If @alignment is 1, all the available space of + * the physical eraseblocks is used. + * + * Empty records contain all zeroes and the CRC checksum of those zeroes. 
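+ *
+ * For example (illustrative sizes, assuming the 172-byte record defined
+ * below): a 16 KiB logical eraseblock fits 16384 / 172 = 95 records, so
+ * such a volume table holds 95 entries, while a 128 KiB logical
+ * eraseblock fits more than %UBI_MAX_VOLUMES records and therefore holds
+ * the full 128.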
+ */ +struct ubi_vtbl_record { + ubi32_t reserved_pebs; + ubi32_t alignment; + ubi32_t data_pad; + uint8_t vol_type; + uint8_t upd_marker; + ubi16_t name_len; + uint8_t name[UBI_VOL_NAME_MAX+1]; + uint8_t padding2[24]; + ubi32_t crc; +} __attribute__ ((packed)); + +#endif /* !__UBI_HEADER_H__ */ diff --git a/include/mtd/ubi-user.h b/include/mtd/ubi-user.h new file mode 100644 index 000000000000..fe06ded0e6b8 --- /dev/null +++ b/include/mtd/ubi-user.h @@ -0,0 +1,161 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __UBI_USER_H__ +#define __UBI_USER_H__ + +/* + * UBI volume creation + * ~~~~~~~~~~~~~~~~~~~ + * + * UBI volumes are created via the %UBI_IOCMKVOL IOCTL command of UBI character + * device. A &struct ubi_mkvol_req object has to be properly filled and a + * pointer to it has to be passed to the IOCTL. + * + * UBI volume deletion + * ~~~~~~~~~~~~~~~~~~~ + * + * To delete a volume, the %UBI_IOCRMVOL IOCTL command of the UBI character + * device should be used. A pointer to the 32-bit volume ID hast to be passed + * to the IOCTL. + * + * UBI volume re-size + * ~~~~~~~~~~~~~~~~~~ + * + * To re-size a volume, the %UBI_IOCRSVOL IOCTL command of the UBI character + * device should be used. A &struct ubi_rsvol_req object has to be properly + * filled and a pointer to it has to be passed to the IOCTL. + * + * UBI volume update + * ~~~~~~~~~~~~~~~~~ + * + * Volume update should be done via the %UBI_IOCVOLUP IOCTL command of the + * corresponding UBI volume character device. A pointer to a 64-bit update + * size should be passed to the IOCTL. After then, UBI expects user to write + * this number of bytes to the volume character device. The update is finished + * when the claimed number of bytes is passed. So, the volume update sequence + * is something like: + * + * fd = open("/dev/my_volume"); + * ioctl(fd, UBI_IOCVOLUP, &image_size); + * write(fd, buf, image_size); + * close(fd); + */ + +/* + * When a new volume is created, users may either specify the volume number they + * want to create or to let UBI automatically assign a volume number using this + * constant. 
+ */ +#define UBI_VOL_NUM_AUTO (-1) + +/* Maximum volume name length */ +#define UBI_MAX_VOLUME_NAME 127 + +/* IOCTL commands of UBI character devices */ + +#define UBI_IOC_MAGIC 'o' + +/* Create an UBI volume */ +#define UBI_IOCMKVOL _IOW(UBI_IOC_MAGIC, 0, struct ubi_mkvol_req) +/* Remove an UBI volume */ +#define UBI_IOCRMVOL _IOW(UBI_IOC_MAGIC, 1, int32_t) +/* Re-size an UBI volume */ +#define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req) + +/* IOCTL commands of UBI volume character devices */ + +#define UBI_VOL_IOC_MAGIC 'O' + +/* Start UBI volume update */ +#define UBI_IOCVOLUP _IOW(UBI_VOL_IOC_MAGIC, 0, int64_t) +/* An eraseblock erasure command, used for debugging, disabled by default */ +#define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, int32_t) + +/* + * UBI volume type constants. + * + * @UBI_DYNAMIC_VOLUME: dynamic volume + * @UBI_STATIC_VOLUME: static volume + */ +enum { + UBI_DYNAMIC_VOLUME = 3, + UBI_STATIC_VOLUME = 4 +}; + +/** + * struct ubi_mkvol_req - volume description data structure used in + * volume creation requests. + * @vol_id: volume number + * @alignment: volume alignment + * @bytes: volume size in bytes + * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) + * @padding1: reserved for future, not used + * @name_len: volume name length + * @padding2: reserved for future, not used + * @name: volume name + * + * This structure is used by userspace programs when creating new volumes. The + * @used_bytes field is only necessary when creating static volumes. + * + * The @alignment field specifies the required alignment of the volume logical + * eraseblock. This means, that the size of logical eraseblocks will be aligned + * to this number, i.e., + * (UBI device logical eraseblock size) mod (@alignment) = 0. + * + * To put it differently, the logical eraseblock of this volume may be slightly + * shortened in order to make it properly aligned. The alignment has to be + * multiple of the flash minimal input/output unit, or %1 to utilize the entire + * available space of logical eraseblocks. + * + * The @alignment field may be useful, for example, when one wants to maintain + * a block device on top of an UBI volume. In this case, it is desirable to fit + * an integer number of blocks in logical eraseblocks of this UBI volume. With + * alignment it is possible to update this volume using plane UBI volume image + * BLOBs, without caring about how to properly align them. + */ +struct ubi_mkvol_req { + int32_t vol_id; + int32_t alignment; + int64_t bytes; + int8_t vol_type; + int8_t padding1; + int16_t name_len; + int8_t padding2[4]; + char name[UBI_MAX_VOLUME_NAME+1]; +} __attribute__ ((packed)); + +/** + * struct ubi_rsvol_req - a data structure used in volume re-size requests. + * @vol_id: ID of the volume to re-size + * @bytes: new size of the volume in bytes + * + * Re-sizing is possible for both dynamic and static volumes. But while dynamic + * volumes may be re-sized arbitrarily, static volumes cannot be made to be + * smaller then the number of bytes they bear. To arbitrarily shrink a static + * volume, it must be wiped out first (by means of volume update operation with + * zero number of bytes). 
+ */ +struct ubi_rsvol_req { + int64_t bytes; + int32_t vol_id; +} __attribute__ ((packed)); + +#endif /* __UBI_USER_H__ */ -- cgit v1.2.3-59-g8ed1b From 068d9f6eb9369a00eb45be91c07653cfef65f4a0 Mon Sep 17 00:00:00 2001 From: Hans-Christian Egtvedt Date: Wed, 31 Jan 2007 18:01:45 +0100 Subject: [AVR32] Add nwait and tdf parameters to SMC configuration Complete the SMC configuration code by adding nwait and tdf parameter. After this change, we support the same parameters as the hardware. Signed-off-by: Haavard Skinnemoen --- arch/avr32/mach-at32ap/hsmc.c | 23 +++++++++++++++++++++++ include/asm-avr32/arch-at32ap/smc.h | 22 ++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'include') diff --git a/arch/avr32/mach-at32ap/hsmc.c b/arch/avr32/mach-at32ap/hsmc.c index 7691721928a7..5e22a750632b 100644 --- a/arch/avr32/mach-at32ap/hsmc.c +++ b/arch/avr32/mach-at32ap/hsmc.c @@ -75,12 +75,35 @@ int smc_set_configuration(int cs, const struct smc_config *config) return -EINVAL; } + switch (config->nwait_mode) { + case 0: + mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_DISABLED); + break; + case 1: + mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_RESERVED); + break; + case 2: + mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_FROZEN); + break; + case 3: + mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_READY); + break; + default: + return -EINVAL; + } + + if (config->tdf_cycles) { + mode |= HSMC_BF(TDF_CYCLES, config->tdf_cycles); + } + if (config->nrd_controlled) mode |= HSMC_BIT(READ_MODE); if (config->nwe_controlled) mode |= HSMC_BIT(WRITE_MODE); if (config->byte_write) mode |= HSMC_BIT(BAT); + if (config->tdf_mode) + mode |= HSMC_BIT(TDF_MODE); pr_debug("smc cs%d: setup/%08x pulse/%08x cycle/%08x mode/%08x\n", cs, setup, pulse, cycle, mode); diff --git a/include/asm-avr32/arch-at32ap/smc.h b/include/asm-avr32/arch-at32ap/smc.h index 3732b328303d..07152b7fd9c9 100644 --- a/include/asm-avr32/arch-at32ap/smc.h +++ b/include/asm-avr32/arch-at32ap/smc.h @@ -47,11 +47,33 @@ struct smc_config { */ unsigned int nwe_controlled:1; + /* + * 0: NWAIT is disabled + * 1: Reserved + * 2: NWAIT is frozen mode + * 3: NWAIT in ready mode + */ + unsigned int nwait_mode:2; + /* * 0: Byte select access type * 1: Byte write access type */ unsigned int byte_write:1; + + /* + * Number of clock cycles before data is released after + * the rising edge of the read controlling signal + * + * Total cycles from SMC is tdf_cycles + 1 + */ + unsigned int tdf_cycles:4; + + /* + * 0: TDF optimization disabled + * 1: TDF optimization enabled + */ + unsigned int tdf_mode:1; }; extern int smc_set_configuration(int cs, const struct smc_config *config); -- cgit v1.2.3-59-g8ed1b From 7760989e5e2900e484e9115e6e690c6ce0b0221c Mon Sep 17 00:00:00 2001 From: Hans-Christian Egtvedt Date: Mon, 12 Mar 2007 18:15:16 +0100 Subject: [AVR32] Change system timer from count-compare to Timer/Counter 0 Due to limitation of the count-compare system timer (not able to count when CPU is in sleep), the system timer had to be changed to use a peripheral timer/counter. The old COUNT-COMPARE code is still present in time.c as weak functions. The new timer is added to the architecture directory. This patch sets up TC0 as system timer The new timer has been tested on AT32AP7000/ATSTK1000 at 100 Hz, 250 Hz, 300 Hz and 1000 Hz. 
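The divider selection that time-tc.c performs further down reduces, roughly, to picking the smallest TC clock divider for which one jiffy fits comfortably in the 16-bit counter. The following is only a hedged worked sketch with an assumed 60 MHz peripheral clock and HZ = 100; the numbers are illustrative, not taken from the patch:

/*
 * Hypothetical example: pclk = 60 MHz, HZ = 100.
 * cycles_per_jiffy ~= (pclk / divider) / HZ and must not exceed half the
 * 16-bit counter range (32768 in this driver), so that a missed compare
 * can still be detected.
 *
 *   divider  4: 60000000 /  4 / 100 = 150000  -> too large
 *   divider  8: 60000000 /  8 / 100 =  75000  -> too large
 *   divider 16: 60000000 / 16 / 100 =  37500  -> too large
 *   divider 32: 60000000 / 32 / 100 =  18750  -> fits, divider 32 is chosen
 *
 * The driver arrives at the same figure indirectly, via the clocksource
 * mult/shift pair, in avr32_timer_calc_div_and_set_jiffies().
 */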
For more details about the timer/counter see the datasheet for AT32AP700x available at http://www.atmel.com/dyn/products/product_card.asp?part_id=3903 Signed-off-by: Hans-Christian Egtvedt Signed-off-by: Haavard Skinnemoen --- arch/avr32/kernel/time.c | 150 ++++++++++++------------ arch/avr32/mach-at32ap/Makefile | 1 + arch/avr32/mach-at32ap/at32ap7000.c | 18 +++ arch/avr32/mach-at32ap/time-tc.c | 217 +++++++++++++++++++++++++++++++++++ include/asm-avr32/arch-at32ap/time.h | 112 ++++++++++++++++++ 5 files changed, 425 insertions(+), 73 deletions(-) create mode 100644 arch/avr32/mach-at32ap/time-tc.c create mode 100644 include/asm-avr32/arch-at32ap/time.h (limited to 'include') diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index c10833f2ee0c..7014a3571ec0 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2004-2006 Atmel Corporation + * Copyright (C) 2004-2007 Atmel Corporation * * Based on MIPS implementation arch/mips/kernel/time.c * Copyright 2001 MontaVista Software Inc. @@ -20,18 +20,25 @@ #include #include #include +#include #include #include #include #include -static cycle_t read_cycle_count(void) +/* how many counter cycles in a jiffy? */ +static u32 cycles_per_jiffy; + +/* the count value for the next timer interrupt */ +static u32 expirelo; + +cycle_t __weak read_cycle_count(void) { return (cycle_t)sysreg_read(COUNT); } -static struct clocksource clocksource_avr32 = { +struct clocksource __weak clocksource_avr32 = { .name = "avr32", .rating = 350, .read = read_cycle_count, @@ -40,12 +47,20 @@ static struct clocksource clocksource_avr32 = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +irqreturn_t __weak timer_interrupt(int irq, void *dev_id); + +struct irqaction timer_irqaction = { + .handler = timer_interrupt, + .flags = IRQF_DISABLED, + .name = "timer", +}; + /* * By default we provide the null RTC ops */ static unsigned long null_rtc_get_time(void) { - return mktime(2004, 1, 1, 0, 0, 0); + return mktime(2007, 1, 1, 0, 0, 0); } static int null_rtc_set_time(unsigned long sec) @@ -56,23 +71,14 @@ static int null_rtc_set_time(unsigned long sec) static unsigned long (*rtc_get_time)(void) = null_rtc_get_time; static int (*rtc_set_time)(unsigned long) = null_rtc_set_time; -/* how many counter cycles in a jiffy? 
*/ -static unsigned long cycles_per_jiffy; - -/* cycle counter value at the previous timer interrupt */ -static unsigned int timerhi, timerlo; - -/* the count value for the next timer interrupt */ -static unsigned int expirelo; - static void avr32_timer_ack(void) { - unsigned int count; + u32 count; /* Ack this timer interrupt and set the next one */ expirelo += cycles_per_jiffy; + /* setting COMPARE to 0 stops the COUNT-COMPARE */ if (expirelo == 0) { - printk(KERN_DEBUG "expirelo == 0\n"); sysreg_write(COMPARE, expirelo + 1); } else { sysreg_write(COMPARE, expirelo); @@ -86,27 +92,56 @@ static void avr32_timer_ack(void) } } -static unsigned int avr32_hpt_read(void) +int __weak avr32_hpt_init(void) { - return sysreg_read(COUNT); + int ret; + unsigned long mult, shift, count_hz; + + count_hz = clk_get_rate(boot_cpu_data.clk); + shift = clocksource_avr32.shift; + mult = clocksource_hz2mult(count_hz, shift); + clocksource_avr32.mult = mult; + + { + u64 tmp; + + tmp = TICK_NSEC; + tmp <<= shift; + tmp += mult / 2; + do_div(tmp, mult); + + cycles_per_jiffy = tmp; + } + + ret = setup_irq(0, &timer_irqaction); + if (ret) { + pr_debug("timer: could not request IRQ 0: %d\n", ret); + return -ENODEV; + } + + printk(KERN_INFO "timer: AT32AP COUNT-COMPARE at irq 0, " + "%lu.%03lu MHz\n", + ((count_hz + 500) / 1000) / 1000, + ((count_hz + 500) / 1000) % 1000); + + return 0; } /* * Taken from MIPS c0_hpt_timer_init(). * - * Why is it so complicated, and what is "count"? My assumption is - * that `count' specifies the "reference cycle", i.e. the cycle since - * reset that should mean "zero". The reason COUNT is written twice is - * probably to make sure we don't get any timer interrupts while we - * are messing with the counter. + * The reason COUNT is written twice is probably to make sure we don't get any + * timer interrupts while we are messing with the counter. */ -static void avr32_hpt_init(unsigned int count) +int __weak avr32_hpt_start(void) { - count = sysreg_read(COUNT) - count; + u32 count = sysreg_read(COUNT); expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy; sysreg_write(COUNT, expirelo - cycles_per_jiffy); sysreg_write(COMPARE, expirelo); sysreg_write(COUNT, count); + + return 0; } /* @@ -115,26 +150,18 @@ static void avr32_hpt_init(unsigned int count) * * In UP mode, it is invoked from the (global) timer_interrupt. */ -static void local_timer_interrupt(int irq, void *dev_id) +void local_timer_interrupt(int irq, void *dev_id) { if (current->pid) profile_tick(CPU_PROFILING); update_process_times(user_mode(get_irq_regs())); } -static irqreturn_t -timer_interrupt(int irq, void *dev_id) +irqreturn_t __weak timer_interrupt(int irq, void *dev_id) { - unsigned int count; - /* ack timer interrupt and try to set next interrupt */ - count = avr32_hpt_read(); avr32_timer_ack(); - /* Update timerhi/timerlo for intra-jiffy calibration */ - timerhi += count < timerlo; /* Wrap around */ - timerlo = count; - /* * Call the generic timer interrupt handler */ @@ -153,60 +180,37 @@ timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static struct irqaction timer_irqaction = { - .handler = timer_interrupt, - .flags = IRQF_DISABLED, - .name = "timer", -}; - void __init time_init(void) { - unsigned long mult, shift, count_hz; int ret; + /* + * Make sure we don't get any COMPARE interrupts before we can + * handle them. 
+ */ + sysreg_write(COMPARE, 0); + xtime.tv_sec = rtc_get_time(); xtime.tv_nsec = 0; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); - printk("Before time_init: count=%08lx, compare=%08lx\n", - (unsigned long)sysreg_read(COUNT), - (unsigned long)sysreg_read(COMPARE)); - - count_hz = clk_get_rate(boot_cpu_data.clk); - shift = clocksource_avr32.shift; - mult = clocksource_hz2mult(count_hz, shift); - clocksource_avr32.mult = mult; - - printk("Cycle counter: mult=%lu, shift=%lu\n", mult, shift); - - { - u64 tmp; - - tmp = TICK_NSEC; - tmp <<= shift; - tmp += mult / 2; - do_div(tmp, mult); - - cycles_per_jiffy = tmp; + ret = avr32_hpt_init(); + if (ret) { + pr_debug("timer: failed setup: %d\n", ret); + return; } - /* This sets up the high precision timer for the first interrupt. */ - avr32_hpt_init(avr32_hpt_read()); - - printk("After time_init: count=%08lx, compare=%08lx\n", - (unsigned long)sysreg_read(COUNT), - (unsigned long)sysreg_read(COMPARE)); - ret = clocksource_register(&clocksource_avr32); if (ret) - printk(KERN_ERR - "timer: could not register clocksource: %d\n", ret); + pr_debug("timer: could not register clocksource: %d\n", ret); - ret = setup_irq(0, &timer_irqaction); - if (ret) - printk("timer: could not request IRQ 0: %d\n", ret); + ret = avr32_hpt_start(); + if (ret) { + pr_debug("timer: failed starting: %d\n", ret); + return; + } } static struct sysdev_class timer_class = { diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile index b21bea9af8b1..f1d395724ac6 100644 --- a/arch/avr32/mach-at32ap/Makefile +++ b/arch/avr32/mach-at32ap/Makefile @@ -1,2 +1,3 @@ obj-y += at32ap.o clock.o intc.o extint.o pio.o hsmc.o obj-$(CONFIG_CPU_AT32AP7000) += at32ap7000.o +obj-$(CONFIG_CPU_AT32AP7000) += time-tc.o diff --git a/arch/avr32/mach-at32ap/at32ap7000.c b/arch/avr32/mach-at32ap/at32ap7000.c index 32c7045141c2..6eeda60b8288 100644 --- a/arch/avr32/mach-at32ap/at32ap7000.c +++ b/arch/avr32/mach-at32ap/at32ap7000.c @@ -503,6 +503,21 @@ static inline void set_ebi_sfr_bits(u32 mask) clk_disable(&hmatrix_clk); } +/* -------------------------------------------------------------------- + * System Timer/Counter (TC) + * -------------------------------------------------------------------- */ +static struct resource at32_systc0_resource[] = { + PBMEM(0xfff00c00), + IRQ(22), +}; +struct platform_device at32_systc0_device = { + .name = "systc", + .id = 0, + .resource = at32_systc0_resource, + .num_resources = ARRAY_SIZE(at32_systc0_resource), +}; +DEV_CLK(pclk, at32_systc0, pbb, 3); + /* -------------------------------------------------------------------- * PIO * -------------------------------------------------------------------- */ @@ -551,6 +566,8 @@ void __init at32_add_system_devices(void) platform_device_register(&smc0_device); platform_device_register(&pdc_device); + platform_device_register(&at32_systc0_device); + platform_device_register(&pio0_device); platform_device_register(&pio1_device); platform_device_register(&pio2_device); @@ -1000,6 +1017,7 @@ struct clk *at32_clock_list[] = { &pio2_mck, &pio3_mck, &pio4_mck, + &at32_systc0_pclk, &atmel_usart0_usart, &atmel_usart1_usart, &atmel_usart2_usart, diff --git a/arch/avr32/mach-at32ap/time-tc.c b/arch/avr32/mach-at32ap/time-tc.c new file mode 100644 index 000000000000..7ac0e94d8ed5 --- /dev/null +++ b/arch/avr32/mach-at32ap/time-tc.c @@ -0,0 +1,217 @@ +/* + * Copyright (C) 2004-2007 Atmel Corporation + * + * Based on MIPS implementation arch/mips/kernel/time.c + * Copyright 2001 
MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +/* how many counter cycles in a jiffy? */ +static u32 cycles_per_jiffy; + +/* the count value for the next timer interrupt */ +static u32 expirelo; + +/* the I/O registers of the TC module */ +static void __iomem *ioregs; + +cycle_t read_cycle_count(void) +{ + return (cycle_t)timer_read(ioregs, 0, CV); +} + +struct clocksource clocksource_avr32 = { + .name = "avr32", + .rating = 342, + .read = read_cycle_count, + .mask = CLOCKSOURCE_MASK(16), + .shift = 16, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static void avr32_timer_ack(void) +{ + u16 count = expirelo; + + /* Ack this timer interrupt and set the next one, use a u16 + * variable so it will wrap around correctly */ + count += cycles_per_jiffy; + expirelo = count; + timer_write(ioregs, 0, RC, expirelo); + + /* Check to see if we have missed any timer interrupts */ + count = timer_read(ioregs, 0, CV); + if ((count - expirelo) < 0x7fff) { + expirelo = count + cycles_per_jiffy; + timer_write(ioregs, 0, RC, expirelo); + } +} + +u32 avr32_hpt_read(void) +{ + return timer_read(ioregs, 0, CV); +} + +static int avr32_timer_calc_div_and_set_jiffies(struct clk *pclk) +{ + unsigned int cycles_max = (clocksource_avr32.mask + 1) / 2; + unsigned int divs[] = { 4, 8, 16, 32 }; + int divs_size = sizeof(divs) / sizeof(*divs); + int i = 0; + unsigned long count_hz; + unsigned long shift; + unsigned long mult; + int clock_div = -1; + u64 tmp; + + shift = clocksource_avr32.shift; + + do { + count_hz = clk_get_rate(pclk) / divs[i]; + mult = clocksource_hz2mult(count_hz, shift); + clocksource_avr32.mult = mult; + + tmp = TICK_NSEC; + tmp <<= shift; + tmp += mult / 2; + do_div(tmp, mult); + + cycles_per_jiffy = tmp; + } while (cycles_per_jiffy > cycles_max && ++i < divs_size); + + clock_div = i + 1; + + if (clock_div > divs_size) { + pr_debug("timer: could not calculate clock divider\n"); + return -EFAULT; + } + + /* Set the clock divider */ + timer_write(ioregs, 0, CMR, TIMER_BF(CMR_TCCLKS, clock_div)); + + return 0; +} + +int avr32_hpt_init(unsigned int count) +{ + struct resource *regs; + struct clk *pclk; + int irq = -1; + int ret = 0; + + ret = -ENXIO; + + irq = platform_get_irq(&at32_systc0_device, 0); + if (irq < 0) { + pr_debug("timer: could not get irq\n"); + goto out_error; + } + + pclk = clk_get(&at32_systc0_device.dev, "pclk"); + if (IS_ERR(pclk)) { + pr_debug("timer: could not get clk: %ld\n", PTR_ERR(pclk)); + goto out_error; + } + + regs = platform_get_resource(&at32_systc0_device, IORESOURCE_MEM, 0); + if (!regs) { + pr_debug("timer: could not get resource\n"); + goto out_error_clk; + } + + ioregs = ioremap(regs->start, regs->end - regs->start + 1); + if (!ioregs) { + pr_debug("timer: could not get ioregs\n"); + goto out_error_clk; + } + + ret = avr32_timer_calc_div_and_set_jiffies(pclk); + if (ret) + goto out_error_io; + + ret = setup_irq(irq, &timer_irqaction); + if (ret) { + pr_debug("timer: could not request irq %d: %d\n", + irq, ret); + goto out_error_io; + } + + expirelo = (timer_read(ioregs, 0, CV) / cycles_per_jiffy + 1) + * cycles_per_jiffy; + + /* Enable clock and interrupts on RC compare */ + timer_write(ioregs, 0, CCR, 
TIMER_BIT(CCR_CLKEN)); + timer_write(ioregs, 0, IER, TIMER_BIT(IER_CPCS)); + /* Set cycles to first interrupt */ + timer_write(ioregs, 0, RC, expirelo); + + printk(KERN_INFO "timer: AT32AP system timer/counter at 0x%p irq %d\n", + ioregs, irq); + + return 0; + +out_error_io: + iounmap(ioregs); +out_error_clk: + clk_put(pclk); +out_error: + return ret; +} + +int avr32_hpt_start(void) +{ + timer_write(ioregs, 0, CCR, TIMER_BIT(CCR_SWTRG)); + return 0; +} + +irqreturn_t timer_interrupt(int irq, void *dev_id) +{ + unsigned int sr = timer_read(ioregs, 0, SR); + + if (sr & TIMER_BIT(SR_CPCS)) { + /* ack timer interrupt and try to set next interrupt */ + avr32_timer_ack(); + + /* + * Call the generic timer interrupt handler + */ + write_seqlock(&xtime_lock); + do_timer(1); + write_sequnlock(&xtime_lock); + + /* + * In UP mode, we call local_timer_interrupt() to do profiling + * and process accounting. + * + * SMP is not supported yet. + */ + local_timer_interrupt(irq, dev_id); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} diff --git a/include/asm-avr32/arch-at32ap/time.h b/include/asm-avr32/arch-at32ap/time.h new file mode 100644 index 000000000000..cc8a43418a4d --- /dev/null +++ b/include/asm-avr32/arch-at32ap/time.h @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ASM_AVR32_ARCH_AT32AP_TIME_H +#define _ASM_AVR32_ARCH_AT32AP_TIME_H + +#include + +extern struct irqaction timer_irqaction; +extern struct platform_device at32_systc0_device; +extern void local_timer_interrupt(int irq, void *dev_id); + +#define TIMER_BCR 0x000000c0 +#define TIMER_BCR_SYNC 0 +#define TIMER_BMR 0x000000c4 +#define TIMER_BMR_TC0XC0S 0 +#define TIMER_BMR_TC1XC1S 2 +#define TIMER_BMR_TC2XC2S 4 +#define TIMER_CCR 0x00000000 +#define TIMER_CCR_CLKDIS 1 +#define TIMER_CCR_CLKEN 0 +#define TIMER_CCR_SWTRG 2 +#define TIMER_CMR 0x00000004 +#define TIMER_CMR_ABETRG 10 +#define TIMER_CMR_ACPA 16 +#define TIMER_CMR_ACPC 18 +#define TIMER_CMR_AEEVT 20 +#define TIMER_CMR_ASWTRG 22 +#define TIMER_CMR_BCPB 24 +#define TIMER_CMR_BCPC 26 +#define TIMER_CMR_BEEVT 28 +#define TIMER_CMR_BSWTRG 30 +#define TIMER_CMR_BURST 4 +#define TIMER_CMR_CLKI 3 +#define TIMER_CMR_CPCDIS 7 +#define TIMER_CMR_CPCSTOP 6 +#define TIMER_CMR_CPCTRG 14 +#define TIMER_CMR_EEVT 10 +#define TIMER_CMR_EEVTEDG 8 +#define TIMER_CMR_ENETRG 12 +#define TIMER_CMR_ETRGEDG 8 +#define TIMER_CMR_LDBDIS 7 +#define TIMER_CMR_LDBSTOP 6 +#define TIMER_CMR_LDRA 16 +#define TIMER_CMR_LDRB 18 +#define TIMER_CMR_TCCLKS 0 +#define TIMER_CMR_WAVE 15 +#define TIMER_CMR_WAVSEL 13 +#define TIMER_CV 0x00000010 +#define TIMER_CV_CV 0 +#define TIMER_IDR 0x00000028 +#define TIMER_IDR_COVFS 0 +#define TIMER_IDR_CPAS 2 +#define TIMER_IDR_CPBS 3 +#define TIMER_IDR_CPCS 4 +#define TIMER_IDR_ETRGS 7 +#define TIMER_IDR_LDRAS 5 +#define TIMER_IDR_LDRBS 6 +#define TIMER_IDR_LOVRS 1 +#define TIMER_IER 0x00000024 +#define TIMER_IER_COVFS 0 +#define TIMER_IER_CPAS 2 +#define TIMER_IER_CPBS 3 +#define TIMER_IER_CPCS 4 +#define TIMER_IER_ETRGS 7 +#define TIMER_IER_LDRAS 5 +#define TIMER_IER_LDRBS 6 +#define TIMER_IER_LOVRS 1 +#define TIMER_IMR 0x0000002c +#define TIMER_IMR_COVFS 0 +#define TIMER_IMR_CPAS 2 +#define TIMER_IMR_CPBS 3 +#define TIMER_IMR_CPCS 4 +#define TIMER_IMR_ETRGS 7 +#define TIMER_IMR_LDRAS 5 +#define TIMER_IMR_LDRBS 6 +#define TIMER_IMR_LOVRS 1 +#define 
TIMER_RA 0x00000014 +#define TIMER_RA_RA 0 +#define TIMER_RB 0x00000018 +#define TIMER_RB_RB 0 +#define TIMER_RC 0x0000001c +#define TIMER_RC_RC 0 +#define TIMER_SR 0x00000020 +#define TIMER_SR_CLKSTA 16 +#define TIMER_SR_COVFS 0 +#define TIMER_SR_CPAS 2 +#define TIMER_SR_CPBS 3 +#define TIMER_SR_CPCS 4 +#define TIMER_SR_ETRGS 7 +#define TIMER_SR_LDRAS 5 +#define TIMER_SR_LDRBS 6 +#define TIMER_SR_LOVRS 1 +#define TIMER_SR_MTIOA 17 +#define TIMER_SR_MTIOB 18 + +/* Bit manipulation macros */ +#define TIMER_BIT(name) (1 << TIMER_##name) +#define TIMER_BF(name,value) ((value) << TIMER_##name) + +/* Register access macros */ +#define timer_read(port,instance,reg) \ + __raw_readl(port + (0x40 * instance) + TIMER_##reg) +#define timer_write(port,instance,reg,value) \ + __raw_writel((value), port + (0x40 * instance) + TIMER_##reg) + +#endif /* _ASM_AVR32_ARCH_AT32AP_TIME_H */ -- cgit v1.2.3-59-g8ed1b From 19b7ce8bad718a2850ea19aeb7383f1728596c24 Mon Sep 17 00:00:00 2001 From: Hans-Christian Egtvedt Date: Mon, 26 Feb 2007 13:50:43 +0100 Subject: [AVR32] Put cpu in sleep 0 when idle. This patch puts the CPU in sleep 0 when doing nothing, idle. This will turn of the CPU clock and thus save power. The CPU is waken again when an interrupt occurs. Signed-off-by: Hans-Christian Egtvedt Signed-off-by: Haavard Skinnemoen --- arch/avr32/kernel/entry-avr32b.S | 42 +++++++++++++++++++++++++++++++++++----- arch/avr32/kernel/process.c | 5 +++-- include/asm-avr32/thread_info.h | 2 ++ 3 files changed, 42 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S index eeb66792bc37..5f5f7e42f51b 100644 --- a/arch/avr32/kernel/entry-avr32b.S +++ b/arch/avr32/kernel/entry-avr32b.S @@ -630,9 +630,12 @@ irq_level\level: rcall do_IRQ lddsp r4, sp[REG_SR] - andh r4, (MODE_MASK >> 16), COH + bfextu r4, r4, SYSREG_M0_OFFSET, 3 + cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET + breq 2f + cp.w r4, MODE_USER >> SYSREG_M0_OFFSET #ifdef CONFIG_PREEMPT - brne 2f + brne 3f #else brne 1f #endif @@ -649,9 +652,18 @@ irq_level\level: sub sp, -4 /* ignore r12_orig */ rete +2: get_thread_info r0 + ld.w r1, r0[TI_flags] + bld r1, TIF_CPU_GOING_TO_SLEEP #ifdef CONFIG_PREEMPT -2: - get_thread_info r0 + brcc 3f +#else + brcc 1b +#endif + sub r1, pc, . - cpu_idle_skip_sleep + stdsp sp[REG_PC], r1 +#ifdef CONFIG_PREEMPT +3: get_thread_info r0 ld.w r2, r0[TI_preempt_count] cp.w r2, 0 brne 1b @@ -662,12 +674,32 @@ irq_level\level: bld r4, SYSREG_GM_OFFSET brcs 1b rcall preempt_schedule_irq - rjmp 1b #endif + rjmp 1b .endm .section .irq.text,"ax",@progbits +.global cpu_idle_sleep +cpu_idle_sleep: + mask_interrupts + get_thread_info r8 + ld.w r9, r8[TI_flags] + bld r9, TIF_NEED_RESCHED + brcs cpu_idle_enable_int_and_exit + sbr r9, TIF_CPU_GOING_TO_SLEEP + st.w r8[TI_flags], r9 + unmask_interrupts + sleep 0 +cpu_idle_skip_sleep: + mask_interrupts + ld.w r9, r8[TI_flags] + cbr r9, TIF_CPU_GOING_TO_SLEEP + st.w r8[TI_flags], r9 +cpu_idle_enable_int_and_exit: + unmask_interrupts + retal r12 + .global irq_level0 .global irq_level1 .global irq_level2 diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index 0b4325946a41..4f8d2d474740 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -19,6 +19,8 @@ void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); +extern void cpu_idle_sleep(void); + /* * This file handles the architecture-dependent parts of process handling.. 
*/ @@ -27,9 +29,8 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { - /* TODO: Enter sleep mode */ while (!need_resched()) - cpu_relax(); + cpu_idle_sleep(); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/include/asm-avr32/thread_info.h b/include/asm-avr32/thread_info.h index d1f5b35ebd54..a2e606dd4f4a 100644 --- a/include/asm-avr32/thread_info.h +++ b/include/asm-avr32/thread_info.h @@ -83,6 +83,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SINGLE_STEP 6 /* single step after next break */ #define TIF_MEMDIE 7 #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal */ +#define TIF_CPU_GOING_TO_SLEEP 9 /* CPU is entering sleep 0 mode */ #define TIF_USERSPACE 31 /* true if FS sets userspace */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) @@ -94,6 +95,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SINGLE_STEP (1 << TIF_SINGLE_STEP) #define _TIF_MEMDIE (1 << TIF_MEMDIE) #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) +#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP) /* XXX: These two masks must never span more than 16 bits! */ /* work to do on interrupt/exception return */ -- cgit v1.2.3-59-g8ed1b From 535c806c26ef602d578792083df52b31803b961e Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 13 Mar 2007 14:17:14 +0100 Subject: [AVR32] Clean up asm/sysreg.h Fix indentation and remove spurious comments in asm-avr32/sysreg.h Signed-off-by: Haavard Skinnemoen --- include/asm-avr32/sysreg.h | 543 +++++++++++++++++++++------------------------ 1 file changed, 249 insertions(+), 294 deletions(-) (limited to 'include') diff --git a/include/asm-avr32/sysreg.h b/include/asm-avr32/sysreg.h index f91975f330f6..c02bc8304b13 100644 --- a/include/asm-avr32/sysreg.h +++ b/include/asm-avr32/sysreg.h @@ -7,326 +7,281 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ -#ifndef __ASM_AVR32_SYSREG_H__ -#define __ASM_AVR32_SYSREG_H__ +#ifndef __ASM_AVR32_SYSREG_H +#define __ASM_AVR32_SYSREG_H /* sysreg register offsets */ -#define SYSREG_SR 0x0000 -#define SYSREG_EVBA 0x0004 -#define SYSREG_ACBA 0x0008 -#define SYSREG_CPUCR 0x000c -#define SYSREG_ECR 0x0010 -#define SYSREG_RSR_SUP 0x0014 -#define SYSREG_RSR_INT0 0x0018 -#define SYSREG_RSR_INT1 0x001c -#define SYSREG_RSR_INT2 0x0020 -#define SYSREG_RSR_INT3 0x0024 -#define SYSREG_RSR_EX 0x0028 -#define SYSREG_RSR_NMI 0x002c -#define SYSREG_RSR_DBG 0x0030 -#define SYSREG_RAR_SUP 0x0034 -#define SYSREG_RAR_INT0 0x0038 -#define SYSREG_RAR_INT1 0x003c -#define SYSREG_RAR_INT2 0x0040 -#define SYSREG_RAR_INT3 0x0044 -#define SYSREG_RAR_EX 0x0048 -#define SYSREG_RAR_NMI 0x004c -#define SYSREG_RAR_DBG 0x0050 -#define SYSREG_JECR 0x0054 -#define SYSREG_JOSP 0x0058 -#define SYSREG_JAVA_LV0 0x005c -#define SYSREG_JAVA_LV1 0x0060 -#define SYSREG_JAVA_LV2 0x0064 -#define SYSREG_JAVA_LV3 0x0068 -#define SYSREG_JAVA_LV4 0x006c -#define SYSREG_JAVA_LV5 0x0070 -#define SYSREG_JAVA_LV6 0x0074 -#define SYSREG_JAVA_LV7 0x0078 -#define SYSREG_JTBA 0x007c -#define SYSREG_JBCR 0x0080 -#define SYSREG_CONFIG0 0x0100 -#define SYSREG_CONFIG1 0x0104 -#define SYSREG_COUNT 0x0108 -#define SYSREG_COMPARE 0x010c -#define SYSREG_TLBEHI 0x0110 -#define SYSREG_TLBELO 0x0114 -#define SYSREG_PTBR 0x0118 -#define SYSREG_TLBEAR 0x011c -#define SYSREG_MMUCR 0x0120 -#define SYSREG_TLBARLO 0x0124 -#define SYSREG_TLBARHI 0x0128 -#define SYSREG_PCCNT 0x012c -#define SYSREG_PCNT0 0x0130 -#define SYSREG_PCNT1 0x0134 -#define SYSREG_PCCR 0x0138 -#define SYSREG_BEAR 0x013c +#define SYSREG_SR 0x0000 +#define SYSREG_EVBA 0x0004 +#define SYSREG_ACBA 0x0008 +#define SYSREG_CPUCR 0x000c +#define SYSREG_ECR 0x0010 +#define SYSREG_RSR_SUP 0x0014 +#define SYSREG_RSR_INT0 0x0018 +#define SYSREG_RSR_INT1 0x001c +#define SYSREG_RSR_INT2 0x0020 +#define SYSREG_RSR_INT3 0x0024 +#define SYSREG_RSR_EX 0x0028 +#define SYSREG_RSR_NMI 0x002c +#define SYSREG_RSR_DBG 0x0030 +#define SYSREG_RAR_SUP 0x0034 +#define SYSREG_RAR_INT0 0x0038 +#define SYSREG_RAR_INT1 0x003c +#define SYSREG_RAR_INT2 0x0040 +#define SYSREG_RAR_INT3 0x0044 +#define SYSREG_RAR_EX 0x0048 +#define SYSREG_RAR_NMI 0x004c +#define SYSREG_RAR_DBG 0x0050 +#define SYSREG_JECR 0x0054 +#define SYSREG_JOSP 0x0058 +#define SYSREG_JAVA_LV0 0x005c +#define SYSREG_JAVA_LV1 0x0060 +#define SYSREG_JAVA_LV2 0x0064 +#define SYSREG_JAVA_LV3 0x0068 +#define SYSREG_JAVA_LV4 0x006c +#define SYSREG_JAVA_LV5 0x0070 +#define SYSREG_JAVA_LV6 0x0074 +#define SYSREG_JAVA_LV7 0x0078 +#define SYSREG_JTBA 0x007c +#define SYSREG_JBCR 0x0080 +#define SYSREG_CONFIG0 0x0100 +#define SYSREG_CONFIG1 0x0104 +#define SYSREG_COUNT 0x0108 +#define SYSREG_COMPARE 0x010c +#define SYSREG_TLBEHI 0x0110 +#define SYSREG_TLBELO 0x0114 +#define SYSREG_PTBR 0x0118 +#define SYSREG_TLBEAR 0x011c +#define SYSREG_MMUCR 0x0120 +#define SYSREG_TLBARLO 0x0124 +#define SYSREG_TLBARHI 0x0128 +#define SYSREG_PCCNT 0x012c +#define SYSREG_PCNT0 0x0130 +#define SYSREG_PCNT1 0x0134 +#define SYSREG_PCCR 0x0138 +#define SYSREG_BEAR 0x013c +#define SYSREG_SABAL 0x0300 +#define SYSREG_SABAH 0x0304 +#define SYSREG_SABD 0x0308 /* Bitfields in SR */ -#define SYSREG_SR_C_OFFSET 0 -#define SYSREG_SR_C_SIZE 1 -#define SYSREG_Z_OFFSET 1 -#define SYSREG_Z_SIZE 1 -#define SYSREG_SR_N_OFFSET 2 -#define SYSREG_SR_N_SIZE 1 -#define SYSREG_SR_V_OFFSET 3 -#define SYSREG_SR_V_SIZE 1 -#define SYSREG_Q_OFFSET 4 -#define SYSREG_Q_SIZE 1 -#define SYSREG_GM_OFFSET 16 -#define 
SYSREG_GM_SIZE 1 -#define SYSREG_I0M_OFFSET 17 -#define SYSREG_I0M_SIZE 1 -#define SYSREG_I1M_OFFSET 18 -#define SYSREG_I1M_SIZE 1 -#define SYSREG_I2M_OFFSET 19 -#define SYSREG_I2M_SIZE 1 -#define SYSREG_I3M_OFFSET 20 -#define SYSREG_I3M_SIZE 1 -#define SYSREG_EM_OFFSET 21 -#define SYSREG_EM_SIZE 1 -#define SYSREG_M0_OFFSET 22 -#define SYSREG_M0_SIZE 1 -#define SYSREG_M1_OFFSET 23 -#define SYSREG_M1_SIZE 1 -#define SYSREG_M2_OFFSET 24 -#define SYSREG_M2_SIZE 1 -#define SYSREG_SR_D_OFFSET 26 -#define SYSREG_SR_D_SIZE 1 -#define SYSREG_DM_OFFSET 27 -#define SYSREG_DM_SIZE 1 -#define SYSREG_SR_J_OFFSET 28 -#define SYSREG_SR_J_SIZE 1 -#define SYSREG_R_OFFSET 29 -#define SYSREG_R_SIZE 1 -#define SYSREG_H_OFFSET 30 -#define SYSREG_H_SIZE 1 - -/* Bitfields in EVBA */ - -/* Bitfields in ACBA */ +#define SYSREG_SR_C_OFFSET 0 +#define SYSREG_SR_C_SIZE 1 +#define SYSREG_Z_OFFSET 1 +#define SYSREG_Z_SIZE 1 +#define SYSREG_SR_N_OFFSET 2 +#define SYSREG_SR_N_SIZE 1 +#define SYSREG_SR_V_OFFSET 3 +#define SYSREG_SR_V_SIZE 1 +#define SYSREG_Q_OFFSET 4 +#define SYSREG_Q_SIZE 1 +#define SYSREG_L_OFFSET 5 +#define SYSREG_L_SIZE 1 +#define SYSREG_T_OFFSET 14 +#define SYSREG_T_SIZE 1 +#define SYSREG_SR_R_OFFSET 15 +#define SYSREG_SR_R_SIZE 1 +#define SYSREG_GM_OFFSET 16 +#define SYSREG_GM_SIZE 1 +#define SYSREG_I0M_OFFSET 17 +#define SYSREG_I0M_SIZE 1 +#define SYSREG_I1M_OFFSET 18 +#define SYSREG_I1M_SIZE 1 +#define SYSREG_I2M_OFFSET 19 +#define SYSREG_I2M_SIZE 1 +#define SYSREG_I3M_OFFSET 20 +#define SYSREG_I3M_SIZE 1 +#define SYSREG_EM_OFFSET 21 +#define SYSREG_EM_SIZE 1 +#define SYSREG_M0_OFFSET 22 +#define SYSREG_M0_SIZE 1 +#define SYSREG_M1_OFFSET 23 +#define SYSREG_M1_SIZE 1 +#define SYSREG_M2_OFFSET 24 +#define SYSREG_M2_SIZE 1 +#define SYSREG_SR_D_OFFSET 26 +#define SYSREG_SR_D_SIZE 1 +#define SYSREG_DM_OFFSET 27 +#define SYSREG_DM_SIZE 1 +#define SYSREG_SR_J_OFFSET 28 +#define SYSREG_SR_J_SIZE 1 +#define SYSREG_H_OFFSET 29 +#define SYSREG_H_SIZE 1 /* Bitfields in CPUCR */ -#define SYSREG_BI_OFFSET 0 -#define SYSREG_BI_SIZE 1 -#define SYSREG_BE_OFFSET 1 -#define SYSREG_BE_SIZE 1 -#define SYSREG_FE_OFFSET 2 -#define SYSREG_FE_SIZE 1 -#define SYSREG_RE_OFFSET 3 -#define SYSREG_RE_SIZE 1 -#define SYSREG_IBE_OFFSET 4 -#define SYSREG_IBE_SIZE 1 -#define SYSREG_IEE_OFFSET 5 -#define SYSREG_IEE_SIZE 1 - -/* Bitfields in ECR */ -#define SYSREG_ECR_OFFSET 0 -#define SYSREG_ECR_SIZE 32 - -/* Bitfields in RSR_SUP */ - -/* Bitfields in RSR_INT0 */ - -/* Bitfields in RSR_INT1 */ - -/* Bitfields in RSR_INT2 */ - -/* Bitfields in RSR_INT3 */ - -/* Bitfields in RSR_EX */ - -/* Bitfields in RSR_NMI */ - -/* Bitfields in RSR_DBG */ - -/* Bitfields in RAR_SUP */ - -/* Bitfields in RAR_INT0 */ - -/* Bitfields in RAR_INT1 */ - -/* Bitfields in RAR_INT2 */ - -/* Bitfields in RAR_INT3 */ - -/* Bitfields in RAR_EX */ - -/* Bitfields in RAR_NMI */ - -/* Bitfields in RAR_DBG */ - -/* Bitfields in JECR */ - -/* Bitfields in JOSP */ - -/* Bitfields in JAVA_LV0 */ - -/* Bitfields in JAVA_LV1 */ - -/* Bitfields in JAVA_LV2 */ - -/* Bitfields in JAVA_LV3 */ - -/* Bitfields in JAVA_LV4 */ - -/* Bitfields in JAVA_LV5 */ - -/* Bitfields in JAVA_LV6 */ - -/* Bitfields in JAVA_LV7 */ - -/* Bitfields in JTBA */ - -/* Bitfields in JBCR */ +#define SYSREG_BI_OFFSET 0 +#define SYSREG_BI_SIZE 1 +#define SYSREG_BE_OFFSET 1 +#define SYSREG_BE_SIZE 1 +#define SYSREG_FE_OFFSET 2 +#define SYSREG_FE_SIZE 1 +#define SYSREG_RE_OFFSET 3 +#define SYSREG_RE_SIZE 1 +#define SYSREG_IBE_OFFSET 4 +#define SYSREG_IBE_SIZE 1 +#define SYSREG_IEE_OFFSET 5 
+#define SYSREG_IEE_SIZE 1 /* Bitfields in CONFIG0 */ -#define SYSREG_CONFIG0_D_OFFSET 1 -#define SYSREG_CONFIG0_D_SIZE 1 -#define SYSREG_CONFIG0_S_OFFSET 2 -#define SYSREG_CONFIG0_S_SIZE 1 -#define SYSREG_O_OFFSET 3 -#define SYSREG_O_SIZE 1 -#define SYSREG_P_OFFSET 4 -#define SYSREG_P_SIZE 1 -#define SYSREG_CONFIG0_J_OFFSET 5 -#define SYSREG_CONFIG0_J_SIZE 1 -#define SYSREG_F_OFFSET 6 -#define SYSREG_F_SIZE 1 -#define SYSREG_MMUT_OFFSET 7 -#define SYSREG_MMUT_SIZE 3 -#define SYSREG_AR_OFFSET 10 -#define SYSREG_AR_SIZE 3 -#define SYSREG_AT_OFFSET 13 -#define SYSREG_AT_SIZE 3 -#define SYSREG_PROCESSORREVISION_OFFSET 16 -#define SYSREG_PROCESSORREVISION_SIZE 8 -#define SYSREG_PROCESSORID_OFFSET 24 -#define SYSREG_PROCESSORID_SIZE 8 +#define SYSREG_CONFIG0_R_OFFSET 0 +#define SYSREG_CONFIG0_R_SIZE 1 +#define SYSREG_CONFIG0_D_OFFSET 1 +#define SYSREG_CONFIG0_D_SIZE 1 +#define SYSREG_CONFIG0_S_OFFSET 2 +#define SYSREG_CONFIG0_S_SIZE 1 +#define SYSREG_CONFIG0_O_OFFSET 3 +#define SYSREG_CONFIG0_O_SIZE 1 +#define SYSREG_CONFIG0_P_OFFSET 4 +#define SYSREG_CONFIG0_P_SIZE 1 +#define SYSREG_CONFIG0_J_OFFSET 5 +#define SYSREG_CONFIG0_J_SIZE 1 +#define SYSREG_CONFIG0_F_OFFSET 6 +#define SYSREG_CONFIG0_F_SIZE 1 +#define SYSREG_MMUT_OFFSET 7 +#define SYSREG_MMUT_SIZE 3 +#define SYSREG_AR_OFFSET 10 +#define SYSREG_AR_SIZE 3 +#define SYSREG_AT_OFFSET 13 +#define SYSREG_AT_SIZE 3 +#define SYSREG_PROCESSORREVISION_OFFSET 16 +#define SYSREG_PROCESSORREVISION_SIZE 8 +#define SYSREG_PROCESSORID_OFFSET 24 +#define SYSREG_PROCESSORID_SIZE 8 /* Bitfields in CONFIG1 */ -#define SYSREG_DASS_OFFSET 0 -#define SYSREG_DASS_SIZE 3 -#define SYSREG_DLSZ_OFFSET 3 -#define SYSREG_DLSZ_SIZE 3 -#define SYSREG_DSET_OFFSET 6 -#define SYSREG_DSET_SIZE 4 -#define SYSREG_IASS_OFFSET 10 -#define SYSREG_IASS_SIZE 2 -#define SYSREG_ILSZ_OFFSET 13 -#define SYSREG_ILSZ_SIZE 3 -#define SYSREG_ISET_OFFSET 16 -#define SYSREG_ISET_SIZE 4 -#define SYSREG_DMMUSZ_OFFSET 20 -#define SYSREG_DMMUSZ_SIZE 6 -#define SYSREG_IMMUSZ_OFFSET 26 -#define SYSREG_IMMUSZ_SIZE 6 - -/* Bitfields in COUNT */ - -/* Bitfields in COMPARE */ +#define SYSREG_DASS_OFFSET 0 +#define SYSREG_DASS_SIZE 3 +#define SYSREG_DLSZ_OFFSET 3 +#define SYSREG_DLSZ_SIZE 3 +#define SYSREG_DSET_OFFSET 6 +#define SYSREG_DSET_SIZE 4 +#define SYSREG_IASS_OFFSET 10 +#define SYSREG_IASS_SIZE 3 +#define SYSREG_ILSZ_OFFSET 13 +#define SYSREG_ILSZ_SIZE 3 +#define SYSREG_ISET_OFFSET 16 +#define SYSREG_ISET_SIZE 4 +#define SYSREG_DMMUSZ_OFFSET 20 +#define SYSREG_DMMUSZ_SIZE 6 +#define SYSREG_IMMUSZ_OFFSET 26 +#define SYSREG_IMMUSZ_SIZE 6 /* Bitfields in TLBEHI */ -#define SYSREG_ASID_OFFSET 0 -#define SYSREG_ASID_SIZE 8 -#define SYSREG_TLBEHI_I_OFFSET 8 -#define SYSREG_TLBEHI_I_SIZE 1 -#define SYSREG_TLBEHI_V_OFFSET 9 -#define SYSREG_TLBEHI_V_SIZE 1 -#define SYSREG_VPN_OFFSET 10 -#define SYSREG_VPN_SIZE 22 +#define SYSREG_ASID_OFFSET 0 +#define SYSREG_ASID_SIZE 8 +#define SYSREG_TLBEHI_I_OFFSET 8 +#define SYSREG_TLBEHI_I_SIZE 1 +#define SYSREG_TLBEHI_V_OFFSET 9 +#define SYSREG_TLBEHI_V_SIZE 1 +#define SYSREG_VPN_OFFSET 10 +#define SYSREG_VPN_SIZE 22 /* Bitfields in TLBELO */ -#define SYSREG_W_OFFSET 0 -#define SYSREG_W_SIZE 1 -#define SYSREG_TLBELO_D_OFFSET 1 -#define SYSREG_TLBELO_D_SIZE 1 -#define SYSREG_SZ_OFFSET 2 -#define SYSREG_SZ_SIZE 2 -#define SYSREG_AP_OFFSET 4 -#define SYSREG_AP_SIZE 3 -#define SYSREG_B_OFFSET 7 -#define SYSREG_B_SIZE 1 -#define SYSREG_G_OFFSET 8 -#define SYSREG_G_SIZE 1 -#define SYSREG_TLBELO_C_OFFSET 9 -#define SYSREG_TLBELO_C_SIZE 1 -#define SYSREG_PFN_OFFSET 10 
-#define SYSREG_PFN_SIZE 22 - -/* Bitfields in PTBR */ - -/* Bitfields in TLBEAR */ +#define SYSREG_W_OFFSET 0 +#define SYSREG_W_SIZE 1 +#define SYSREG_TLBELO_D_OFFSET 1 +#define SYSREG_TLBELO_D_SIZE 1 +#define SYSREG_SZ_OFFSET 2 +#define SYSREG_SZ_SIZE 2 +#define SYSREG_AP_OFFSET 4 +#define SYSREG_AP_SIZE 3 +#define SYSREG_B_OFFSET 7 +#define SYSREG_B_SIZE 1 +#define SYSREG_G_OFFSET 8 +#define SYSREG_G_SIZE 1 +#define SYSREG_TLBELO_C_OFFSET 9 +#define SYSREG_TLBELO_C_SIZE 1 +#define SYSREG_PFN_OFFSET 10 +#define SYSREG_PFN_SIZE 22 /* Bitfields in MMUCR */ -#define SYSREG_E_OFFSET 0 -#define SYSREG_E_SIZE 1 -#define SYSREG_M_OFFSET 1 -#define SYSREG_M_SIZE 1 -#define SYSREG_MMUCR_I_OFFSET 2 -#define SYSREG_MMUCR_I_SIZE 1 -#define SYSREG_MMUCR_N_OFFSET 3 -#define SYSREG_MMUCR_N_SIZE 1 -#define SYSREG_MMUCR_S_OFFSET 4 -#define SYSREG_MMUCR_S_SIZE 1 -#define SYSREG_DLA_OFFSET 8 -#define SYSREG_DLA_SIZE 6 -#define SYSREG_DRP_OFFSET 14 -#define SYSREG_DRP_SIZE 6 -#define SYSREG_ILA_OFFSET 20 -#define SYSREG_ILA_SIZE 6 -#define SYSREG_IRP_OFFSET 26 -#define SYSREG_IRP_SIZE 6 - -/* Bitfields in TLBARLO */ - -/* Bitfields in TLBARHI */ - -/* Bitfields in PCCNT */ - -/* Bitfields in PCNT0 */ - -/* Bitfields in PCNT1 */ +#define SYSREG_E_OFFSET 0 +#define SYSREG_E_SIZE 1 +#define SYSREG_M_OFFSET 1 +#define SYSREG_M_SIZE 1 +#define SYSREG_MMUCR_I_OFFSET 2 +#define SYSREG_MMUCR_I_SIZE 1 +#define SYSREG_MMUCR_N_OFFSET 3 +#define SYSREG_MMUCR_N_SIZE 1 +#define SYSREG_MMUCR_S_OFFSET 4 +#define SYSREG_MMUCR_S_SIZE 1 +#define SYSREG_DLA_OFFSET 8 +#define SYSREG_DLA_SIZE 6 +#define SYSREG_DRP_OFFSET 14 +#define SYSREG_DRP_SIZE 6 +#define SYSREG_ILA_OFFSET 20 +#define SYSREG_ILA_SIZE 6 +#define SYSREG_IRP_OFFSET 26 +#define SYSREG_IRP_SIZE 6 /* Bitfields in PCCR */ - -/* Bitfields in BEAR */ +#define SYSREG_PCCR_R_OFFSET 1 +#define SYSREG_PCCR_R_SIZE 1 +#define SYSREG_PCCR_C_OFFSET 2 +#define SYSREG_PCCR_C_SIZE 1 +#define SYSREG_PCCR_S_OFFSET 3 +#define SYSREG_PCCR_S_SIZE 1 +#define SYSREG_IEC_OFFSET 4 +#define SYSREG_IEC_SIZE 1 +#define SYSREG_IE0_OFFSET 5 +#define SYSREG_IE0_SIZE 1 +#define SYSREG_IE1_OFFSET 6 +#define SYSREG_IE1_SIZE 1 +#define SYSREG_FC_OFFSET 8 +#define SYSREG_FC_SIZE 1 +#define SYSREG_F0_OFFSET 9 +#define SYSREG_F0_SIZE 1 +#define SYSREG_F1_OFFSET 10 +#define SYSREG_F1_SIZE 1 +#define SYSREG_CONF0_OFFSET 12 +#define SYSREG_CONF0_SIZE 6 +#define SYSREG_CONF1_OFFSET 18 +#define SYSREG_CONF1_SIZE 6 /* Constants for ECR */ -#define ECR_UNRECOVERABLE 0 -#define ECR_TLB_MULTIPLE 1 -#define ECR_BUS_ERROR_WRITE 2 -#define ECR_BUS_ERROR_READ 3 -#define ECR_NMI 4 -#define ECR_ADDR_ALIGN_X 5 -#define ECR_PROTECTION_X 6 -#define ECR_DEBUG 7 -#define ECR_ILLEGAL_OPCODE 8 -#define ECR_UNIMPL_INSTRUCTION 9 -#define ECR_PRIVILEGE_VIOLATION 10 -#define ECR_FPE 11 -#define ECR_COPROC_ABSENT 12 -#define ECR_ADDR_ALIGN_R 13 -#define ECR_ADDR_ALIGN_W 14 -#define ECR_PROTECTION_R 15 -#define ECR_PROTECTION_W 16 -#define ECR_DTLB_MODIFIED 17 -#define ECR_TLB_MISS_X 20 -#define ECR_TLB_MISS_R 24 -#define ECR_TLB_MISS_W 28 +#define ECR_UNRECOVERABLE 0 +#define ECR_TLB_MULTIPLE 1 +#define ECR_BUS_ERROR_WRITE 2 +#define ECR_BUS_ERROR_READ 3 +#define ECR_NMI 4 +#define ECR_ADDR_ALIGN_X 5 +#define ECR_PROTECTION_X 6 +#define ECR_DEBUG 7 +#define ECR_ILLEGAL_OPCODE 8 +#define ECR_UNIMPL_INSTRUCTION 9 +#define ECR_PRIVILEGE_VIOLATION 10 +#define ECR_FPE 11 +#define ECR_COPROC_ABSENT 12 +#define ECR_ADDR_ALIGN_R 13 +#define ECR_ADDR_ALIGN_W 14 +#define ECR_PROTECTION_R 15 +#define ECR_PROTECTION_W 16 +#define 
ECR_DTLB_MODIFIED 17 +#define ECR_TLB_MISS_X 20 +#define ECR_TLB_MISS_R 24 +#define ECR_TLB_MISS_W 28 /* Bit manipulation macros */ -#define SYSREG_BIT(name) (1 << SYSREG_##name##_OFFSET) -#define SYSREG_BF(name,value) (((value) & ((1 << SYSREG_##name##_SIZE) - 1)) << SYSREG_##name##_OFFSET) -#define SYSREG_BFEXT(name,value) (((value) >> SYSREG_##name##_OFFSET) & ((1 << SYSREG_##name##_SIZE) - 1)) -#define SYSREG_BFINS(name,value,old) (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1) << SYSREG_##name##_OFFSET)) | SYSREG_BF(name,value)) +#define SYSREG_BIT(name) \ + (1 << SYSREG_##name##_OFFSET) +#define SYSREG_BF(name,value) \ + (((value) & ((1 << SYSREG_##name##_SIZE) - 1)) \ + << SYSREG_##name##_OFFSET) +#define SYSREG_BFEXT(name,value)\ + (((value) >> SYSREG_##name##_OFFSET) \ + & ((1 << SYSREG_##name##_SIZE) - 1)) +#define SYSREG_BFINS(name,value,old) \ + (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1) \ + << SYSREG_##name##_OFFSET)) \ + | SYSREG_BF(name,value)) +/* Register access macros */ #ifdef __CHECKER__ extern unsigned long __builtin_mfsr(unsigned long reg); extern void __builtin_mtsr(unsigned long reg, unsigned long value); #endif -/* Register access macros */ -#define sysreg_read(reg) __builtin_mfsr(SYSREG_##reg) -#define sysreg_write(reg, value) __builtin_mtsr(SYSREG_##reg, value) +#define sysreg_read(reg) __builtin_mfsr(SYSREG_##reg) +#define sysreg_write(reg, value) __builtin_mtsr(SYSREG_##reg, value) -#endif /* __ASM_AVR32_SYSREG_H__ */ +#endif /* __ASM_AVR32_SYSREG_H */ -- cgit v1.2.3-59-g8ed1b From 3b328c98093702c584692bffabd440800b383d73 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 13 Mar 2007 15:30:38 +0100 Subject: [AVR32] Clean up cpu identification and add features bitmap Clean up the cpu identification code, using definitions from instead of hardcoded constants. Also, add a features bitmap to struct avr32_cpuinfo to allow other code to make decisions based upon what the running cpu is actually capable of. 
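As a rough illustration of that intent, code elsewhere in the kernel could key off the bitmap instead of re-parsing CONFIG0 by hand. This is only a sketch built on the AVR32_FEATURE_* constants and the SYSREG_BFEXT() macro introduced in the diffs below; the helper names are invented for the example:

#include <asm/processor.h>
#include <asm/sysreg.h>

/* Hypothetical helper: true if the running CPU has an FPU */
static inline int avr32_cpu_has_fpu(void)
{
	/* features is filled in from CONFIG0 by setup_processor() */
	return (current_cpu_data.features & AVR32_FEATURE_FPU) != 0;
}

/* The same bit can also be read straight out of the hardware register */
static inline int avr32_config0_has_fpu(void)
{
	return SYSREG_BFEXT(CONFIG0_F, sysreg_read(CONFIG0));
}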
Signed-off-by: Haavard Skinnemoen --- arch/avr32/kernel/cpu.c | 64 +++++++++++++++++++++++++++++-------------- include/asm-avr32/processor.h | 9 ++++++ 2 files changed, 53 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/arch/avr32/kernel/cpu.c b/arch/avr32/kernel/cpu.c index 2e72fd2699df..2714cf6452b5 100644 --- a/arch/avr32/kernel/cpu.c +++ b/arch/avr32/kernel/cpu.c @@ -209,16 +209,17 @@ static const char *mmu_types[] = { void __init setup_processor(void) { unsigned long config0, config1; + unsigned long features; unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type; unsigned tmp; - config0 = sysreg_read(CONFIG0); /* 0x0000013e; */ - config1 = sysreg_read(CONFIG1); /* 0x01f689a2; */ - cpu_id = config0 >> 24; - cpu_rev = (config0 >> 16) & 0xff; - arch_id = (config0 >> 13) & 0x07; - arch_rev = (config0 >> 10) & 0x07; - mmu_type = (config0 >> 7) & 0x03; + config0 = sysreg_read(CONFIG0); + config1 = sysreg_read(CONFIG1); + cpu_id = SYSREG_BFEXT(PROCESSORID, config0); + cpu_rev = SYSREG_BFEXT(PROCESSORREVISION, config0); + arch_id = SYSREG_BFEXT(AT, config0); + arch_rev = SYSREG_BFEXT(AR, config0); + mmu_type = SYSREG_BFEXT(MMUT, config0); boot_cpu_data.arch_type = arch_id; boot_cpu_data.cpu_type = cpu_id; @@ -226,16 +227,16 @@ void __init setup_processor(void) boot_cpu_data.cpu_revision = cpu_rev; boot_cpu_data.tlb_config = mmu_type; - tmp = (config1 >> 13) & 0x07; + tmp = SYSREG_BFEXT(ILSZ, config1); if (tmp) { - boot_cpu_data.icache.ways = 1 << ((config1 >> 10) & 0x07); - boot_cpu_data.icache.sets = 1 << ((config1 >> 16) & 0x0f); + boot_cpu_data.icache.ways = 1 << SYSREG_BFEXT(IASS, config1); + boot_cpu_data.icache.sets = 1 << SYSREG_BFEXT(ISET, config1); boot_cpu_data.icache.linesz = 1 << (tmp + 1); } - tmp = (config1 >> 3) & 0x07; + tmp = SYSREG_BFEXT(DLSZ, config1); if (tmp) { - boot_cpu_data.dcache.ways = 1 << (config1 & 0x07); - boot_cpu_data.dcache.sets = 1 << ((config1 >> 6) & 0x0f); + boot_cpu_data.dcache.ways = 1 << SYSREG_BFEXT(DASS, config1); + boot_cpu_data.dcache.sets = 1 << SYSREG_BFEXT(DSET, config1); boot_cpu_data.dcache.linesz = 1 << (tmp + 1); } @@ -250,16 +251,39 @@ void __init setup_processor(void) cpu_names[cpu_id], cpu_id, cpu_rev, arch_names[arch_id], arch_rev); printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]); + printk ("CPU: features:"); - if (config0 & (1 << 6)) - printk(" fpu"); - if (config0 & (1 << 5)) - printk(" java"); - if (config0 & (1 << 4)) - printk(" perfctr"); - if (config0 & (1 << 3)) + features = 0; + if (config0 & SYSREG_BIT(CONFIG0_R)) { + features |= AVR32_FEATURE_RMW; + printk(" rmw"); + } + if (config0 & SYSREG_BIT(CONFIG0_D)) { + features |= AVR32_FEATURE_DSP; + printk(" dsp"); + } + if (config0 & SYSREG_BIT(CONFIG0_S)) { + features |= AVR32_FEATURE_SIMD; + printk(" simd"); + } + if (config0 & SYSREG_BIT(CONFIG0_O)) { + features |= AVR32_FEATURE_OCD; printk(" ocd"); + } + if (config0 & SYSREG_BIT(CONFIG0_P)) { + features |= AVR32_FEATURE_PCTR; + printk(" perfctr"); + } + if (config0 & SYSREG_BIT(CONFIG0_J)) { + features |= AVR32_FEATURE_JAVA; + printk(" java"); + } + if (config0 & SYSREG_BIT(CONFIG0_F)) { + features |= AVR32_FEATURE_FPU; + printk(" fpu"); + } printk("\n"); + boot_cpu_data.features = features; } #ifdef CONFIG_PROC_FS diff --git a/include/asm-avr32/processor.h b/include/asm-avr32/processor.h index f6913778a45f..cbb89ccd3f05 100644 --- a/include/asm-avr32/processor.h +++ b/include/asm-avr32/processor.h @@ -40,6 +40,14 @@ enum tlb_config { TLB_INVALID }; +#define AVR32_FEATURE_RMW (1 << 0) +#define 
AVR32_FEATURE_DSP (1 << 1) +#define AVR32_FEATURE_SIMD (1 << 2) +#define AVR32_FEATURE_OCD (1 << 3) +#define AVR32_FEATURE_PCTR (1 << 4) +#define AVR32_FEATURE_JAVA (1 << 5) +#define AVR32_FEATURE_FPU (1 << 6) + struct avr32_cpuinfo { struct clk *clk; unsigned long loops_per_jiffy; @@ -48,6 +56,7 @@ struct avr32_cpuinfo { unsigned short arch_revision; unsigned short cpu_revision; enum tlb_config tlb_config; + unsigned long features; struct cache_info icache; struct cache_info dcache; -- cgit v1.2.3-59-g8ed1b From 623b0355d5b1f9c6d05005b649a2f3a7b9fd7816 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 13 Mar 2007 17:59:11 +0100 Subject: [AVR32] Clean up exception handling code * Use generic BUG() handling * Remove some useless debug statements * Use a common function _exception() to send signals or oops when an exception can't be handled. This makes sure init doesn't enter an infinite exception loop as well. Borrowed from powerpc. * Add some basic exception tracing support to the page fault code. * Rework dump_stack(), show_regs() and friends and move everything into process.c * Print information about configuration options and chip type when oopsing Signed-off-by: Haavard Skinnemoen --- arch/avr32/Kconfig | 5 + arch/avr32/kernel/module.c | 11 +- arch/avr32/kernel/process.c | 188 ++++++++++++++++-- arch/avr32/kernel/traps.c | 421 ++++++++++++---------------------------- arch/avr32/kernel/vmlinux.lds.c | 9 +- arch/avr32/mm/fault.c | 116 ++++------- include/asm-avr32/bug.h | 50 +++-- include/asm-avr32/processor.h | 6 +- include/asm-avr32/system.h | 13 +- 9 files changed, 402 insertions(+), 417 deletions(-) (limited to 'include') diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index 257b83648cb7..7a1eb445c17f 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -68,6 +68,11 @@ config GENERIC_CALIBRATE_DELAY bool default y +config GENERIC_BUG + bool + default y + depends on BUG + source "init/Kconfig" menu "System Type and features" diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c index b599eae64576..1167fe9cf6c4 100644 --- a/arch/avr32/kernel/module.c +++ b/arch/avr32/kernel/module.c @@ -12,10 +12,11 @@ * published by the Free Software Foundation. 
*/ -#include -#include -#include +#include #include +#include +#include +#include #include void *module_alloc(unsigned long size) @@ -315,10 +316,10 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, vfree(module->arch.syminfo); module->arch.syminfo = NULL; - return 0; + return module_bug_finalize(hdr, sechdrs, module); } void module_arch_cleanup(struct module *module) { - + module_bug_cleanup(module); } diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index 4f8d2d474740..4e4181ed1c6d 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -115,39 +116,178 @@ void release_thread(struct task_struct *dead_task) /* do nothing */ } +static void dump_mem(const char *str, const char *log_lvl, + unsigned long bottom, unsigned long top) +{ + unsigned long p; + int i; + + printk("%s%s(0x%08lx to 0x%08lx)\n", log_lvl, str, bottom, top); + + for (p = bottom & ~31; p < top; ) { + printk("%s%04lx: ", log_lvl, p & 0xffff); + + for (i = 0; i < 8; i++, p += 4) { + unsigned int val; + + if (p < bottom || p >= top) + printk(" "); + else { + if (__get_user(val, (unsigned int __user *)p)) { + printk("\n"); + goto out; + } + printk("%08x ", val); + } + } + printk("\n"); + } + +out: + return; +} + +static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p) +{ + return (p > (unsigned long)tinfo) + && (p < (unsigned long)tinfo + THREAD_SIZE - 3); +} + +#ifdef CONFIG_FRAME_POINTER +static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs, const char *log_lvl) +{ + unsigned long lr, fp; + struct thread_info *tinfo; + + if (regs) + fp = regs->r7; + else if (tsk == current) + asm("mov %0, r7" : "=r"(fp)); + else + fp = tsk->thread.cpu_context.r7; + + /* + * Walk the stack as long as the frame pointer (a) is within + * the kernel stack of the task, and (b) it doesn't move + * downwards. 
+ */ + tinfo = task_thread_info(tsk); + printk("%sCall trace:\n", log_lvl); + while (valid_stack_ptr(tinfo, fp)) { + unsigned long new_fp; + + lr = *(unsigned long *)fp; +#ifdef CONFIG_KALLSYMS + printk("%s [<%08lx>] ", log_lvl, lr); +#else + printk(" [<%08lx>] ", lr); +#endif + print_symbol("%s\n", lr); + + new_fp = *(unsigned long *)(fp + 4); + if (new_fp <= fp) + break; + fp = new_fp; + } + printk("\n"); +} +#else +static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs, const char *log_lvl) +{ + unsigned long addr; + + printk("%sCall trace:\n", log_lvl); + + while (!kstack_end(sp)) { + addr = *sp++; + if (kernel_text_address(addr)) { +#ifdef CONFIG_KALLSYMS + printk("%s [<%08lx>] ", log_lvl, addr); +#else + printk(" [<%08lx>] ", addr); +#endif + print_symbol("%s\n", addr); + } + } + printk("\n"); +} +#endif + +void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp, + struct pt_regs *regs, const char *log_lvl) +{ + struct thread_info *tinfo; + + if (sp == 0) { + if (tsk) + sp = tsk->thread.cpu_context.ksp; + else + sp = (unsigned long)&tinfo; + } + if (!tsk) + tsk = current; + + tinfo = task_thread_info(tsk); + + if (valid_stack_ptr(tinfo, sp)) { + dump_mem("Stack: ", log_lvl, sp, + THREAD_SIZE + (unsigned long)tinfo); + show_trace_log_lvl(tsk, (unsigned long *)sp, regs, log_lvl); + } +} + +void show_stack(struct task_struct *tsk, unsigned long *stack) +{ + show_stack_log_lvl(tsk, (unsigned long)stack, NULL, ""); +} + +void dump_stack(void) +{ + unsigned long stack; + + show_trace_log_lvl(current, &stack, NULL, ""); +} +EXPORT_SYMBOL(dump_stack); + static const char *cpu_modes[] = { "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1", "Interrupt level 2", "Interrupt level 3", "Exception", "NMI" }; -void show_regs(struct pt_regs *regs) +void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl) { unsigned long sp = regs->sp; unsigned long lr = regs->lr; unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT; - if (!user_mode(regs)) + if (!user_mode(regs)) { sp = (unsigned long)regs + FRAME_SIZE_FULL; - print_symbol("PC is at %s\n", instruction_pointer(regs)); - print_symbol("LR is at %s\n", lr); - printk("pc : [<%08lx>] lr : [<%08lx>] %s\n" - "sp : %08lx r12: %08lx r11: %08lx\n", - instruction_pointer(regs), - lr, print_tainted(), sp, regs->r12, regs->r11); - printk("r10: %08lx r9 : %08lx r8 : %08lx\n", - regs->r10, regs->r9, regs->r8); - printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", - regs->r7, regs->r6, regs->r5, regs->r4); - printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", - regs->r3, regs->r2, regs->r1, regs->r0); - printk("Flags: %c%c%c%c%c\n", + printk("%s", log_lvl); + print_symbol("PC is at %s\n", instruction_pointer(regs)); + printk("%s", log_lvl); + print_symbol("LR is at %s\n", lr); + } + + printk("%spc : [<%08lx>] lr : [<%08lx>] %s\n" + "%ssp : %08lx r12: %08lx r11: %08lx\n", + log_lvl, instruction_pointer(regs), lr, print_tainted(), + log_lvl, sp, regs->r12, regs->r11); + printk("%sr10: %08lx r9 : %08lx r8 : %08lx\n", + log_lvl, regs->r10, regs->r9, regs->r8); + printk("%sr7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", + log_lvl, regs->r7, regs->r6, regs->r5, regs->r4); + printk("%sr3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", + log_lvl, regs->r3, regs->r2, regs->r1, regs->r0); + printk("%sFlags: %c%c%c%c%c\n", log_lvl, regs->sr & SR_Q ? 'Q' : 'q', regs->sr & SR_V ? 'V' : 'v', regs->sr & SR_N ? 'N' : 'n', regs->sr & SR_Z ? 'Z' : 'z', regs->sr & SR_C ? 
'C' : 'c'); - printk("Mode bits: %c%c%c%c%c%c%c%c%c\n", + printk("%sMode bits: %c%c%c%c%c%c%c%c%c\n", log_lvl, regs->sr & SR_H ? 'H' : 'h', regs->sr & SR_R ? 'R' : 'r', regs->sr & SR_J ? 'J' : 'j', @@ -157,9 +297,21 @@ void show_regs(struct pt_regs *regs) regs->sr & SR_I1M ? '1' : '.', regs->sr & SR_I0M ? '0' : '.', regs->sr & SR_GM ? 'G' : 'g'); - printk("CPU Mode: %s\n", cpu_modes[mode]); + printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]); + printk("%sProcess: %s [%d] (task: %p thread: %p)\n", + log_lvl, current->comm, current->pid, current, + task_thread_info(current)); +} + +void show_regs(struct pt_regs *regs) +{ + unsigned long sp = regs->sp; + + if (!user_mode(regs)) + sp = (unsigned long)regs + FRAME_SIZE_FULL; - show_trace(NULL, (unsigned long *)sp, regs); + show_regs_log_lvl(regs, ""); + show_trace_log_lvl(current, (unsigned long *)sp, regs, ""); } EXPORT_SYMBOL(show_regs); diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index adc01a12d154..4f0382d8483f 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c @@ -5,158 +5,25 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#undef DEBUG -#include + +#include #include -#include #include +#include #include +#include +#include -#include -#include #include -#include #include -#include - -static void dump_mem(const char *str, unsigned long bottom, unsigned long top) -{ - unsigned long p; - int i; - - printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); - - for (p = bottom & ~31; p < top; ) { - printk("%04lx: ", p & 0xffff); - - for (i = 0; i < 8; i++, p += 4) { - unsigned int val; - - if (p < bottom || p >= top) - printk(" "); - else { - if (__get_user(val, (unsigned int __user *)p)) { - printk("\n"); - goto out; - } - printk("%08x ", val); - } - } - printk("\n"); - } - -out: - return; -} - -static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p) -{ - return (p > (unsigned long)tinfo) - && (p < (unsigned long)tinfo + THREAD_SIZE - 3); -} - -#ifdef CONFIG_FRAME_POINTER -static inline void __show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs) -{ - unsigned long lr, fp; - struct thread_info *tinfo; - - tinfo = (struct thread_info *) - ((unsigned long)sp & ~(THREAD_SIZE - 1)); - - if (regs) - fp = regs->r7; - else if (tsk == current) - asm("mov %0, r7" : "=r"(fp)); - else - fp = tsk->thread.cpu_context.r7; - - /* - * Walk the stack as long as the frame pointer (a) is within - * the kernel stack of the task, and (b) it doesn't move - * downwards. 
- */ - while (valid_stack_ptr(tinfo, fp)) { - unsigned long new_fp; - - lr = *(unsigned long *)fp; - printk(" [<%08lx>] ", lr); - print_symbol("%s\n", lr); - - new_fp = *(unsigned long *)(fp + 4); - if (new_fp <= fp) - break; - fp = new_fp; - } - printk("\n"); -} -#else -static inline void __show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs) -{ - unsigned long addr; - - while (!kstack_end(sp)) { - addr = *sp++; - if (kernel_text_address(addr)) { - printk(" [<%08lx>] ", addr); - print_symbol("%s\n", addr); - } - } -} -#endif - -void show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs) -{ - if (regs && - (((regs->sr & MODE_MASK) == MODE_EXCEPTION) || - ((regs->sr & MODE_MASK) == MODE_USER))) - return; - - printk ("Call trace:"); -#ifdef CONFIG_KALLSYMS - printk("\n"); -#endif - - __show_trace(tsk, sp, regs); - printk("\n"); -} - -void show_stack(struct task_struct *tsk, unsigned long *sp) -{ - unsigned long stack; - - if (!tsk) - tsk = current; - if (sp == 0) { - if (tsk == current) { - register unsigned long *real_sp __asm__("sp"); - sp = real_sp; - } else { - sp = (unsigned long *)tsk->thread.cpu_context.ksp; - } - } - - stack = (unsigned long)sp; - dump_mem("Stack: ", stack, - THREAD_SIZE + (unsigned long)tsk->thread_info); - show_trace(tsk, sp, NULL); -} - -void dump_stack(void) -{ - show_stack(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); +#include +#include +#include ATOMIC_NOTIFIER_HEAD(avr32_die_chain); int register_die_notifier(struct notifier_block *nb) { - pr_debug("register_die_notifier: %p\n", nb); - return atomic_notifier_chain_register(&avr32_die_chain, nb); } EXPORT_SYMBOL(register_die_notifier); @@ -169,93 +36,103 @@ EXPORT_SYMBOL(unregister_die_notifier); static DEFINE_SPINLOCK(die_lock); -void __die(const char *str, struct pt_regs *regs, unsigned long err, - const char *file, const char *func, unsigned long line) +void NORET_TYPE die(const char *str, struct pt_regs *regs, long err) { - struct task_struct *tsk = current; static int die_counter; console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); - printk(KERN_ALERT "%s", str); - if (file && func) - printk(" in %s:%s, line %ld", file, func, line); - printk("[#%d]:\n", ++die_counter); - print_modules(); - show_regs(regs); - printk("Process %s (pid: %d, stack limit = 0x%p)\n", - tsk->comm, tsk->pid, tsk->thread_info + 1); - - if (!user_mode(regs) || in_interrupt()) { - dump_mem("Stack: ", regs->sp, - THREAD_SIZE + (unsigned long)tsk->thread_info); + printk(KERN_ALERT "Oops: %s, sig: %ld [#%d]\n" KERN_EMERG, + str, err, ++die_counter); +#ifdef CONFIG_PREEMPT + printk("PREEMPT "); +#endif +#ifdef CONFIG_FRAME_POINTER + printk("FRAME_POINTER "); +#endif + if (current_cpu_data.features & AVR32_FEATURE_OCD) { + unsigned long did = __mfdr(DBGREG_DID); + printk("chip: 0x%03lx:0x%04lx rev %lu\n", + (did >> 1) & 0x7ff, + (did >> 12) & 0x7fff, + (did >> 28) & 0xf); + } else { + printk("cpu: arch %u r%u / core %u r%u\n", + current_cpu_data.arch_type, + current_cpu_data.arch_revision, + current_cpu_data.cpu_type, + current_cpu_data.cpu_revision); } + print_modules(); + show_regs_log_lvl(regs, KERN_EMERG); + show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG); bust_spinlocks(0); spin_unlock_irq(&die_lock); - do_exit(SIGSEGV); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + + if (panic_on_oops) + panic("Fatal exception"); + + do_exit(err); } -void __die_if_kernel(const char *str, struct pt_regs *regs, unsigned long err, - const char *file, const char 
*func, unsigned long line) +void _exception(long signr, struct pt_regs *regs, int code, + unsigned long addr) { + siginfo_t info; + if (!user_mode(regs)) - __die(str, regs, err, file, func, line); -} + die("Unhandled exception in kernel mode", regs, signr); + + memset(&info, 0, sizeof(info)); + info.si_signo = signr; + info.si_code = code; + info.si_addr = (void __user *)addr; + force_sig_info(signr, &info, current); -asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) -{ -#ifdef CONFIG_SUBARCH_AVR32B /* - * The exception entry always saves RSR_EX. For NMI, this is - * wrong; it should be RSR_NMI + * Init gets no signals that it doesn't have a handler for. + * That's all very well, but if it has caused a synchronous + * exception and we ignore the resulting signal, it will just + * generate the same exception over and over again and we get + * nowhere. Better to kill it and let the kernel panic. */ - regs->sr = sysreg_read(RSR_NMI); -#endif + if (is_init(current)) { + __sighandler_t handler; + + spin_lock_irq(&current->sighand->siglock); + handler = current->sighand->action[signr-1].sa.sa_handler; + spin_unlock_irq(&current->sighand->siglock); + if (handler == SIG_DFL) { + /* init has generated a synchronous exception + and it doesn't have a handler for the signal */ + printk(KERN_CRIT "init has generated signal %ld " + "but has no handler for it\n", signr); + do_exit(signr); + } + } +} - printk("NMI taken!!!!\n"); - die("NMI", regs, ecr); - BUG(); +asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) +{ + printk(KERN_ALERT "Got Non-Maskable Interrupt, dumping regs\n"); + show_regs_log_lvl(regs, KERN_ALERT); + show_stack_log_lvl(current, regs->sp, regs, KERN_ALERT); } asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs) { - printk("Unable to handle critical exception %lu at pc = %08lx!\n", - ecr, regs->pc); - die("Oops", regs, ecr); - BUG(); + die("Critical exception", regs, SIGKILL); } asmlinkage void do_address_exception(unsigned long ecr, struct pt_regs *regs) { - siginfo_t info; - - die_if_kernel("Oops: Address exception in kernel mode", regs, ecr); - -#ifdef DEBUG - if (ecr == ECR_ADDR_ALIGN_X) - pr_debug("Instruction Address Exception at pc = %08lx\n", - regs->pc); - else if (ecr == ECR_ADDR_ALIGN_R) - pr_debug("Data Address Exception (Read) at pc = %08lx\n", - regs->pc); - else if (ecr == ECR_ADDR_ALIGN_W) - pr_debug("Data Address Exception (Write) at pc = %08lx\n", - regs->pc); - else - BUG(); - - show_regs(regs); -#endif - - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *)regs->pc; - - force_sig_info(SIGBUS, &info, current); + _exception(SIGBUS, regs, BUS_ADRALN, regs->pc); } /* This way of handling undefined instructions is stolen from ARM */ @@ -280,7 +157,8 @@ static int do_cop_absent(u32 insn) { int cop_nr; u32 cpucr; - if ( (insn & 0xfdf00000) == 0xf1900000 ) + + if ((insn & 0xfdf00000) == 0xf1900000) /* LDC0 */ cop_nr = 0; else @@ -292,136 +170,91 @@ static int do_cop_absent(u32 insn) sysreg_write(CPUCR, cpucr); cpucr = sysreg_read(CPUCR); - if ( !(cpucr & (1 << (24 + cop_nr))) ){ - printk("Coprocessor #%i not found!\n", cop_nr); - return -1; - } + if (!(cpucr & (1 << (24 + cop_nr)))) + return -ENODEV; return 0; } -#ifdef CONFIG_BUG -#ifdef CONFIG_DEBUG_BUGVERBOSE -static inline void do_bug_verbose(struct pt_regs *regs, u32 insn) -{ - char *file; - u16 line; - char c; - - if (__get_user(line, (u16 __user *)(regs->pc + 2))) - return; - if (__get_user(file, (char * __user *)(regs->pc
+ 4)) - || (unsigned long)file < PAGE_OFFSET - || __get_user(c, file)) - file = ""; - - printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line); -} -#else -static inline void do_bug_verbose(struct pt_regs *regs, u32 insn) +int is_valid_bugaddr(unsigned long pc) { + unsigned short opcode; + + if (pc < PAGE_OFFSET) + return 0; + if (probe_kernel_address((u16 *)pc, opcode)) + return 0; + return opcode == AVR32_BUG_OPCODE; } -#endif -#endif asmlinkage void do_illegal_opcode(unsigned long ecr, struct pt_regs *regs) { u32 insn; struct undef_hook *hook; - siginfo_t info; void __user *pc; + long code; - if (!user_mode(regs)) - goto kernel_trap; + if (!user_mode(regs) && (ecr == ECR_ILLEGAL_OPCODE)) { + enum bug_trap_type type; + + type = report_bug(regs->pc); + switch (type) { + case BUG_TRAP_TYPE_NONE: + break; + case BUG_TRAP_TYPE_WARN: + regs->pc += 2; + return; + case BUG_TRAP_TYPE_BUG: + die("Kernel BUG", regs, SIGKILL); + } + } local_irq_enable(); - pc = (void __user *)instruction_pointer(regs); - if (__get_user(insn, (u32 __user *)pc)) - goto invalid_area; + if (user_mode(regs)) { + pc = (void __user *)instruction_pointer(regs); + if (get_user(insn, (u32 __user *)pc)) + goto invalid_area; - if (ecr == ECR_COPROC_ABSENT) { - if (do_cop_absent(insn) == 0) + if (ecr == ECR_COPROC_ABSENT && !do_cop_absent(insn)) return; - } - spin_lock_irq(&undef_lock); - list_for_each_entry(hook, &undef_hook, node) { - if ((insn & hook->insn_mask) == hook->insn_val) { - if (hook->fn(regs, insn) == 0) { - spin_unlock_irq(&undef_lock); - return; + spin_lock_irq(&undef_lock); + list_for_each_entry(hook, &undef_hook, node) { + if ((insn & hook->insn_mask) == hook->insn_val) { + if (hook->fn(regs, insn) == 0) { + spin_unlock_irq(&undef_lock); + return; + } } } + spin_unlock_irq(&undef_lock); } - spin_unlock_irq(&undef_lock); - -invalid_area: -#ifdef DEBUG - printk("Illegal instruction at pc = %08lx\n", regs->pc); - if (regs->pc < TASK_SIZE) { - unsigned long ptbr, pgd, pte, *p; - - ptbr = sysreg_read(PTBR); - p = (unsigned long *)ptbr; - pgd = p[regs->pc >> 22]; - p = (unsigned long *)((pgd & 0x1ffff000) | 0x80000000); - pte = p[(regs->pc >> 12) & 0x3ff]; - printk("page table: 0x%08lx -> 0x%08lx -> 0x%08lx\n", ptbr, pgd, pte); - } -#endif - - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_addr = (void __user *)regs->pc; switch (ecr) { - case ECR_ILLEGAL_OPCODE: - case ECR_UNIMPL_INSTRUCTION: - info.si_code = ILL_ILLOPC; - break; case ECR_PRIVILEGE_VIOLATION: - info.si_code = ILL_PRVOPC; + code = ILL_PRVOPC; break; case ECR_COPROC_ABSENT: - info.si_code = ILL_COPROC; + code = ILL_COPROC; break; default: - BUG(); + code = ILL_ILLOPC; + break; } - force_sig_info(SIGILL, &info, current); + _exception(SIGILL, regs, code, regs->pc); return; -kernel_trap: -#ifdef CONFIG_BUG - if (__kernel_text_address(instruction_pointer(regs))) { - insn = *(u16 *)instruction_pointer(regs); - if (insn == AVR32_BUG_OPCODE) { - do_bug_verbose(regs, insn); - die("Kernel BUG", regs, 0); - return; - } - } -#endif - - die("Oops: Illegal instruction in kernel code", regs, ecr); +invalid_area: + _exception(SIGSEGV, regs, SEGV_MAPERR, regs->pc); } asmlinkage void do_fpe(unsigned long ecr, struct pt_regs *regs) { - siginfo_t info; - - printk("Floating-point exception at pc = %08lx\n", regs->pc); - - /* We have no FPU... 
*/ - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_addr = (void __user *)regs->pc; - info.si_code = ILL_COPROC; - - force_sig_info(SIGILL, &info, current); + /* We have no FPU yet */ + _exception(SIGILL, regs, ILL_COPROC, regs->pc); } diff --git a/arch/avr32/kernel/vmlinux.lds.c b/arch/avr32/kernel/vmlinux.lds.c index ef13b7c78935..7ad20cfb48a8 100644 --- a/arch/avr32/kernel/vmlinux.lds.c +++ b/arch/avr32/kernel/vmlinux.lds.c @@ -26,6 +26,12 @@ SECTIONS _sinittext = .; *(.text.reset) *(.init.text) + /* + * .exit.text is discarded at runtime, not + * link time, to deal with references from + * __bug_table + */ + *(.exit.text) _einittext = .; . = ALIGN(4); __tagtable_begin = .; @@ -86,6 +92,8 @@ SECTIONS __stop___ex_table = .; } + BUG_TABLE + RODATA . = ALIGN(8192); @@ -126,7 +134,6 @@ SECTIONS * thrown away, as cleanup code is never called unless it's a module. */ /DISCARD/ : { - *(.exit.text) *(.exit.data) *(.exitcall.exit) } diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 678557260a35..146ebdbdc302 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -16,26 +16,8 @@ #include #include #include -#include #include - -#ifdef DEBUG -static void dump_code(unsigned long pc) -{ - char *p = (char *)pc; - char val; - int i; - - - printk(KERN_DEBUG "Code:"); - for (i = 0; i < 16; i++) { - if (__get_user(val, p + i)) - break; - printk(" %02x", val); - } - printk("\n"); -} -#endif +#include #ifdef CONFIG_KPROBES ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); @@ -68,17 +50,19 @@ static inline int notify_page_fault(enum die_val val, struct pt_regs *regs, } #endif +int exception_trace = 1; + /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. * * ecr is the Exception Cause Register. Possible values are: - * 5: Page not found (instruction access) * 6: Protection fault (instruction access) - * 12: Page not found (read access) - * 13: Page not found (write access) - * 14: Protection fault (read access) - * 15: Protection fault (write access) + * 15: Protection fault (read access) + * 16: Protection fault (write access) + * 20: Page not found (instruction access) + * 24: Page not found (read access) + * 28: Page not found (write access) */ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) { @@ -88,7 +72,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) const struct exception_table_entry *fixup; unsigned long address; unsigned long page; - int writeaccess = 0; + int writeaccess; + long signr; + int code; if (notify_page_fault(DIE_PAGE_FAULT, regs, ecr, SIGSEGV) == NOTIFY_STOP) @@ -99,6 +85,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) tsk = current; mm = tsk->mm; + signr = SIGSEGV; + code = SEGV_MAPERR; + /* * If we're in an interrupt or have no user context, we must * not take the fault... @@ -125,7 +114,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) * can handle it... */ good_area: - //pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags); + code = SEGV_ACCERR; + writeaccess = 0; + switch (ecr) { case ECR_PROTECTION_X: case ECR_TLB_MISS_X: @@ -176,46 +167,24 @@ survive: * map. Fix it, but check if it's kernel or user first... */ bad_area: - pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n", - tsk->comm, tsk->pid, address, ecr); - up_read(&mm->mmap_sem); if (user_mode(regs)) { - /* Hmm...we have to pass address and ecr somehow... 
*/ - /* tsk->thread.address = address; - tsk->thread.error_code = ecr; */ -#ifdef DEBUG - show_regs(regs); - dump_code(regs->pc); - - page = sysreg_read(PTBR); - printk("ptbr = %08lx", page); - if (page) { - page = ((unsigned long *)page)[address >> 22]; - printk(" pgd = %08lx", page); - if (page & _PAGE_PRESENT) { - page &= PAGE_MASK; - address &= 0x003ff000; - page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT]; - printk(" pte = %08lx\n", page); - } - } -#endif - pr_debug("Sending SIGSEGV to PID %d...\n", - tsk->pid); - force_sig(SIGSEGV, tsk); + if (exception_trace) + printk("%s%s[%d]: segfault at %08lx pc %08lx " + "sp %08lx ecr %lu\n", + is_init(tsk) ? KERN_EMERG : KERN_INFO, + tsk->comm, tsk->pid, address, regs->pc, + regs->sp, ecr); + _exception(SIGSEGV, regs, code, address); return; } no_context: - pr_debug("No context\n"); - /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->pc); if (fixup) { regs->pc = fixup->fixup; - pr_debug("Found fixup at %08lx\n", fixup->fixup); return; } @@ -230,7 +199,6 @@ no_context: printk(KERN_ALERT "Unable to handle kernel paging request"); printk(" at virtual address %08lx\n", address); - printk(KERN_ALERT "pc = %08lx\n", regs->pc); page = sysreg_read(PTBR); printk(KERN_ALERT "ptbr = %08lx", page); @@ -241,20 +209,20 @@ no_context: page &= PAGE_MASK; address &= 0x003ff000; page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT]; - printk(" pte = %08lx\n", page); + printk(" pte = %08lx", page); } } - die("\nOops", regs, ecr); - do_exit(SIGKILL); + printk("\n"); + die("Kernel access of bad area", regs, signr); + return; /* * We ran out of memory, or some other thing happened to us * that made us unable to handle the page fault gracefully. */ out_of_memory: - printk("Out of memory\n"); up_read(&mm->mmap_sem); - if (current->pid == 1) { + if (is_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; @@ -267,21 +235,20 @@ out_of_memory: do_sigbus: up_read(&mm->mmap_sem); - /* - * Send a sigbus, regardless of whether we were in kernel or - * user mode. - */ - /* address, error_code, trap_no, ... */ -#ifdef DEBUG - show_regs(regs); - dump_code(regs->pc); -#endif - pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid); - force_sig(SIGBUS, tsk); - /* Kernel mode? Handle exceptions or die */ + signr = SIGBUS; + code = BUS_ADRERR; if (!user_mode(regs)) goto no_context; + + if (exception_trace) + printk("%s%s[%d]: bus error at %08lx pc %08lx " + "sp %08lx ecr %lu\n", + is_init(tsk) ? KERN_EMERG : KERN_INFO, + tsk->comm, tsk->pid, address, regs->pc, + regs->sp, ecr); + + _exception(SIGBUS, regs, BUS_ADRERR, address); } asmlinkage void do_bus_error(unsigned long addr, int write_access, @@ -292,8 +259,7 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access, addr, write_access ? 
"write" : "read"); printk(KERN_INFO "DTLB dump:\n"); dump_dtlb(); - die("Bus Error", regs, write_access); - do_exit(SIGKILL); + die("Bus Error", regs, SIGKILL); } /* diff --git a/include/asm-avr32/bug.h b/include/asm-avr32/bug.h index 521766bc9366..afdcd79a2966 100644 --- a/include/asm-avr32/bug.h +++ b/include/asm-avr32/bug.h @@ -18,27 +18,53 @@ #ifdef CONFIG_DEBUG_BUGVERBOSE -#define BUG() \ - do { \ - asm volatile(".hword %0\n\t" \ - ".hword %1\n\t" \ - ".long %2" \ - : \ - : "n"(AVR32_BUG_OPCODE), \ - "i"(__LINE__), "X"(__FILE__)); \ - } while (0) +#define _BUG_OR_WARN(flags) \ + asm volatile( \ + "1: .hword %0\n" \ + " .section __bug_table,\"a\",@progbits\n" \ + "2: .long 1b\n" \ + " .long %1\n" \ + " .short %2\n" \ + " .short %3\n" \ + " .org 2b + %4\n" \ + " .previous" \ + : \ + : "i"(AVR32_BUG_OPCODE), "i"(__FILE__), \ + "i"(__LINE__), "i"(flags), \ + "i"(sizeof(struct bug_entry))) #else +#define _BUG_OR_WARN(flags) \ + asm volatile( \ + "1: .hword %0\n" \ + " .section __bug_table,\"a\",@progbits\n" \ + "2: .long 1b\n" \ + " .short %1\n" \ + " .org 2b + %2\n" \ + " .previous" \ + : \ + : "i"(AVR32_BUG_OPCODE), "i"(flags), \ + "i"(sizeof(struct bug_entry))) + +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + #define BUG() \ do { \ - asm volatile(".hword %0\n\t" \ - : : "n"(AVR32_BUG_OPCODE)); \ + _BUG_OR_WARN(0); \ + for (;;); \ } while (0) -#endif /* CONFIG_DEBUG_BUGVERBOSE */ +#define WARN_ON(condition) \ + ({ \ + typeof(condition) __ret_warn_on = (condition); \ + if (unlikely(__ret_warn_on)) \ + _BUG_OR_WARN(BUGFLAG_WARNING); \ + unlikely(__ret_warn_on); \ + }) #define HAVE_ARCH_BUG +#define HAVE_ARCH_WARN_ON #endif /* CONFIG_BUG */ diff --git a/include/asm-avr32/processor.h b/include/asm-avr32/processor.h index cbb89ccd3f05..6a64833756a6 100644 --- a/include/asm-avr32/processor.h +++ b/include/asm-avr32/processor.h @@ -134,10 +134,10 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); #define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc) struct pt_regs; -void show_trace(struct task_struct *task, unsigned long *stack, - struct pt_regs *regs); - extern unsigned long get_wchan(struct task_struct *p); +extern void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl); +extern void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp, + struct pt_regs *regs, const char *log_lvl); #define KSTK_EIP(tsk) ((tsk)->thread.cpu_context.pc) #define KSTK_ESP(tsk) ((tsk)->thread.cpu_context.ksp) diff --git a/include/asm-avr32/system.h b/include/asm-avr32/system.h index ac596058697d..a8236bacc878 100644 --- a/include/asm-avr32/system.h +++ b/include/asm-avr32/system.h @@ -9,6 +9,7 @@ #define __ASM_AVR32_SYSTEM_H #include +#include #include #include @@ -140,15 +141,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, sizeof(*(ptr)))) struct pt_regs; -extern void __die(const char *, struct pt_regs *, unsigned long, - const char *, const char *, unsigned long); -extern void __die_if_kernel(const char *, struct pt_regs *, unsigned long, - const char *, const char *, unsigned long); - -#define die(msg, regs, err) \ - __die(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__) -#define die_if_kernel(msg, regs, err) \ - __die_if_kernel(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__) +void NORET_TYPE die(const char *str, struct pt_regs *regs, long err); +void _exception(long signr, struct pt_regs *regs, int code, + unsigned long addr); #define arch_align_stack(x) (x) -- cgit v1.2.3-59-g8ed1b From 
e3e7d8d4ea37b8372ee417452d03171c5dc55125 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Mon, 12 Feb 2007 16:28:56 +0100 Subject: [AVR32] Make I/O access macros work with external devices Fix the I/O access macros so that they work with externally connected devices accessed in little-endian mode over any bus width: * Use a set of macros to define I/O port- and memory operations borrowed from MIPS. * Allow subarchitecture to specify address- and data-mangling * Implement at32ap-specific port mangling (with build-time configurable bus width. Only one bus width at a time supported for now.) * Rewrite iowriteN and friends to use write[bwl] and friends (not the __raw counterparts.) This has been tested using pata_pcmcia to access a CompactFlash card connected to the EBI (16-bit bus width.) Signed-off-by: Haavard Skinnemoen --- arch/avr32/mach-at32ap/Kconfig | 26 ++- include/asm-avr32/arch-at32ap/io.h | 39 +++++ include/asm-avr32/io.h | 335 ++++++++++++++++++++----------------- 3 files changed, 247 insertions(+), 153 deletions(-) create mode 100644 include/asm-avr32/arch-at32ap/io.h (limited to 'include') diff --git a/arch/avr32/mach-at32ap/Kconfig b/arch/avr32/mach-at32ap/Kconfig index d7497715cc02..eb307838457b 100644 --- a/arch/avr32/mach-at32ap/Kconfig +++ b/arch/avr32/mach-at32ap/Kconfig @@ -2,6 +2,30 @@ if PLATFORM_AT32AP menu "Atmel AVR32 AP options" +choice + prompt "AT32AP7000 static memory bus width" + depends on CPU_AT32AP7000 + default AP7000_16_BIT_SMC + help + Define the width of the AP7000 external static memory interface. + This is used to determine how to mangle the address and/or data + when doing little-endian port access. + + The current code can only support a single external memory bus + width for all chip selects, excluding the flash (which is using + raw access and is thus not affected by any of this.) 
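To make the mangling described above concrete: the CONFIG_AP7000_16_BIT_SMC branch of the new arch-at32ap/io.h (in the hunk that follows) XORs byte addresses with 1 and byte-swaps 16-bit data for the __mem_ accessors with swab16(). Below is a minimal, stand-alone user-space sketch of just those two transformations; the chip-select base address is invented for illustration and swab16() is a local helper, not the kernel's.

/*
 * Illustrative sketch of two of the 16-bit SMC mangling rules:
 * byte addresses are XORed with 1 (to pick the right byte lane on
 * a 16-bit external bus) and 16-bit data is byte-swapped.  The
 * base address is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));	/* local helper */
}

static unsigned long swizzle_addr_b(unsigned long addr)
{
	return addr ^ 1UL;	/* swap byte lanes on a 16-bit bus */
}

int main(void)
{
	unsigned long base = 0x04000000;	/* hypothetical chip select */
	uint16_t reg = 0x1234;

	printf("CPU byte access at %#lx hits bus address %#lx\n",
	       base + 0, swizzle_addr_b(base + 0));
	printf("CPU byte access at %#lx hits bus address %#lx\n",
	       base + 1, swizzle_addr_b(base + 1));
	printf("16-bit value %#06x goes on the bus as %#06x\n",
	       (unsigned)reg, (unsigned)swab16(reg));
	return 0;
}

The 32-bit SMC case in the same header differs only in the constants (addresses XORed with 3 for bytes and 2 for halfwords), as the #if branches below show.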
+ +config AP7000_32_BIT_SMC + bool "32 bit" + +config AP7000_16_BIT_SMC + bool "16 bit" + +config AP7000_8_BIT_SMC + bool "8 bit" + +endchoice + endmenu -endif +endif # PLATFORM_AT32AP diff --git a/include/asm-avr32/arch-at32ap/io.h b/include/asm-avr32/arch-at32ap/io.h new file mode 100644 index 000000000000..ee59e401f041 --- /dev/null +++ b/include/asm-avr32/arch-at32ap/io.h @@ -0,0 +1,39 @@ +#ifndef __ASM_AVR32_ARCH_AT32AP_IO_H +#define __ASM_AVR32_ARCH_AT32AP_IO_H + +/* For "bizarre" halfword swapping */ +#include + +#if defined(CONFIG_AP7000_32_BIT_SMC) +# define __swizzle_addr_b(addr) (addr ^ 3UL) +# define __swizzle_addr_w(addr) (addr ^ 2UL) +# define __swizzle_addr_l(addr) (addr) +# define ioswabb(a, x) (x) +# define ioswabw(a, x) (x) +# define ioswabl(a, x) (x) +# define __mem_ioswabb(a, x) (x) +# define __mem_ioswabw(a, x) swab16(x) +# define __mem_ioswabl(a, x) swab32(x) +#elif defined(CONFIG_AP7000_16_BIT_SMC) +# define __swizzle_addr_b(addr) (addr ^ 1UL) +# define __swizzle_addr_w(addr) (addr) +# define __swizzle_addr_l(addr) (addr) +# define ioswabb(a, x) (x) +# define ioswabw(a, x) (x) +# define ioswabl(a, x) swahw32(x) +# define __mem_ioswabb(a, x) (x) +# define __mem_ioswabw(a, x) swab16(x) +# define __mem_ioswabl(a, x) swahb32(x) +#else +# define __swizzle_addr_b(addr) (addr) +# define __swizzle_addr_w(addr) (addr) +# define __swizzle_addr_l(addr) (addr) +# define ioswabb(a, x) (x) +# define ioswabw(a, x) swab16(x) +# define ioswabl(a, x) swab32(x) +# define __mem_ioswabb(a, x) (x) +# define __mem_ioswabw(a, x) (x) +# define __mem_ioswabl(a, x) (x) +#endif + +#endif /* __ASM_AVR32_ARCH_AT32AP_IO_H */ diff --git a/include/asm-avr32/io.h b/include/asm-avr32/io.h index c08e81048393..27b1523d42e0 100644 --- a/include/asm-avr32/io.h +++ b/include/asm-avr32/io.h @@ -1,13 +1,15 @@ #ifndef __ASM_AVR32_IO_H #define __ASM_AVR32_IO_H +#include #include - -#ifdef __KERNEL__ +#include #include #include +#include + /* virt_to_phys will only work when address is in P1 or P2 */ static __inline__ unsigned long virt_to_phys(volatile void *address) { @@ -36,204 +38,235 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen); extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen); extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); -static inline void writeb(unsigned char b, volatile void __iomem *addr) +static inline void __raw_writeb(u8 v, volatile void __iomem *addr) { - *(volatile unsigned char __force *)addr = b; + *(volatile u8 __force *)addr = v; } -static inline void writew(unsigned short b, volatile void __iomem *addr) +static inline void __raw_writew(u16 v, volatile void __iomem *addr) { - *(volatile unsigned short __force *)addr = b; + *(volatile u16 __force *)addr = v; } -static inline void writel(unsigned int b, volatile void __iomem *addr) +static inline void __raw_writel(u32 v, volatile void __iomem *addr) { - *(volatile unsigned int __force *)addr = b; + *(volatile u32 __force *)addr = v; } -#define __raw_writeb writeb -#define __raw_writew writew -#define __raw_writel writel -static inline unsigned char readb(const volatile void __iomem *addr) +static inline u8 __raw_readb(const volatile void __iomem *addr) { - return *(const volatile unsigned char __force *)addr; + return *(const volatile u8 __force *)addr; } -static inline unsigned short readw(const volatile void __iomem *addr) +static inline u16 __raw_readw(const volatile void __iomem *addr) { - return *(const volatile unsigned short __force *)addr; + 
return *(const volatile u16 __force *)addr; } -static inline unsigned int readl(const volatile void __iomem *addr) +static inline u32 __raw_readl(const volatile void __iomem *addr) { - return *(const volatile unsigned int __force *)addr; + return *(const volatile u32 __force *)addr; +} + +/* Convert I/O port address to virtual address */ +#ifndef __io +# define __io(p) ((void *)phys_to_uncached(p)) +#endif + +/* + * Not really sure about the best way to slow down I/O on + * AVR32. Defining it as a no-op until we have an actual test case. + */ +#define SLOW_DOWN_IO do { } while (0) + +#define __BUILD_MEMORY_SINGLE(pfx, bwl, type) \ +static inline void \ +pfx##write##bwl(type val, volatile void __iomem *addr) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr)); \ + __val = pfx##ioswab##bwl(__addr, val); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + *__addr = __val; \ +} \ + \ +static inline type pfx##read##bwl(const volatile void __iomem *addr) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr)); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + __val = *__addr; \ + return pfx##ioswab##bwl(__addr, __val); \ +} + +#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow) \ +static inline void pfx##out##bwl##p(type val, unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = __io(__swizzle_addr_##bwl(port)); \ + __val = pfx##ioswab##bwl(__addr, val); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + *__addr = __val; \ + slow; \ +} \ + \ +static inline type pfx##in##bwl##p(unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = __io(__swizzle_addr_##bwl(port)); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + __val = *__addr; \ + slow; \ + \ + return pfx##ioswab##bwl(__addr, __val); \ } -#define __raw_readb readb -#define __raw_readw readw -#define __raw_readl readl -#define writesb(p, d, l) __raw_writesb((unsigned int)p, d, l) -#define writesw(p, d, l) __raw_writesw((unsigned int)p, d, l) -#define writesl(p, d, l) __raw_writesl((unsigned int)p, d, l) +#define __BUILD_MEMORY_PFX(bus, bwl, type) \ + __BUILD_MEMORY_SINGLE(bus, bwl, type) + +#define BUILDIO_MEM(bwl, type) \ + __BUILD_MEMORY_PFX(, bwl, type) \ + __BUILD_MEMORY_PFX(__mem_, bwl, type) + +#define __BUILD_IOPORT_PFX(bus, bwl, type) \ + __BUILD_IOPORT_SINGLE(bus, bwl, type, ,) \ + __BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO) + +#define BUILDIO_IOPORT(bwl, type) \ + __BUILD_IOPORT_PFX(, bwl, type) \ + __BUILD_IOPORT_PFX(__mem_, bwl, type) + +BUILDIO_MEM(b, u8) +BUILDIO_MEM(w, u16) +BUILDIO_MEM(l, u32) + +BUILDIO_IOPORT(b, u8) +BUILDIO_IOPORT(w, u16) +BUILDIO_IOPORT(l, u32) + +#define readb_relaxed readb +#define readw_relaxed readw +#define readl_relaxed readl + +#define __BUILD_MEMORY_STRING(bwl, type) \ +static inline void writes##bwl(volatile void __iomem *addr, \ + const void *data, unsigned int count) \ +{ \ + const type *__data = data; \ + \ + while (count--) \ + __mem_write##bwl(*__data++, addr); \ +} \ + \ +static inline void reads##bwl(const volatile void __iomem *addr, \ + void *data, unsigned int count) \ +{ \ + type *__data = data; \ + \ + while (count--) \ + *__data++ = __mem_read##bwl(addr); \ +} + +#define __BUILD_IOPORT_STRING(bwl, type) \ +static inline void outs##bwl(unsigned long port, const void *data, \ + unsigned int count) \ +{ \ + const type 
*__data = data; \ + \ + while (count--) \ + __mem_out##bwl(*__data++, port); \ +} \ + \ +static inline void ins##bwl(unsigned long port, void *data, \ + unsigned int count) \ +{ \ + type *__data = data; \ + \ + while (count--) \ + *__data++ = __mem_in##bwl(port); \ +} -#define readsb(p, d, l) __raw_readsb((unsigned int)p, d, l) -#define readsw(p, d, l) __raw_readsw((unsigned int)p, d, l) -#define readsl(p, d, l) __raw_readsl((unsigned int)p, d, l) +#define BUILDSTRING(bwl, type) \ + __BUILD_MEMORY_STRING(bwl, type) \ + __BUILD_IOPORT_STRING(bwl, type) +BUILDSTRING(b, u8) +BUILDSTRING(w, u16) +BUILDSTRING(l, u32) /* * io{read,write}{8,16,32} macros in both le (for PCI style consumers) and native be */ #ifndef ioread8 -#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __v; }) +#define ioread8(p) ((unsigned int)readb(p)) -#define ioread16(p) ({ unsigned int __v = le16_to_cpu(__raw_readw(p)); __v; }) -#define ioread16be(p) ({ unsigned int __v = be16_to_cpu(__raw_readw(p)); __v; }) +#define ioread16(p) ((unsigned int)readw(p)) +#define ioread16be(p) ((unsigned int)__raw_readw(p)) -#define ioread32(p) ({ unsigned int __v = le32_to_cpu(__raw_readl(p)); __v; }) -#define ioread32be(p) ({ unsigned int __v = be32_to_cpu(__raw_readl(p)); __v; }) +#define ioread32(p) ((unsigned int)readl(p)) +#define ioread32be(p) ((unsigned int)__raw_readl(p)) -#define iowrite8(v,p) __raw_writeb(v, p) +#define iowrite8(v,p) writeb(v, p) -#define iowrite16(v,p) __raw_writew(cpu_to_le16(v), p) -#define iowrite16be(v,p) __raw_writew(cpu_to_be16(v), p) +#define iowrite16(v,p) writew(v, p) +#define iowrite16be(v,p) __raw_writew(v, p) -#define iowrite32(v,p) __raw_writel(cpu_to_le32(v), p) -#define iowrite32be(v,p) __raw_writel(cpu_to_be32(v), p) +#define iowrite32(v,p) writel(v, p) +#define iowrite32be(v,p) __raw_writel(v, p) -#define ioread8_rep(p,d,c) __raw_readsb(p,d,c) -#define ioread16_rep(p,d,c) __raw_readsw(p,d,c) -#define ioread32_rep(p,d,c) __raw_readsl(p,d,c) +#define ioread8_rep(p,d,c) readsb(p,d,c) +#define ioread16_rep(p,d,c) readsw(p,d,c) +#define ioread32_rep(p,d,c) readsl(p,d,c) -#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c) -#define iowrite16_rep(p,s,c) __raw_writesw(p,s,c) -#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c) +#define iowrite8_rep(p,s,c) writesb(p,s,c) +#define iowrite16_rep(p,s,c) writesw(p,s,c) +#define iowrite32_rep(p,s,c) writesl(p,s,c) #endif - -/* - * These two are only here because ALSA _thinks_ it needs them... - */ static inline void memcpy_fromio(void * to, const volatile void __iomem *from, unsigned long count) { char *p = to; - while (count) { - count--; - *p = readb(from); - p++; - from++; - } + volatile const char __iomem *addr = from; + + while (count--) + *p++ = readb(addr++); } static inline void memcpy_toio(volatile void __iomem *to, const void * from, unsigned long count) { const char *p = from; - while (count) { - count--; - writeb(*p, to); - p++; - to++; - } + volatile char __iomem *addr = to; + + while (count--) + writeb(*p++, addr++); } static inline void memset_io(volatile void __iomem *addr, unsigned char val, unsigned long count) { - memset((void __force *)addr, val, count); -} - -/* - * Bad read/write accesses... 
- */ -extern void __readwrite_bug(const char *fn); - -#define IO_SPACE_LIMIT 0xffffffff - -/* Convert I/O port address to virtual address */ -#define __io(p) ((void __iomem *)phys_to_uncached(p)) - -/* - * IO port access primitives - * ------------------------- - * - * The AVR32 doesn't have special IO access instructions; all IO is memory - * mapped. Note that these are defined to perform little endian accesses - * only. Their primary purpose is to access PCI and ISA peripherals. - * - * Note that for a big endian machine, this implies that the following - * big endian mode connectivity is in place. - * - * The machine specific io.h include defines __io to translate an "IO" - * address to a memory address. - * - * Note that we prevent GCC re-ordering or caching values in expressions - * by introducing sequence points into the in*() definitions. Note that - * __raw_* do not guarantee this behaviour. - * - * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space. - */ -#define outb(v, p) __raw_writeb(v, __io(p)) -#define outw(v, p) __raw_writew(cpu_to_le16(v), __io(p)) -#define outl(v, p) __raw_writel(cpu_to_le32(v), __io(p)) - -#define inb(p) __raw_readb(__io(p)) -#define inw(p) le16_to_cpu(__raw_readw(__io(p))) -#define inl(p) le32_to_cpu(__raw_readl(__io(p))) - -static inline void __outsb(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - outb(*(u8 *)addr, port); - addr++; - } -} + volatile char __iomem *p = addr; -static inline void __insb(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - *(u8 *)addr = inb(port); - addr++; - } + while (count--) + writeb(val, p++); } -static inline void __outsw(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - outw(*(u16 *)addr, port); - addr += 2; - } -} - -static inline void __insw(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - *(u16 *)addr = inw(port); - addr += 2; - } -} - -static inline void __outsl(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - outl(*(u32 *)addr, port); - addr += 4; - } -} - -static inline void __insl(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - *(u32 *)addr = inl(port); - addr += 4; - } -} - -#define outsb(port, addr, count) __outsb(port, addr, count) -#define insb(port, addr, count) __insb(port, addr, count) -#define outsw(port, addr, count) __outsw(port, addr, count) -#define insw(port, addr, count) __insw(port, addr, count) -#define outsl(port, addr, count) __outsl(port, addr, count) -#define insl(port, addr, count) __insl(port, addr, count) +#define IO_SPACE_LIMIT 0xffffffff extern void __iomem *__ioremap(unsigned long offset, size_t size, unsigned long flags); @@ -292,6 +325,4 @@ extern void __iounmap(void __iomem *addr); */ #define xlate_dev_kmem_ptr(p) p -#endif /* __KERNEL__ */ - #endif /* __ASM_AVR32_IO_H */ -- cgit v1.2.3-59-g8ed1b From 5539f59ac40473730806580f212c4eac6e769f01 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Wed, 21 Mar 2007 15:39:18 +0100 Subject: [AVR32] Move setup_bootmem() from mm/init.c to kernel/setup.c Signed-off-by: Haavard Skinnemoen --- arch/avr32/kernel/setup.c | 238 ++++++++++++++++++++++++++++++++++++++++++++++ arch/avr32/mm/init.c | 238 ---------------------------------------------- include/asm-avr32/setup.h | 1 - 3 files changed, 238 insertions(+), 239 deletions(-) (limited to 'include') diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c index a1a7c3c3f522..53a1ff0cb05c 100644 --- 
a/arch/avr32/kernel/setup.c +++ b/arch/avr32/kernel/setup.c @@ -8,12 +8,14 @@ #include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -260,6 +262,242 @@ static void __init parse_tags(struct tag *t) t->hdr.tag); } +static void __init print_memory_map(const char *what, + struct tag_mem_range *mem) +{ + printk ("%s:\n", what); + for (; mem; mem = mem->next) { + printk (" %08lx - %08lx\n", + (unsigned long)mem->addr, + (unsigned long)(mem->addr + mem->size)); + } +} + +#define MAX_LOWMEM HIGHMEM_START +#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM) + +/* + * Sort a list of memory regions in-place by ascending address. + * + * We're using bubble sort because we only have singly linked lists + * with few elements. + */ +static void __init sort_mem_list(struct tag_mem_range **pmem) +{ + int done; + struct tag_mem_range **a, **b; + + if (!*pmem) + return; + + do { + done = 1; + a = pmem, b = &(*pmem)->next; + while (*b) { + if ((*a)->addr > (*b)->addr) { + struct tag_mem_range *tmp; + tmp = (*b)->next; + (*b)->next = *a; + *a = *b; + *b = tmp; + done = 0; + } + a = &(*a)->next; + b = &(*a)->next; + } + } while (!done); +} + +/* + * Find a free memory region large enough for storing the + * bootmem bitmap. + */ +static unsigned long __init +find_bootmap_pfn(const struct tag_mem_range *mem) +{ + unsigned long bootmap_pages, bootmap_len; + unsigned long node_pages = PFN_UP(mem->size); + unsigned long bootmap_addr = mem->addr; + struct tag_mem_range *reserved = mem_reserved; + struct tag_mem_range *ramdisk = mem_ramdisk; + unsigned long kern_start = __pa(_stext); + unsigned long kern_end = __pa(_end); + + bootmap_pages = bootmem_bootmap_pages(node_pages); + bootmap_len = bootmap_pages << PAGE_SHIFT; + + /* + * Find a large enough region without reserved pages for + * storing the bootmem bitmap. We can take advantage of the + * fact that all lists have been sorted. + * + * We have to check explicitly reserved regions as well as the + * kernel image and any RAMDISK images... + * + * Oh, and we have to make sure we don't overwrite the taglist + * since we're going to use it until the bootmem allocator is + * fully up and running. 
+ */ + while (1) { + if ((bootmap_addr < kern_end) && + ((bootmap_addr + bootmap_len) > kern_start)) + bootmap_addr = kern_end; + + while (reserved && + (bootmap_addr >= (reserved->addr + reserved->size))) + reserved = reserved->next; + + if (reserved && + ((bootmap_addr + bootmap_len) >= reserved->addr)) { + bootmap_addr = reserved->addr + reserved->size; + continue; + } + + while (ramdisk && + (bootmap_addr >= (ramdisk->addr + ramdisk->size))) + ramdisk = ramdisk->next; + + if (!ramdisk || + ((bootmap_addr + bootmap_len) < ramdisk->addr)) + break; + + bootmap_addr = ramdisk->addr + ramdisk->size; + } + + if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size)) + return ~0UL; + + return PFN_UP(bootmap_addr); +} + +static void __init setup_bootmem(void) +{ + unsigned bootmap_size; + unsigned long first_pfn, bootmap_pfn, pages; + unsigned long max_pfn, max_low_pfn; + unsigned long kern_start = __pa(_stext); + unsigned long kern_end = __pa(_end); + unsigned node = 0; + struct tag_mem_range *bank, *res; + + sort_mem_list(&mem_phys); + sort_mem_list(&mem_reserved); + + print_memory_map("Physical memory", mem_phys); + print_memory_map("Reserved memory", mem_reserved); + + nodes_clear(node_online_map); + + if (mem_ramdisk) { +#ifdef CONFIG_BLK_DEV_INITRD + initrd_start = (unsigned long)__va(mem_ramdisk->addr); + initrd_end = initrd_start + mem_ramdisk->size; + + print_memory_map("RAMDISK images", mem_ramdisk); + if (mem_ramdisk->next) + printk(KERN_WARNING + "Warning: Only the first RAMDISK image " + "will be used\n"); + sort_mem_list(&mem_ramdisk); +#else + printk(KERN_WARNING "RAM disk image present, but " + "no initrd support in kernel!\n"); +#endif + } + + if (mem_phys->next) + printk(KERN_WARNING "Only using first memory bank\n"); + + for (bank = mem_phys; bank; bank = NULL) { + first_pfn = PFN_UP(bank->addr); + max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size); + bootmap_pfn = find_bootmap_pfn(bank); + if (bootmap_pfn > max_pfn) + panic("No space for bootmem bitmap!\n"); + + if (max_low_pfn > MAX_LOWMEM_PFN) { + max_low_pfn = MAX_LOWMEM_PFN; +#ifndef CONFIG_HIGHMEM + /* + * Lowmem is memory that can be addressed + * directly through P1/P2 + */ + printk(KERN_WARNING + "Node %u: Only %ld MiB of memory will be used.\n", + node, MAX_LOWMEM >> 20); + printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); +#else +#error HIGHMEM is not supported by AVR32 yet +#endif + } + + /* Initialize the boot-time allocator with low memory only. */ + bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn, + first_pfn, max_low_pfn); + + printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n", + node, NODE_DATA(node)->bdata, + NODE_DATA(node)->bdata->node_bootmem_map); + + /* + * Register fully available RAM pages with the bootmem + * allocator. + */ + pages = max_low_pfn - first_pfn; + free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn), + PFN_PHYS(pages)); + + /* + * Reserve space for the kernel image (if present in + * this node)... + */ + if ((kern_start >= PFN_PHYS(first_pfn)) && + (kern_start < PFN_PHYS(max_pfn))) { + printk("Node %u: Kernel image %08lx - %08lx\n", + node, kern_start, kern_end); + reserve_bootmem_node(NODE_DATA(node), kern_start, + kern_end - kern_start); + } + + /* ...the bootmem bitmap... */ + reserve_bootmem_node(NODE_DATA(node), + PFN_PHYS(bootmap_pfn), + bootmap_size); + + /* ...any RAMDISK images... 
*/ + for (res = mem_ramdisk; res; res = res->next) { + if (res->addr > PFN_PHYS(max_pfn)) + break; + + if (res->addr >= PFN_PHYS(first_pfn)) { + printk("Node %u: RAMDISK %08lx - %08lx\n", + node, + (unsigned long)res->addr, + (unsigned long)(res->addr + res->size)); + reserve_bootmem_node(NODE_DATA(node), + res->addr, res->size); + } + } + + /* ...and any other reserved regions. */ + for (res = mem_reserved; res; res = res->next) { + if (res->addr > PFN_PHYS(max_pfn)) + break; + + if (res->addr >= PFN_PHYS(first_pfn)) { + printk("Node %u: Reserved %08lx - %08lx\n", + node, + (unsigned long)res->addr, + (unsigned long)(res->addr + res->size)); + reserve_bootmem_node(NODE_DATA(node), + res->addr, res->size); + } + } + + node_set_online(node); + } +} + void __init setup_arch (char **cmdline_p) { struct clk *cpu_clk; diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c index 70da6894acc1..82cf70854b90 100644 --- a/arch/avr32/mm/init.c +++ b/arch/avr32/mm/init.c @@ -10,11 +10,9 @@ #include #include #include -#include #include #include #include -#include #include #include @@ -78,242 +76,6 @@ void show_mem(void) printk ("%d pages swap cached\n", cached); } -static void __init print_memory_map(const char *what, - struct tag_mem_range *mem) -{ - printk ("%s:\n", what); - for (; mem; mem = mem->next) { - printk (" %08lx - %08lx\n", - (unsigned long)mem->addr, - (unsigned long)(mem->addr + mem->size)); - } -} - -#define MAX_LOWMEM HIGHMEM_START -#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM) - -/* - * Sort a list of memory regions in-place by ascending address. - * - * We're using bubble sort because we only have singly linked lists - * with few elements. - */ -static void __init sort_mem_list(struct tag_mem_range **pmem) -{ - int done; - struct tag_mem_range **a, **b; - - if (!*pmem) - return; - - do { - done = 1; - a = pmem, b = &(*pmem)->next; - while (*b) { - if ((*a)->addr > (*b)->addr) { - struct tag_mem_range *tmp; - tmp = (*b)->next; - (*b)->next = *a; - *a = *b; - *b = tmp; - done = 0; - } - a = &(*a)->next; - b = &(*a)->next; - } - } while (!done); -} - -/* - * Find a free memory region large enough for storing the - * bootmem bitmap. - */ -static unsigned long __init -find_bootmap_pfn(const struct tag_mem_range *mem) -{ - unsigned long bootmap_pages, bootmap_len; - unsigned long node_pages = PFN_UP(mem->size); - unsigned long bootmap_addr = mem->addr; - struct tag_mem_range *reserved = mem_reserved; - struct tag_mem_range *ramdisk = mem_ramdisk; - unsigned long kern_start = virt_to_phys(_stext); - unsigned long kern_end = virt_to_phys(_end); - - bootmap_pages = bootmem_bootmap_pages(node_pages); - bootmap_len = bootmap_pages << PAGE_SHIFT; - - /* - * Find a large enough region without reserved pages for - * storing the bootmem bitmap. We can take advantage of the - * fact that all lists have been sorted. - * - * We have to check explicitly reserved regions as well as the - * kernel image and any RAMDISK images... - * - * Oh, and we have to make sure we don't overwrite the taglist - * since we're going to use it until the bootmem allocator is - * fully up and running. 
- */ - while (1) { - if ((bootmap_addr < kern_end) && - ((bootmap_addr + bootmap_len) > kern_start)) - bootmap_addr = kern_end; - - while (reserved && - (bootmap_addr >= (reserved->addr + reserved->size))) - reserved = reserved->next; - - if (reserved && - ((bootmap_addr + bootmap_len) >= reserved->addr)) { - bootmap_addr = reserved->addr + reserved->size; - continue; - } - - while (ramdisk && - (bootmap_addr >= (ramdisk->addr + ramdisk->size))) - ramdisk = ramdisk->next; - - if (!ramdisk || - ((bootmap_addr + bootmap_len) < ramdisk->addr)) - break; - - bootmap_addr = ramdisk->addr + ramdisk->size; - } - - if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size)) - return ~0UL; - - return PFN_UP(bootmap_addr); -} - -void __init setup_bootmem(void) -{ - unsigned bootmap_size; - unsigned long first_pfn, bootmap_pfn, pages; - unsigned long max_pfn, max_low_pfn; - unsigned long kern_start = virt_to_phys(_stext); - unsigned long kern_end = virt_to_phys(_end); - unsigned node = 0; - struct tag_mem_range *bank, *res; - - sort_mem_list(&mem_phys); - sort_mem_list(&mem_reserved); - - print_memory_map("Physical memory", mem_phys); - print_memory_map("Reserved memory", mem_reserved); - - nodes_clear(node_online_map); - - if (mem_ramdisk) { -#ifdef CONFIG_BLK_DEV_INITRD - initrd_start = (unsigned long)__va(mem_ramdisk->addr); - initrd_end = initrd_start + mem_ramdisk->size; - - print_memory_map("RAMDISK images", mem_ramdisk); - if (mem_ramdisk->next) - printk(KERN_WARNING - "Warning: Only the first RAMDISK image " - "will be used\n"); - sort_mem_list(&mem_ramdisk); -#else - printk(KERN_WARNING "RAM disk image present, but " - "no initrd support in kernel!\n"); -#endif - } - - if (mem_phys->next) - printk(KERN_WARNING "Only using first memory bank\n"); - - for (bank = mem_phys; bank; bank = NULL) { - first_pfn = PFN_UP(bank->addr); - max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size); - bootmap_pfn = find_bootmap_pfn(bank); - if (bootmap_pfn > max_pfn) - panic("No space for bootmem bitmap!\n"); - - if (max_low_pfn > MAX_LOWMEM_PFN) { - max_low_pfn = MAX_LOWMEM_PFN; -#ifndef CONFIG_HIGHMEM - /* - * Lowmem is memory that can be addressed - * directly through P1/P2 - */ - printk(KERN_WARNING - "Node %u: Only %ld MiB of memory will be used.\n", - node, MAX_LOWMEM >> 20); - printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); -#else -#error HIGHMEM is not supported by AVR32 yet -#endif - } - - /* Initialize the boot-time allocator with low memory only. */ - bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn, - first_pfn, max_low_pfn); - - printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n", - node, NODE_DATA(node)->bdata, - NODE_DATA(node)->bdata->node_bootmem_map); - - /* - * Register fully available RAM pages with the bootmem - * allocator. - */ - pages = max_low_pfn - first_pfn; - free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn), - PFN_PHYS(pages)); - - /* - * Reserve space for the kernel image (if present in - * this node)... - */ - if ((kern_start >= PFN_PHYS(first_pfn)) && - (kern_start < PFN_PHYS(max_pfn))) { - printk("Node %u: Kernel image %08lx - %08lx\n", - node, kern_start, kern_end); - reserve_bootmem_node(NODE_DATA(node), kern_start, - kern_end - kern_start); - } - - /* ...the bootmem bitmap... */ - reserve_bootmem_node(NODE_DATA(node), - PFN_PHYS(bootmap_pfn), - bootmap_size); - - /* ...any RAMDISK images... 
*/ - for (res = mem_ramdisk; res; res = res->next) { - if (res->addr > PFN_PHYS(max_pfn)) - break; - - if (res->addr >= PFN_PHYS(first_pfn)) { - printk("Node %u: RAMDISK %08lx - %08lx\n", - node, - (unsigned long)res->addr, - (unsigned long)(res->addr + res->size)); - reserve_bootmem_node(NODE_DATA(node), - res->addr, res->size); - } - } - - /* ...and any other reserved regions. */ - for (res = mem_reserved; res; res = res->next) { - if (res->addr > PFN_PHYS(max_pfn)) - break; - - if (res->addr >= PFN_PHYS(first_pfn)) { - printk("Node %u: Reserved %08lx - %08lx\n", - node, - (unsigned long)res->addr, - (unsigned long)(res->addr + res->size)); - reserve_bootmem_node(NODE_DATA(node), - res->addr, res->size); - } - } - - node_set_online(node); - } -} - /* * paging_init() sets up the page tables * diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h index 0a5224245e44..59be821cf38f 100644 --- a/include/asm-avr32/setup.h +++ b/include/asm-avr32/setup.h @@ -130,7 +130,6 @@ extern struct tag_mem_range *mem_ramdisk; extern struct tag *bootloader_tags; -extern void setup_bootmem(void); extern void setup_processor(void); extern void board_setup_fbmem(unsigned long fbmem_start, unsigned long fbmem_size); -- cgit v1.2.3-59-g8ed1b From d8011768e6bdd0d9de5cc7bdbd3077b4b4fab8c7 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Wed, 21 Mar 2007 16:02:57 +0100 Subject: [AVR32] Simplify early handling of memory regions Use struct resource to specify both physical memory regions and reserved regions and push everything into the same framework, including kernel code/data and initrd memory. This allows us to get rid of many special cases in the bootmem initialization and will also make it easier to implement more robust handling of framebuffer memory later. Signed-off-by: Haavard Skinnemoen --- arch/avr32/kernel/setup.c | 495 ++++++++++++++++++++++------------------------ include/asm-avr32/setup.h | 10 +- 2 files changed, 234 insertions(+), 271 deletions(-) (limited to 'include') diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c index 53a1ff0cb05c..d0a35a1b6a66 100644 --- a/arch/avr32/kernel/setup.c +++ b/arch/avr32/kernel/setup.c @@ -31,13 +31,6 @@ extern int root_mountflags; -/* - * Bootloader-provided information about physical memory - */ -struct tag_mem_range *mem_phys; -struct tag_mem_range *mem_reserved; -struct tag_mem_range *mem_ramdisk; - /* * Initialize loops_per_jiffy as 5000000 (500MIPS). * Better make it too large than too small... @@ -50,32 +43,153 @@ EXPORT_SYMBOL(boot_cpu_data); static char __initdata command_line[COMMAND_LINE_SIZE]; /* - * Should be more than enough, but if you have a _really_ complex - * setup, you might need to increase the size of this... + * Standard memory resources */ -static struct tag_mem_range __initdata mem_range_cache[32]; -static unsigned mem_range_next_free; +static struct resource __initdata kernel_data = { + .name = "Kernel data", + .start = 0, + .end = 0, + .flags = IORESOURCE_MEM, +}; +static struct resource __initdata kernel_code = { + .name = "Kernel code", + .start = 0, + .end = 0, + .flags = IORESOURCE_MEM, + .sibling = &kernel_data, +}; /* - * Standard memory resources + * Available system RAM and reserved regions as singly linked + * lists. These lists are traversed using the sibling pointer in + * struct resource and are kept sorted at all times. 
*/ -static struct resource mem_res[] = { - { - .name = "Kernel code", - .start = 0, - .end = 0, - .flags = IORESOURCE_MEM - }, - { - .name = "Kernel data", - .start = 0, - .end = 0, - .flags = IORESOURCE_MEM, - }, -}; +static struct resource *__initdata system_ram; +static struct resource *__initdata reserved = &kernel_code; + +/* + * We need to allocate these before the bootmem allocator is up and + * running, so we need this "cache". 32 entries are probably enough + * for all but the most insanely complex systems. + */ +static struct resource __initdata res_cache[32]; +static unsigned int __initdata res_cache_next_free; + +static void __init resource_init(void) +{ + struct resource *mem, *res; + struct resource *new; + + kernel_code.start = __pa(init_mm.start_code); + + for (mem = system_ram; mem; mem = mem->sibling) { + new = alloc_bootmem_low(sizeof(struct resource)); + memcpy(new, mem, sizeof(struct resource)); + + new->sibling = NULL; + if (request_resource(&iomem_resource, new)) + printk(KERN_WARNING "Bad RAM resource %08x-%08x\n", + mem->start, mem->end); + } + + for (res = reserved; res; res = res->sibling) { + new = alloc_bootmem_low(sizeof(struct resource)); + memcpy(new, res, sizeof(struct resource)); + + new->sibling = NULL; + if (insert_resource(&iomem_resource, new)) + printk(KERN_WARNING + "Bad reserved resource %s (%08x-%08x)\n", + res->name, res->start, res->end); + } +} + +static void __init +add_physical_memory(resource_size_t start, resource_size_t end) +{ + struct resource *new, *next, **pprev; + + for (pprev = &system_ram, next = system_ram; next; + pprev = &next->sibling, next = next->sibling) { + if (end < next->start) + break; + if (start <= next->end) { + printk(KERN_WARNING + "Warning: Physical memory map is broken\n"); + printk(KERN_WARNING + "Warning: %08x-%08x overlaps %08x-%08x\n", + start, end, next->start, next->end); + return; + } + } + + if (res_cache_next_free >= ARRAY_SIZE(res_cache)) { + printk(KERN_WARNING + "Warning: Failed to add physical memory %08x-%08x\n", + start, end); + return; + } + + new = &res_cache[res_cache_next_free++]; + new->start = start; + new->end = end; + new->name = "System RAM"; + new->flags = IORESOURCE_MEM; + + *pprev = new; +} + +static int __init +add_reserved_region(resource_size_t start, resource_size_t end, + const char *name) +{ + struct resource *new, *next, **pprev; -#define kernel_code mem_res[0] -#define kernel_data mem_res[1] + if (end < start) + return -EINVAL; + + if (res_cache_next_free >= ARRAY_SIZE(res_cache)) + return -ENOMEM; + + for (pprev = &reserved, next = reserved; next; + pprev = &next->sibling, next = next->sibling) { + if (end < next->start) + break; + if (start <= next->end) + return -EBUSY; + } + + new = &res_cache[res_cache_next_free++]; + new->start = start; + new->end = end; + new->name = name; + new->flags = IORESOURCE_MEM; + + *pprev = new; + + return 0; +} + +static unsigned long __init +find_free_region(const struct resource *mem, resource_size_t size, + resource_size_t align) +{ + struct resource *res; + unsigned long target; + + target = ALIGN(mem->start, align); + for (res = reserved; res; res = res->sibling) { + if ((target + size) <= res->start) + break; + if (target <= res->end) + target = ALIGN(res->end + 1, align); + } + + if ((target + size) > (mem->end + 1)) + return mem->end + 1; + + return target; +} /* * Early framebuffer allocation. 
Works as follows: @@ -112,42 +226,6 @@ static int __init early_parse_fbmem(char *p) } early_param("fbmem", early_parse_fbmem); -static inline void __init resource_init(void) -{ - struct tag_mem_range *region; - - kernel_code.start = __pa(init_mm.start_code); - kernel_code.end = __pa(init_mm.end_code - 1); - kernel_data.start = __pa(init_mm.end_code); - kernel_data.end = __pa(init_mm.brk - 1); - - for (region = mem_phys; region; region = region->next) { - struct resource *res; - unsigned long phys_start, phys_end; - - if (region->size == 0) - continue; - - phys_start = region->addr; - phys_end = phys_start + region->size - 1; - - res = alloc_bootmem_low(sizeof(*res)); - res->name = "System RAM"; - res->start = phys_start; - res->end = phys_end; - res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; - - request_resource (&iomem_resource, res); - - if (kernel_code.start >= res->start && - kernel_code.end <= res->end) - request_resource (res, &kernel_code); - if (kernel_data.start >= res->start && - kernel_data.end <= res->end) - request_resource (res, &kernel_data); - } -} - static int __init parse_tag_core(struct tag *tag) { if (tag->hdr.size > 2) { @@ -159,11 +237,9 @@ static int __init parse_tag_core(struct tag *tag) } __tagtable(ATAG_CORE, parse_tag_core); -static int __init parse_tag_mem_range(struct tag *tag, - struct tag_mem_range **root) +static int __init parse_tag_mem(struct tag *tag) { - struct tag_mem_range *cur, **pprev; - struct tag_mem_range *new; + unsigned long start, end; /* * Ignore zero-sized entries. If we're running standalone, the @@ -173,34 +249,53 @@ static int __init parse_tag_mem_range(struct tag *tag, if (tag->u.mem_range.size == 0) return 0; - /* - * Copy the data so the bootmem init code doesn't need to care - * about it. - */ - if (mem_range_next_free >= ARRAY_SIZE(mem_range_cache)) - panic("Physical memory map too complex!\n"); + start = tag->u.mem_range.addr; + end = tag->u.mem_range.addr + tag->u.mem_range.size - 1; - new = &mem_range_cache[mem_range_next_free++]; - *new = tag->u.mem_range; + add_physical_memory(start, end); + return 0; +} +__tagtable(ATAG_MEM, parse_tag_mem); - pprev = root; - cur = *root; - while (cur) { - pprev = &cur->next; - cur = cur->next; +static int __init parse_tag_rdimg(struct tag *tag) +{ +#ifdef CONFIG_INITRD + struct tag_mem_range *mem = &tag->u.mem_range; + int ret; + + if (initrd_start) { + printk(KERN_WARNING + "Warning: Only the first initrd image will be used\n"); + return 0; } - *pprev = new; - new->next = NULL; + ret = add_reserved_region(mem->start, mem->start + mem->size - 1, + "initrd"); + if (ret) { + printk(KERN_WARNING + "Warning: Failed to reserve initrd memory\n"); + return ret; + } + + initrd_start = (unsigned long)__va(mem->addr); + initrd_end = initrd_start + mem->size; +#else + printk(KERN_WARNING "RAM disk image present, but " + "no initrd support in kernel, ignoring\n"); +#endif return 0; } +__tagtable(ATAG_RDIMG, parse_tag_rdimg); -static int __init parse_tag_mem(struct tag *tag) +static int __init parse_tag_rsvd_mem(struct tag *tag) { - return parse_tag_mem_range(tag, &mem_phys); + struct tag_mem_range *mem = &tag->u.mem_range; + + return add_reserved_region(mem->addr, mem->addr + mem->size - 1, + "Reserved"); } -__tagtable(ATAG_MEM, parse_tag_mem); +__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem); static int __init parse_tag_cmdline(struct tag *tag) { @@ -209,12 +304,6 @@ static int __init parse_tag_cmdline(struct tag *tag) } __tagtable(ATAG_CMDLINE, parse_tag_cmdline); -static int __init parse_tag_rdimg(struct 
tag *tag) -{ - return parse_tag_mem_range(tag, &mem_ramdisk); -} -__tagtable(ATAG_RDIMG, parse_tag_rdimg); - static int __init parse_tag_clock(struct tag *tag) { /* @@ -225,12 +314,6 @@ static int __init parse_tag_clock(struct tag *tag) } __tagtable(ATAG_CLOCK, parse_tag_clock); -static int __init parse_tag_rsvd_mem(struct tag *tag) -{ - return parse_tag_mem_range(tag, &mem_reserved); -} -__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem); - /* * Scan the tag table for this tag, and call its parse function. The * tag table is built by the linker from all the __tagtable @@ -262,66 +345,16 @@ static void __init parse_tags(struct tag *t) t->hdr.tag); } -static void __init print_memory_map(const char *what, - struct tag_mem_range *mem) -{ - printk ("%s:\n", what); - for (; mem; mem = mem->next) { - printk (" %08lx - %08lx\n", - (unsigned long)mem->addr, - (unsigned long)(mem->addr + mem->size)); - } -} - -#define MAX_LOWMEM HIGHMEM_START -#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM) - -/* - * Sort a list of memory regions in-place by ascending address. - * - * We're using bubble sort because we only have singly linked lists - * with few elements. - */ -static void __init sort_mem_list(struct tag_mem_range **pmem) -{ - int done; - struct tag_mem_range **a, **b; - - if (!*pmem) - return; - - do { - done = 1; - a = pmem, b = &(*pmem)->next; - while (*b) { - if ((*a)->addr > (*b)->addr) { - struct tag_mem_range *tmp; - tmp = (*b)->next; - (*b)->next = *a; - *a = *b; - *b = tmp; - done = 0; - } - a = &(*a)->next; - b = &(*a)->next; - } - } while (!done); -} - /* * Find a free memory region large enough for storing the * bootmem bitmap. */ static unsigned long __init -find_bootmap_pfn(const struct tag_mem_range *mem) +find_bootmap_pfn(const struct resource *mem) { unsigned long bootmap_pages, bootmap_len; - unsigned long node_pages = PFN_UP(mem->size); - unsigned long bootmap_addr = mem->addr; - struct tag_mem_range *reserved = mem_reserved; - struct tag_mem_range *ramdisk = mem_ramdisk; - unsigned long kern_start = __pa(_stext); - unsigned long kern_end = __pa(_end); + unsigned long node_pages = PFN_UP(mem->end - mem->start + 1); + unsigned long bootmap_start; bootmap_pages = bootmem_bootmap_pages(node_pages); bootmap_len = bootmap_pages << PAGE_SHIFT; @@ -331,87 +364,43 @@ find_bootmap_pfn(const struct tag_mem_range *mem) * storing the bootmem bitmap. We can take advantage of the * fact that all lists have been sorted. * - * We have to check explicitly reserved regions as well as the - * kernel image and any RAMDISK images... - * - * Oh, and we have to make sure we don't overwrite the taglist - * since we're going to use it until the bootmem allocator is - * fully up and running. + * We have to check that we don't collide with any reserved + * regions, which includes the kernel image and any RAMDISK + * images. 
*/ - while (1) { - if ((bootmap_addr < kern_end) && - ((bootmap_addr + bootmap_len) > kern_start)) - bootmap_addr = kern_end; - - while (reserved && - (bootmap_addr >= (reserved->addr + reserved->size))) - reserved = reserved->next; - - if (reserved && - ((bootmap_addr + bootmap_len) >= reserved->addr)) { - bootmap_addr = reserved->addr + reserved->size; - continue; - } - - while (ramdisk && - (bootmap_addr >= (ramdisk->addr + ramdisk->size))) - ramdisk = ramdisk->next; - - if (!ramdisk || - ((bootmap_addr + bootmap_len) < ramdisk->addr)) - break; - - bootmap_addr = ramdisk->addr + ramdisk->size; - } - - if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size)) - return ~0UL; + bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE); - return PFN_UP(bootmap_addr); + return bootmap_start >> PAGE_SHIFT; } +#define MAX_LOWMEM HIGHMEM_START +#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM) + static void __init setup_bootmem(void) { unsigned bootmap_size; unsigned long first_pfn, bootmap_pfn, pages; unsigned long max_pfn, max_low_pfn; - unsigned long kern_start = __pa(_stext); - unsigned long kern_end = __pa(_end); unsigned node = 0; - struct tag_mem_range *bank, *res; + struct resource *res; - sort_mem_list(&mem_phys); - sort_mem_list(&mem_reserved); - - print_memory_map("Physical memory", mem_phys); - print_memory_map("Reserved memory", mem_reserved); + printk(KERN_INFO "Physical memory:\n"); + for (res = system_ram; res; res = res->sibling) + printk(" %08x-%08x\n", res->start, res->end); + printk(KERN_INFO "Reserved memory:\n"); + for (res = reserved; res; res = res->sibling) + printk(" %08x-%08x: %s\n", + res->start, res->end, res->name); nodes_clear(node_online_map); - if (mem_ramdisk) { -#ifdef CONFIG_BLK_DEV_INITRD - initrd_start = (unsigned long)__va(mem_ramdisk->addr); - initrd_end = initrd_start + mem_ramdisk->size; - - print_memory_map("RAMDISK images", mem_ramdisk); - if (mem_ramdisk->next) - printk(KERN_WARNING - "Warning: Only the first RAMDISK image " - "will be used\n"); - sort_mem_list(&mem_ramdisk); -#else - printk(KERN_WARNING "RAM disk image present, but " - "no initrd support in kernel!\n"); -#endif - } - - if (mem_phys->next) + if (system_ram->sibling) printk(KERN_WARNING "Only using first memory bank\n"); - for (bank = mem_phys; bank; bank = NULL) { - first_pfn = PFN_UP(bank->addr); - max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size); - bootmap_pfn = find_bootmap_pfn(bank); + for (res = system_ram; res; res = NULL) { + first_pfn = PFN_UP(res->start); + max_low_pfn = max_pfn = PFN_DOWN(res->end + 1); + bootmap_pfn = find_bootmap_pfn(res); if (bootmap_pfn > max_pfn) panic("No space for bootmem bitmap!\n"); @@ -435,10 +424,6 @@ static void __init setup_bootmem(void) bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn, first_pfn, max_low_pfn); - printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n", - node, NODE_DATA(node)->bdata, - NODE_DATA(node)->bdata->node_bootmem_map); - /* * Register fully available RAM pages with the bootmem * allocator. @@ -447,51 +432,26 @@ static void __init setup_bootmem(void) free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn), PFN_PHYS(pages)); - /* - * Reserve space for the kernel image (if present in - * this node)... - */ - if ((kern_start >= PFN_PHYS(first_pfn)) && - (kern_start < PFN_PHYS(max_pfn))) { - printk("Node %u: Kernel image %08lx - %08lx\n", - node, kern_start, kern_end); - reserve_bootmem_node(NODE_DATA(node), kern_start, - kern_end - kern_start); - } - - /* ...the bootmem bitmap... 
*/ + /* Reserve space for the bootmem bitmap... */ reserve_bootmem_node(NODE_DATA(node), PFN_PHYS(bootmap_pfn), bootmap_size); - /* ...any RAMDISK images... */ - for (res = mem_ramdisk; res; res = res->next) { - if (res->addr > PFN_PHYS(max_pfn)) - break; - - if (res->addr >= PFN_PHYS(first_pfn)) { - printk("Node %u: RAMDISK %08lx - %08lx\n", - node, - (unsigned long)res->addr, - (unsigned long)(res->addr + res->size)); - reserve_bootmem_node(NODE_DATA(node), - res->addr, res->size); - } - } - /* ...and any other reserved regions. */ - for (res = mem_reserved; res; res = res->next) { - if (res->addr > PFN_PHYS(max_pfn)) + for (res = reserved; res; res = res->sibling) { + if (res->start > PFN_PHYS(max_pfn)) break; - if (res->addr >= PFN_PHYS(first_pfn)) { - printk("Node %u: Reserved %08lx - %08lx\n", - node, - (unsigned long)res->addr, - (unsigned long)(res->addr + res->size)); - reserve_bootmem_node(NODE_DATA(node), - res->addr, res->size); - } + /* + * resource_init will complain about partial + * overlaps, so we'll just ignore such + * resources for now. + */ + if (res->start >= PFN_PHYS(first_pfn) + && res->end < PFN_PHYS(max_pfn)) + reserve_bootmem_node( + NODE_DATA(node), res->start, + res->end - res->start + 1); } node_set_online(node); @@ -502,6 +462,20 @@ void __init setup_arch (char **cmdline_p) { struct clk *cpu_clk; + init_mm.start_code = (unsigned long)_text; + init_mm.end_code = (unsigned long)_etext; + init_mm.end_data = (unsigned long)_edata; + init_mm.brk = (unsigned long)_end; + + /* + * Include .init section to make allocations easier. It will + * be removed before the resource is actually requested. + */ + kernel_code.start = __pa(__init_begin); + kernel_code.end = __pa(init_mm.end_code - 1); + kernel_data.start = __pa(init_mm.end_code); + kernel_data.end = __pa(init_mm.brk - 1); + parse_tags(bootloader_tags); setup_processor(); @@ -527,11 +501,6 @@ void __init setup_arch (char **cmdline_p) ((cpu_hz + 500) / 1000) % 1000); } - init_mm.start_code = (unsigned long) &_text; - init_mm.end_code = (unsigned long) &_etext; - init_mm.end_data = (unsigned long) &_edata; - init_mm.brk = (unsigned long) &_end; - strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; parse_early_param(); diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h index 59be821cf38f..bca2ee1fc607 100644 --- a/include/asm-avr32/setup.h +++ b/include/asm-avr32/setup.h @@ -124,19 +124,13 @@ struct tagtable { #define for_each_tag(t,base) \ for (t = base; t->hdr.size; t = tag_next(t)) -extern struct tag_mem_range *mem_phys; -extern struct tag_mem_range *mem_reserved; -extern struct tag_mem_range *mem_ramdisk; - extern struct tag *bootloader_tags; -extern void setup_processor(void); + +void setup_processor(void); extern void board_setup_fbmem(unsigned long fbmem_start, unsigned long fbmem_size); -/* Chip-specific hook to enable the use of SDRAM */ -void chip_enable_sdram(void); - #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ -- cgit v1.2.3-59-g8ed1b From d80e2bb12606906fd0b5b5592f519852de8b0113 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Wed, 21 Mar 2007 16:23:41 +0100 Subject: [AVR32] Get rid of board_setup_fbmem() Since the core setup code takes care of both allocation and reservation of framebuffer memory, there's no need for this board- specific hook anymore. Replace it with two global variables, fbmem_start and fbmem_size, which can be used directly. 
Signed-off-by: Haavard Skinnemoen --- arch/avr32/boards/atstk1000/atstk1002.c | 4 +++- arch/avr32/boards/atstk1000/setup.c | 30 ------------------------------ arch/avr32/kernel/setup.c | 7 ++----- include/asm-avr32/setup.h | 4 ++-- 4 files changed, 7 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c index 5974768a59e5..abe6ca203fa7 100644 --- a/arch/avr32/boards/atstk1000/atstk1002.c +++ b/arch/avr32/boards/atstk1000/atstk1002.c @@ -33,7 +33,7 @@ struct eth_addr { static struct eth_addr __initdata hw_addr[2]; static struct eth_platform_data __initdata eth_data[2]; -extern struct lcdc_platform_data atstk1000_fb0_data; +static struct lcdc_platform_data atstk1000_fb0_data; static struct spi_board_info spi0_board_info[] __initdata = { { @@ -148,6 +148,8 @@ static int __init atstk1002_init(void) set_hw_addr(at32_add_device_eth(0, ð_data[0])); at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); + atstk1000_fb0_data.fbmem_start = fbmem_start; + atstk1000_fb0_data.fbmem_size = fbmem_size; at32_add_device_lcdc(0, &atstk1000_fb0_data); return 0; diff --git a/arch/avr32/boards/atstk1000/setup.c b/arch/avr32/boards/atstk1000/setup.c index 272c011802a7..2bc4b88d7edb 100644 --- a/arch/avr32/boards/atstk1000/setup.c +++ b/arch/avr32/boards/atstk1000/setup.c @@ -18,33 +18,3 @@ /* Initialized by bootloader-specific startup code. */ struct tag *bootloader_tags __initdata; - -struct lcdc_platform_data __initdata atstk1000_fb0_data; - -void __init board_setup_fbmem(unsigned long fbmem_start, - unsigned long fbmem_size) -{ - if (!fbmem_size) - return; - - if (!fbmem_start) { - void *fbmem; - - fbmem = alloc_bootmem_low_pages(fbmem_size); - fbmem_start = __pa(fbmem); - } else { - pg_data_t *pgdat; - - for_each_online_pgdat(pgdat) { - if (fbmem_start >= pgdat->bdata->node_boot_start - && fbmem_start <= pgdat->bdata->node_low_pfn) - reserve_bootmem_node(pgdat, fbmem_start, - fbmem_size); - } - } - - printk("%luKiB framebuffer memory at address 0x%08lx\n", - fbmem_size >> 10, fbmem_start); - atstk1000_fb0_data.fbmem_start = fbmem_start; - atstk1000_fb0_data.fbmem_size = fbmem_size; -} diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c index 1682e2d61cf3..b279d66acf5f 100644 --- a/arch/avr32/kernel/setup.c +++ b/arch/avr32/kernel/setup.c @@ -228,8 +228,8 @@ alloc_reserved_region(resource_size_t *start, resource_size_t size, * Board-specific code may use these variables to set up platform data * for the framebuffer driver if fbmem_size is nonzero. 
*/ -static resource_size_t __initdata fbmem_start; -static resource_size_t __initdata fbmem_size; +resource_size_t __initdata fbmem_start; +resource_size_t __initdata fbmem_size; /* * "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for @@ -561,13 +561,10 @@ void __init setup_arch (char **cmdline_p) setup_bootmem(); - board_setup_fbmem(fbmem_start, fbmem_size); - #ifdef CONFIG_VT conswitchp = &dummy_con; #endif paging_init(); - resource_init(); } diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h index bca2ee1fc607..1ff1a217015d 100644 --- a/include/asm-avr32/setup.h +++ b/include/asm-avr32/setup.h @@ -126,10 +126,10 @@ struct tagtable { extern struct tag *bootloader_tags; +extern resource_size_t fbmem_start; +extern resource_size_t fbmem_size; void setup_processor(void); -extern void board_setup_fbmem(unsigned long fbmem_start, - unsigned long fbmem_size); #endif /* !__ASSEMBLY__ */ -- cgit v1.2.3-59-g8ed1b From 2c1a2a3441a754a9b5a8e7184071154f8a9bd61b Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Wed, 7 Mar 2007 10:40:44 +0100 Subject: [AVR32] Use memcpy/memset in memcpy_{from,to}_io and memset_io Using readb/writeb to implement these breaks NOR flash support. I can't see any reason why regular memcpy and memset shouldn't work. Signed-off-by: Haavard Skinnemoen --- include/asm-avr32/io.h | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/asm-avr32/io.h b/include/asm-avr32/io.h index 27b1523d42e0..e30d4b3bd836 100644 --- a/include/asm-avr32/io.h +++ b/include/asm-avr32/io.h @@ -240,30 +240,19 @@ BUILDSTRING(l, u32) static inline void memcpy_fromio(void * to, const volatile void __iomem *from, unsigned long count) { - char *p = to; - volatile const char __iomem *addr = from; - - while (count--) - *p++ = readb(addr++); + memcpy(to, (const void __force *)from, count); } static inline void memcpy_toio(volatile void __iomem *to, const void * from, unsigned long count) { - const char *p = from; - volatile char __iomem *addr = to; - - while (count--) - writeb(*p++, addr++); + memcpy((void __force *)to, from, count); } static inline void memset_io(volatile void __iomem *addr, unsigned char val, unsigned long count) { - volatile char __iomem *p = addr; - - while (count--) - writeb(val, p++); + memset((void __force *)addr, val, count); } #define IO_SPACE_LIMIT 0xffffffff -- cgit v1.2.3-59-g8ed1b From a4022b0d6005b117a985cec64559e048981a4244 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 10 Apr 2007 18:23:09 -0400 Subject: avr32: remove unneeded cast in atomic.h This int cast is superfluous since system.h cmpxchg already casts it in (typeof(*(ptr))). 
Signed-off-by: Mathieu Desnoyers Signed-off-by: Andrew Morton Signed-off-by: Haavard Skinnemoen --- include/asm-avr32/atomic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-avr32/atomic.h b/include/asm-avr32/atomic.h index c40b6032c480..b9c2548a52f3 100644 --- a/include/asm-avr32/atomic.h +++ b/include/asm-avr32/atomic.h @@ -173,7 +173,7 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v) } #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_sub(i, v) (void)atomic_sub_return(i, v) #define atomic_add(i, v) (void)atomic_add_return(i, v) -- cgit v1.2.3-59-g8ed1b From 8224ca195874525533665bbcd23b6da1e575aa4d Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Fri, 27 Apr 2007 14:21:47 +0200 Subject: [AVR32] Fix compile error with gcc 4.1 gcc 4.1 doesn't seem to like const variables as inline assembly outputs. Drop support for reading 64-bit values using get_user() so that we can use an unsigned long to hold the result regardless of the actual size. This should be safe since many architectures, including i386, doesn't support reading 64-bit values with get_user(). Signed-off-by: Haavard Skinnemoen --- include/asm-avr32/uaccess.h | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/asm-avr32/uaccess.h b/include/asm-avr32/uaccess.h index 74a679e9098c..ed092395215e 100644 --- a/include/asm-avr32/uaccess.h +++ b/include/asm-avr32/uaccess.h @@ -181,24 +181,23 @@ extern int __put_user_bad(void); #define __get_user_nocheck(x, ptr, size) \ ({ \ - typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \ + unsigned long __gu_val = 0; \ int __gu_err = 0; \ \ switch (size) { \ case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break; \ case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break; \ case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break; \ - case 8: __get_user_asm("d", __gu_val, ptr, __gu_err); break; \ default: __gu_err = __get_user_bad(); break; \ } \ \ - x = __gu_val; \ + x = (typeof(*(ptr)))__gu_val; \ __gu_err; \ }) #define __get_user_check(x, ptr, size) \ ({ \ - typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \ + unsigned long __gu_val = 0; \ const typeof(*(ptr)) __user * __gu_addr = (ptr); \ int __gu_err = 0; \ \ @@ -216,10 +215,6 @@ extern int __put_user_bad(void); __get_user_asm("w", __gu_val, __gu_addr, \ __gu_err); \ break; \ - case 8: \ - __get_user_asm("d", __gu_val, __gu_addr, \ - __gu_err); \ - break; \ default: \ __gu_err = __get_user_bad(); \ break; \ @@ -227,7 +222,7 @@ extern int __put_user_bad(void); } else { \ __gu_err = -EFAULT; \ } \ - x = __gu_val; \ + x = (typeof(*(ptr)))__gu_val; \ __gu_err; \ }) -- cgit v1.2.3-59-g8ed1b From 6fc321fd7dd91f0592f37503219196835314fbb7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 27 Apr 2007 16:01:25 +0200 Subject: [S390] cio/ipl: Clean interface between cio and ipl code. Clean interface between cio and ipl code, so Peter stops complaining. 
Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/ipl.c | 23 ++++++++++++++++++++++- drivers/s390/cio/cio.c | 38 ++++++++++---------------------------- include/asm-s390/cio.h | 7 +++++++ include/asm-s390/ipl.h | 3 +-- 4 files changed, 40 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index f731185bf2bd..a83cf1fdd8f5 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -47,7 +47,7 @@ enum ipl_type { * Must be in data section since the bss section * is not cleared when these are accessed. */ -u16 ipl_devno __attribute__((__section__(".data"))) = 0; +static u16 ipl_devno __attribute__((__section__(".data"))) = 0; u32 ipl_flags __attribute__((__section__(".data"))) = 0; static char *ipl_type_str(enum ipl_type type) @@ -1038,6 +1038,27 @@ static int __init s390_ipl_init(void) __initcall(s390_ipl_init); +void __init ipl_save_parameters(void) +{ + struct cio_iplinfo iplinfo; + unsigned int *ipl_ptr; + void *src, *dst; + + if (cio_get_iplinfo(&iplinfo)) + return; + + ipl_devno = iplinfo.devno; + ipl_flags |= IPL_DEVNO_VALID; + if (!iplinfo.is_qdio) + return; + ipl_flags |= IPL_PARMBLOCK_VALID; + ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; + src = (void *)(unsigned long)*ipl_ptr; + dst = (void *)IPL_PARMBLOCK_ORIGIN; + memmove(dst, src, PAGE_SIZE); + *ipl_ptr = IPL_PARMBLOCK_ORIGIN; +} + static LIST_HEAD(rcall); static DEFINE_MUTEX(rcall_mutex); diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9cb129ab5be5..21af446c1f2d 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1048,37 +1048,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid) do_reipl_asm(*((__u32*)&schid)); } -static struct schib __initdata ipl_schib; - -/* - * ipl_save_parameters gets called very early. It is not allowed to access - * anything in the bss section at all. The bss section is not cleared yet, - * but may contain some ipl parameters written by the firmware. - * These parameters (if present) are copied to 0x2000. - * To avoid corruption of the ipl parameters, all variables used by this - * function must reside on the stack or in the data section. 
- */ -void ipl_save_parameters(void) +int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) { struct subchannel_id schid; - unsigned int *ipl_ptr; - void *src, *dst; + struct schib schib; schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; if (!schid.one) - return; - if (stsch(schid, &ipl_schib)) - return; - if (!ipl_schib.pmcw.dnv) - return; - ipl_devno = ipl_schib.pmcw.dev; - ipl_flags |= IPL_DEVNO_VALID; - if (!ipl_schib.pmcw.qf) - return; - ipl_flags |= IPL_PARMBLOCK_VALID; - ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; - src = (void *)(unsigned long)*ipl_ptr; - dst = (void *)IPL_PARMBLOCK_ORIGIN; - memmove(dst, src, PAGE_SIZE); - *ipl_ptr = IPL_PARMBLOCK_ORIGIN; + return -ENODEV; + if (stsch(schid, &schib)) + return -ENODEV; + if (!schib.pmcw.dnv) + return -ENODEV; + iplinfo->devno = schib.pmcw.dev; + iplinfo->is_qdio = schib.pmcw.qf; + return 0; } diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h index d92785030980..0db017bc7d09 100644 --- a/include/asm-s390/cio.h +++ b/include/asm-s390/cio.h @@ -292,6 +292,13 @@ extern void css_schedule_reprobe(void); extern void reipl_ccw_dev(struct ccw_dev_id *id); +struct cio_iplinfo { + u16 devno; + int is_qdio; +}; + +extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo); + #endif #endif diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h index 0eb64083480a..15bb0b529551 100644 --- a/include/asm-s390/ipl.h +++ b/include/asm-s390/ipl.h @@ -74,10 +74,9 @@ struct ipl_parameter_block { } __attribute__((packed)); /* - * IPL validity flags and parameters as detected in head.S + * IPL validity flags */ extern u32 ipl_flags; -extern u16 ipl_devno; extern u32 dump_prefix_page; extern void do_reipl(void); -- cgit v1.2.3-59-g8ed1b From e5854a5839fa426a7873f038080f63587de5f1f1 Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter Date: Fri, 27 Apr 2007 16:01:31 +0200 Subject: [S390] cio: Channel-path configure function. Add a new attribute to the channel-path sysfs directory through which channel-path configure operations can be triggered. Also listen for hardware events requesting channel-path configure operations and process them accordingly. 
Signed-off-by: Peter Oberparleiter Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- drivers/s390/char/Makefile | 2 +- drivers/s390/char/sclp_chp.c | 196 +++++++++++++++++++++++++++++++++ drivers/s390/cio/chp.c | 250 +++++++++++++++++++++++++++++++++++++++++- drivers/s390/cio/chp.h | 20 +++- drivers/s390/cio/chpid.h | 51 --------- drivers/s390/cio/chsc.c | 44 +++++++- drivers/s390/cio/chsc.h | 2 +- drivers/s390/cio/cio.c | 1 + drivers/s390/cio/cio.h | 1 + drivers/s390/cio/css.h | 4 +- drivers/s390/cio/device_fsm.c | 2 +- drivers/s390/cio/device_ops.c | 2 +- drivers/s390/cio/ioasm.h | 2 +- include/asm-s390/chpid.h | 53 +++++++++ include/asm-s390/cio.h | 1 + include/asm-s390/sclp.h | 12 ++ 16 files changed, 581 insertions(+), 62 deletions(-) create mode 100644 drivers/s390/char/sclp_chp.c delete mode 100644 drivers/s390/cio/chpid.h create mode 100644 include/asm-s390/chpid.h (limited to 'include') diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 293e667b50f2..5fd581c22db3 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -3,7 +3,7 @@ # obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ - sclp_info.o + sclp_info.o sclp_chp.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c new file mode 100644 index 000000000000..a66b914519b5 --- /dev/null +++ b/drivers/s390/char/sclp_chp.c @@ -0,0 +1,196 @@ +/* + * drivers/s390/char/sclp_chp.c + * + * Copyright IBM Corp. 2007 + * Author(s): Peter Oberparleiter + */ + +#include +#include +#include +#include +#include +#include + +#include "sclp.h" + +#define TAG "sclp_chp: " + +#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001 +#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001 +#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001 + +static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid) +{ + return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8; +} + +static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid) +{ + return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8; +} + +static void chp_callback(struct sclp_req *req, void *data) +{ + struct completion *completion = data; + + complete(completion); +} + +struct chp_cfg_sccb { + struct sccb_header header; + u8 ccm; + u8 reserved[6]; + u8 cssid; +} __attribute__((packed)); + +struct chp_cfg_data { + struct chp_cfg_sccb sccb; + struct sclp_req req; + struct completion completion; +} __attribute__((packed)); + +static int do_configure(sclp_cmdw_t cmd) +{ + struct chp_cfg_data *data; + int rc; + + /* Prepare sccb. */ + data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!data) + return -ENOMEM; + data->sccb.header.length = sizeof(struct chp_cfg_sccb); + data->req.command = cmd; + data->req.sccb = &(data->sccb); + data->req.status = SCLP_REQ_FILLED; + data->req.callback = chp_callback; + data->req.callback_data = &(data->completion); + init_completion(&data->completion); + + /* Perform sclp request. 
*/ + rc = sclp_add_request(&(data->req)); + if (rc) + goto out; + wait_for_completion(&data->completion); + + /* Check response .*/ + if (data->req.status != SCLP_REQ_DONE) { + printk(KERN_WARNING TAG "configure channel-path request failed " + "(status=0x%02x)\n", data->req.status); + rc = -EIO; + goto out; + } + switch (data->sccb.header.response_code) { + case 0x0020: + case 0x0120: + case 0x0440: + case 0x0450: + break; + default: + printk(KERN_WARNING TAG "configure channel-path failed " + "(cmd=0x%08x, response=0x%04x)\n", cmd, + data->sccb.header.response_code); + rc = -EIO; + break; + } +out: + free_page((unsigned long) data); + + return rc; +} + +/** + * sclp_chp_configure - perform configure channel-path sclp command + * @chpid: channel-path ID + * + * Perform configure channel-path command sclp command for specified chpid. + * Return 0 after command successfully finished, non-zero otherwise. + */ +int sclp_chp_configure(struct chp_id chpid) +{ + return do_configure(get_configure_cmdw(chpid)); +} + +/** + * sclp_chp_deconfigure - perform deconfigure channel-path sclp command + * @chpid: channel-path ID + * + * Perform deconfigure channel-path command sclp command for specified chpid + * and wait for completion. On success return 0. Return non-zero otherwise. + */ +int sclp_chp_deconfigure(struct chp_id chpid) +{ + return do_configure(get_deconfigure_cmdw(chpid)); +} + +struct chp_info_sccb { + struct sccb_header header; + u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; + u8 standby[SCLP_CHP_INFO_MASK_SIZE]; + u8 configured[SCLP_CHP_INFO_MASK_SIZE]; + u8 ccm; + u8 reserved[6]; + u8 cssid; +} __attribute__((packed)); + +struct chp_info_data { + struct chp_info_sccb sccb; + struct sclp_req req; + struct completion completion; +} __attribute__((packed)); + +/** + * sclp_chp_read_info - perform read channel-path information sclp command + * @info: resulting channel-path information data + * + * Perform read channel-path information sclp command and wait for completion. + * On success, store channel-path information in @info and return 0. Return + * non-zero otherwise. + */ +int sclp_chp_read_info(struct sclp_chp_info *info) +{ + struct chp_info_data *data; + int rc; + + /* Prepare sccb. */ + data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!data) + return -ENOMEM; + data->sccb.header.length = sizeof(struct chp_info_sccb); + data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION; + data->req.sccb = &(data->sccb); + data->req.status = SCLP_REQ_FILLED; + data->req.callback = chp_callback; + data->req.callback_data = &(data->completion); + init_completion(&data->completion); + + /* Perform sclp request. 
*/ + rc = sclp_add_request(&(data->req)); + if (rc) + goto out; + wait_for_completion(&data->completion); + + /* Check response .*/ + if (data->req.status != SCLP_REQ_DONE) { + printk(KERN_WARNING TAG "read channel-path info request failed " + "(status=0x%02x)\n", data->req.status); + rc = -EIO; + goto out; + } + if (data->sccb.header.response_code != 0x0010) { + printk(KERN_WARNING TAG "read channel-path info failed " + "(response=0x%04x)\n", data->sccb.header.response_code); + rc = -EIO; + goto out; + } + memcpy(info->recognized, data->sccb.recognized, + SCLP_CHP_INFO_MASK_SIZE); + memcpy(info->standby, data->sccb.standby, + SCLP_CHP_INFO_MASK_SIZE); + memcpy(info->configured, data->sccb.configured, + SCLP_CHP_INFO_MASK_SIZE); +out: + free_page((unsigned long) data); + + return rc; +} diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 8a5839147f8a..0e92c8c89860 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -10,9 +10,14 @@ #include #include #include +#include +#include +#include +#include #include +#include +#include -#include "chpid.h" #include "cio.h" #include "css.h" #include "ioasm.h" @@ -20,6 +25,32 @@ #include "chp.h" #define to_channelpath(device) container_of(device, struct channel_path, dev) +#define CHP_INFO_UPDATE_INTERVAL 1*HZ + +enum cfg_task_t { + cfg_none, + cfg_configure, + cfg_deconfigure +}; + +/* Map for pending configure tasks. */ +static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1]; +static DEFINE_MUTEX(cfg_lock); +static int cfg_busy; + +/* Map for channel-path status. */ +static struct sclp_chp_info chp_info; +static DEFINE_MUTEX(info_lock); + +/* Time after which channel-path status may be outdated. */ +static unsigned long chp_info_expires; + +/* Workqueue to perform pending configure tasks. */ +static struct workqueue_struct *chp_wq; +static struct work_struct cfg_work; + +/* Wait queue for configure completion events. */ +static wait_queue_head_t cfg_wait_queue; /* Return channel_path struct for given chpid. 
*/ static inline struct channel_path *chpid_to_chp(struct chp_id chpid) @@ -251,6 +282,43 @@ static ssize_t chp_status_write(struct device *dev, static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); +static ssize_t chp_configure_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *cp; + int status; + + cp = container_of(dev, struct channel_path, dev); + status = chp_info_get_status(cp->chpid); + if (status < 0) + return status; + + return snprintf(buf, PAGE_SIZE, "%d\n", status); +} + +static int cfg_wait_idle(void); + +static ssize_t chp_configure_write(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct channel_path *cp; + int val; + char delim; + + if (sscanf(buf, "%d %c", &val, &delim) != 1) + return -EINVAL; + if (val != 0 && val != 1) + return -EINVAL; + cp = container_of(dev, struct channel_path, dev); + chp_cfg_schedule(cp->chpid, val); + cfg_wait_idle(); + + return count; +} + +static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write); + static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -293,6 +361,7 @@ static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); static struct attribute * chp_attrs[] = { &dev_attr_status.attr, + &dev_attr_configure.attr, &dev_attr_type.attr, &dev_attr_cmg.attr, &dev_attr_shared.attr, @@ -323,6 +392,8 @@ int chp_new(struct chp_id chpid) struct channel_path *chp; int ret; + if (chp_is_registered(chpid)) + return 0; chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); if (!chp) return -ENOMEM; @@ -435,3 +506,180 @@ int chp_process_crw(int id, int status) return 0; } } + +static inline int info_bit_num(struct chp_id id) +{ + return id.id + id.cssid * (__MAX_CHPID + 1); +} + +/* Force chp_info refresh on next call to info_validate(). */ +static void info_expire(void) +{ + mutex_lock(&info_lock); + chp_info_expires = jiffies - 1; + mutex_unlock(&info_lock); +} + +/* Ensure that chp_info is up-to-date. */ +static int info_update(void) +{ + int rc; + + mutex_lock(&info_lock); + rc = 0; + if (time_after(jiffies, chp_info_expires)) { + /* Data is too old, update. */ + rc = sclp_chp_read_info(&chp_info); + chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ; + } + mutex_unlock(&info_lock); + + return rc; +} + +/** + * chp_info_get_status - retrieve configure status of a channel-path + * @chpid: channel-path ID + * + * On success, return 0 for standby, 1 for configured, 2 for reserved, + * 3 for not recognized. Return negative error code on error. + */ +int chp_info_get_status(struct chp_id chpid) +{ + int rc; + int bit; + + rc = info_update(); + if (rc) + return rc; + + bit = info_bit_num(chpid); + mutex_lock(&info_lock); + if (!chp_test_bit(chp_info.recognized, bit)) + rc = CHP_STATUS_NOT_RECOGNIZED; + else if (chp_test_bit(chp_info.configured, bit)) + rc = CHP_STATUS_CONFIGURED; + else if (chp_test_bit(chp_info.standby, bit)) + rc = CHP_STATUS_STANDBY; + else + rc = CHP_STATUS_RESERVED; + mutex_unlock(&info_lock); + + return rc; +} + +/* Return configure task for chpid. */ +static enum cfg_task_t cfg_get_task(struct chp_id chpid) +{ + return chp_cfg_task[chpid.cssid][chpid.id]; +} + +/* Set configure task for chpid. */ +static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg) +{ + chp_cfg_task[chpid.cssid][chpid.id] = cfg; +} + +/* Perform one configure/deconfigure request. Reschedule work function until + * last request. 
*/ +static void cfg_func(struct work_struct *work) +{ + struct chp_id chpid; + enum cfg_task_t t; + + mutex_lock(&cfg_lock); + t = cfg_none; + chp_id_for_each(&chpid) { + t = cfg_get_task(chpid); + if (t != cfg_none) { + cfg_set_task(chpid, cfg_none); + break; + } + } + mutex_unlock(&cfg_lock); + + switch (t) { + case cfg_configure: + sclp_chp_configure(chpid); + info_expire(); + chsc_chp_online(chpid); + break; + case cfg_deconfigure: + sclp_chp_deconfigure(chpid); + info_expire(); + chsc_chp_offline(chpid); + break; + case cfg_none: + /* Get updated information after last change. */ + info_update(); + mutex_lock(&cfg_lock); + cfg_busy = 0; + mutex_unlock(&cfg_lock); + wake_up_interruptible(&cfg_wait_queue); + return; + } + queue_work(chp_wq, &cfg_work); +} + +/** + * chp_cfg_schedule - schedule chpid configuration request + * @chpid - channel-path ID + * @configure - Non-zero for configure, zero for deconfigure + * + * Schedule a channel-path configuration/deconfiguration request. + */ +void chp_cfg_schedule(struct chp_id chpid, int configure) +{ + CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id, + configure); + mutex_lock(&cfg_lock); + cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure); + cfg_busy = 1; + mutex_unlock(&cfg_lock); + queue_work(chp_wq, &cfg_work); +} + +/** + * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request + * @chpid - channel-path ID + * + * Cancel an active channel-path deconfiguration request if it has not yet + * been performed. + */ +void chp_cfg_cancel_deconfigure(struct chp_id chpid) +{ + CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id); + mutex_lock(&cfg_lock); + if (cfg_get_task(chpid) == cfg_deconfigure) + cfg_set_task(chpid, cfg_none); + mutex_unlock(&cfg_lock); +} + +static int cfg_wait_idle(void) +{ + if (wait_event_interruptible(cfg_wait_queue, !cfg_busy)) + return -ERESTARTSYS; + return 0; +} + +static int __init chp_init(void) +{ + struct chp_id chpid; + + chp_wq = create_singlethread_workqueue("cio_chp"); + if (!chp_wq) + return -ENOMEM; + INIT_WORK(&cfg_work, cfg_func); + init_waitqueue_head(&cfg_wait_queue); + if (info_update()) + return 0; + /* Register available channel-paths. */ + chp_id_for_each(&chpid) { + if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED) + chp_new(chpid); + } + + return 0; +} + +subsys_initcall(chp_init); diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index ac2b1a9c3bc2..862af69d9707 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h @@ -10,10 +10,23 @@ #include #include - -#include "chpid.h" +#include #include "chsc.h" +#define CHP_STATUS_STANDBY 0 +#define CHP_STATUS_CONFIGURED 1 +#define CHP_STATUS_RESERVED 2 +#define CHP_STATUS_NOT_RECOGNIZED 3 + +static inline int chp_test_bit(u8 *bitmap, int num) +{ + int byte = num >> 3; + int mask = 128 >> (num & 7); + + return (bitmap[byte] & mask) ? 
1 : 0; +} + + struct channel_path { struct chp_id chpid; int state; @@ -33,5 +46,8 @@ int chp_process_crw(int id, int available); void chp_remove_cmg_attr(struct channel_path *chp); int chp_add_cmg_attr(struct channel_path *chp); int chp_new(struct chp_id chpid); +void chp_cfg_schedule(struct chp_id chpid, int configure); +void chp_cfg_cancel_deconfigure(struct chp_id chpid); +int chp_info_get_status(struct chp_id chpid); #endif /* S390_CHP_H */ diff --git a/drivers/s390/cio/chpid.h b/drivers/s390/cio/chpid.h deleted file mode 100644 index 44cc00ed9b59..000000000000 --- a/drivers/s390/cio/chpid.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * drivers/s390/cio/chpid.h - * - * Copyright IBM Corp. 2007 - * Author(s): Peter Oberparleiter - */ - -#ifndef S390_CHP_ID_H -#define S390_CHP_ID_H S390_CHP_ID_H - -#include -#include -#include "css.h" - -struct chp_id { - u8 reserved1; - u8 cssid; - u8 reserved2; - u8 id; -} __attribute__((packed)); - -static inline void chp_id_init(struct chp_id *chpid) -{ - memset(chpid, 0, sizeof(struct chp_id)); -} - -static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b) -{ - return (a->id == b->id) && (a->cssid == b->cssid); -} - -static inline void chp_id_next(struct chp_id *chpid) -{ - if (chpid->id < __MAX_CHPID) - chpid->id++; - else { - chpid->id = 0; - chpid->cssid++; - } -} - -static inline int chp_id_is_valid(struct chp_id *chpid) -{ - return (chpid->cssid <= __MAX_CSSID); -} - - -#define chp_id_for_each(c) \ - for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c)) - -#endif /* S390_CHP_ID_H */ diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index d99f525eac08..3dec460bba27 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -15,12 +15,12 @@ #include #include +#include #include "css.h" #include "cio.h" #include "cio_debug.h" #include "ioasm.h" -#include "chpid.h" #include "chp.h" #include "chsc.h" @@ -498,6 +498,45 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) return rc; } +struct chp_config_data { + u8 map[32]; + u8 op; + u8 pc; +}; + +static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) +{ + struct chp_config_data *data; + struct chp_id chpid; + int num; + + CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); + if (sei_area->rs != 0) + return 0; + data = (struct chp_config_data *) &(sei_area->ccdf); + chp_id_init(&chpid); + for (num = 0; num <= __MAX_CHPID; num++) { + if (!chp_test_bit(data->map, num)) + continue; + chpid.id = num; + printk(KERN_WARNING "cio: processing configure event %d for " + "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id); + switch (data->op) { + case 0: + chp_cfg_schedule(chpid, 1); + break; + case 1: + chp_cfg_schedule(chpid, 0); + break; + case 2: + chp_cfg_cancel_deconfigure(chpid); + break; + } + } + + return 0; +} + static int chsc_process_sei(struct chsc_sei_area *sei_area) { int rc; @@ -514,6 +553,9 @@ static int chsc_process_sei(struct chsc_sei_area *sei_area) case 2: /* i/o resource accessibiliy */ rc = chsc_process_sei_res_acc(sei_area); break; + case 8: /* channel-path-configuration notification */ + rc = chsc_process_sei_chp_config(sei_area); + break; default: /* other stuff */ CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", sei_area->cc); diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 0e40defc6087..322586f27cc0 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -3,7 +3,7 @@ #include #include -#include "chpid.h" +#include #define CHSC_SDA_OC_MSS 0x2 diff --git 
a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 7dd0649c95d1..ea1defba5693 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "airq.h" #include "cio.h" #include "css.h" diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 35154a210357..e62ab5c52863 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -3,6 +3,7 @@ #include "schid.h" #include +#include /* * where we put the ssd info diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 4319e1ced791..b2b1a265c602 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -5,8 +5,10 @@ #include #include #include +#include #include +#include #include "schid.h" @@ -149,8 +151,6 @@ extern void css_reiterate_subchannels(void); #define __MAX_SUBCHANNEL 65535 #define __MAX_SSID 3 -#define __MAX_CHPID 255 -#define __MAX_CSSID 0 struct channel_subsystem { u8 cssid; diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index db3d1b990f58..d6226881d0df 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -15,6 +15,7 @@ #include #include +#include #include "cio.h" #include "cio_debug.h" @@ -22,7 +23,6 @@ #include "device.h" #include "chsc.h" #include "ioasm.h" -#include "chpid.h" #include "chp.h" int diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index d819ae2ee9ae..16f59fcb66b1 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -16,13 +16,13 @@ #include #include +#include #include "cio.h" #include "cio_debug.h" #include "css.h" #include "chsc.h" #include "device.h" -#include "chpid.h" #include "chp.h" int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 78c7db6daa84..7153dd959082 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -1,8 +1,8 @@ #ifndef S390_CIO_IOASM_H #define S390_CIO_IOASM_H +#include #include "schid.h" -#include "chpid.h" /* * TPI info structure diff --git a/include/asm-s390/chpid.h b/include/asm-s390/chpid.h new file mode 100644 index 000000000000..b203336fd892 --- /dev/null +++ b/include/asm-s390/chpid.h @@ -0,0 +1,53 @@ +/* + * drivers/s390/cio/chpid.h + * + * Copyright IBM Corp. 
2007 + * Author(s): Peter Oberparleiter + */ + +#ifndef _ASM_S390_CHPID_H +#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H + +#include +#include +#include + +#define __MAX_CHPID 255 + +struct chp_id { + u8 reserved1; + u8 cssid; + u8 reserved2; + u8 id; +} __attribute__((packed)); + +static inline void chp_id_init(struct chp_id *chpid) +{ + memset(chpid, 0, sizeof(struct chp_id)); +} + +static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b) +{ + return (a->id == b->id) && (a->cssid == b->cssid); +} + +static inline void chp_id_next(struct chp_id *chpid) +{ + if (chpid->id < __MAX_CHPID) + chpid->id++; + else { + chpid->id = 0; + chpid->cssid++; + } +} + +static inline int chp_id_is_valid(struct chp_id *chpid) +{ + return (chpid->cssid <= __MAX_CSSID); +} + + +#define chp_id_for_each(c) \ + for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c)) + +#endif /* _ASM_S390_CHPID_H */ diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h index 0db017bc7d09..f738d2827582 100644 --- a/include/asm-s390/cio.h +++ b/include/asm-s390/cio.h @@ -13,6 +13,7 @@ #ifdef __KERNEL__ #define LPM_ANYPATH 0xff +#define __MAX_CSSID 0 /* * subchannel status word diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h index 468b97018405..3996daaa8f54 100644 --- a/include/asm-s390/sclp.h +++ b/include/asm-s390/sclp.h @@ -9,6 +9,7 @@ #define _ASM_S390_SCLP_H #include +#include struct sccb_header { u16 length; @@ -33,7 +34,18 @@ struct sclp_readinfo_sccb { u8 _reserved3[4096 - 112]; /* 112-4095 */ } __attribute__((packed, aligned(4096))); +#define SCLP_CHP_INFO_MASK_SIZE 32 + +struct sclp_chp_info { + u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; + u8 standby[SCLP_CHP_INFO_MASK_SIZE]; + u8 configured[SCLP_CHP_INFO_MASK_SIZE]; +}; + extern struct sclp_readinfo_sccb s390_readinfo_sccb; extern void sclp_readinfo_early(void); +extern int sclp_chp_configure(struct chp_id chpid); +extern int sclp_chp_deconfigure(struct chp_id chpid); +extern int sclp_chp_read_info(struct sclp_chp_info *info); #endif /* _ASM_S390_SCLP_H */ -- cgit v1.2.3-59-g8ed1b From d76123eb357a4baa653714183df286c1bb99f707 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Fri, 27 Apr 2007 16:01:37 +0200 Subject: [S390] cio: ccwgroup register vs. unregister. Introduce a mutex for struct ccwgroup to prevent simuntaneous register/unregister on the same ccwgroup device. Signed-off-by: Cornelia Huck Signed-off-by: Martin Schwidefsky --- drivers/s390/cio/ccwgroup.c | 33 ++++++++++++++++++--------------- include/asm-s390/ccwgroup.h | 1 + 2 files changed, 19 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 5aeb68e732b0..e5ccda63e883 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + mutex_lock(&gdev->reg_mutex); __ccwgroup_remove_symlinks(gdev); device_unregister(dev); + mutex_unlock(&gdev->reg_mutex); } static ssize_t @@ -173,7 +175,8 @@ ccwgroup_create(struct device *root, return -ENOMEM; atomic_set(&gdev->onoff, 0); - + mutex_init(&gdev->reg_mutex); + mutex_lock(&gdev->reg_mutex); for (i = 0; i < argc; i++) { gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); @@ -183,12 +186,12 @@ ccwgroup_create(struct device *root, || gdev->cdev[i]->id.driver_info != gdev->cdev[0]->id.driver_info) { rc = -EINVAL; - goto free_dev; + goto error; } /* Don't allow a device to belong to more than one group. 
*/ if (gdev->cdev[i]->dev.driver_data) { rc = -EINVAL; - goto free_dev; + goto error; } gdev->cdev[i]->dev.driver_data = gdev; } @@ -203,9 +206,8 @@ ccwgroup_create(struct device *root, gdev->cdev[0]->dev.bus_id); rc = device_register(&gdev->dev); - if (rc) - goto free_dev; + goto error; get_device(&gdev->dev); rc = device_create_file(&gdev->dev, &dev_attr_ungroup); @@ -216,27 +218,21 @@ ccwgroup_create(struct device *root, rc = __ccwgroup_create_symlinks(gdev); if (!rc) { + mutex_unlock(&gdev->reg_mutex); put_device(&gdev->dev); return 0; } device_remove_file(&gdev->dev, &dev_attr_ungroup); device_unregister(&gdev->dev); error: - for (i = 0; i < argc; i++) - if (gdev->cdev[i]) { - put_device(&gdev->cdev[i]->dev); - gdev->cdev[i]->dev.driver_data = NULL; - } - put_device(&gdev->dev); - return rc; -free_dev: for (i = 0; i < argc; i++) if (gdev->cdev[i]) { if (gdev->cdev[i]->dev.driver_data == gdev) gdev->cdev[i]->dev.driver_data = NULL; put_device(&gdev->cdev[i]->dev); } - kfree(gdev); + mutex_unlock(&gdev->reg_mutex); + put_device(&gdev->dev); return rc; } @@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver) get_driver(&cdriver->driver); while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, __ccwgroup_match_all))) { - __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + + mutex_lock(&gdev->reg_mutex); + __ccwgroup_remove_symlinks(gdev); device_unregister(dev); + mutex_unlock(&gdev->reg_mutex); put_device(dev); } put_driver(&cdriver->driver); @@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) if (cdev->dev.driver_data) { gdev = (struct ccwgroup_device *)cdev->dev.driver_data; if (get_device(&gdev->dev)) { + mutex_lock(&gdev->reg_mutex); if (device_is_registered(&gdev->dev)) return gdev; + mutex_unlock(&gdev->reg_mutex); put_device(&gdev->dev); } return NULL; @@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev) if (gdev) { __ccwgroup_remove_symlinks(gdev); device_unregister(&gdev->dev); + mutex_unlock(&gdev->reg_mutex); put_device(&gdev->dev); } } diff --git a/include/asm-s390/ccwgroup.h b/include/asm-s390/ccwgroup.h index d2f9c0d53a97..925b3ddfa141 100644 --- a/include/asm-s390/ccwgroup.h +++ b/include/asm-s390/ccwgroup.h @@ -11,6 +11,7 @@ struct ccwgroup_device { CCWGROUP_ONLINE, } state; atomic_t onoff; + struct mutex reg_mutex; unsigned int count; /* number of attached slave devices */ struct device dev; /* master device */ struct ccw_device *cdev[0]; /* variable number, allocate as needed */ -- cgit v1.2.3-59-g8ed1b From bb11e3bdbac08f773a89f3ca287024a956ee8a12 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 27 Apr 2007 16:01:41 +0200 Subject: [S390] Improved oops output. This patch adds two improvements to the oops output. First it adds an additional line after the PSW which decodes the different fields of it. Second a disassembler is added that decodes the instructions surrounding the faulting PSW. 
The output of a test oops now looks like this: kernel BUG at init/main.c:419 illegal operation: 0001 [#1] CPU: 0 Not tainted Process swapper (pid: 0, task: 0000000000464968, ksp: 00000000004be000) Krnl PSW : 0700000180000000 00000000000120b6 (rest_init+0x36/0x38) R:0 T:1 IO:1 EX:1 Key:0 M:0 W:0 P:0 AS:0 CC:0 PM:0 EA:3 Krnl GPRS: 0000000000000003 00000000004ba017 0000000000000022 0000000000000001 000000000003a5f6 0000000000000000 00000000004be6a8 0000000000000000 0000000000000000 00000000004b8200 0000000000003a50 0000000000008000 0000000000516368 000000000033d008 00000000000120b2 00000000004bdee0 Krnl Code: 00000000000120a6: e3e0f0980024 stg %r14,152(%r15) 00000000000120ac: c0e500014296 brasl %r14,3a5d8 00000000000120b2: a7f40001 brc 15,120b4 >00000000000120b6: 0707 bcr 0,%r7 00000000000120b8: eb7ff0500024 stmg %r7,%r15,80(%r15) 00000000000120be: c0d000195825 larl %r13,33d108 00000000000120c4: a7f13f00 tmll %r15,16128 00000000000120c8: a7840001 brc 8,120ca Call Trace: ([<00000000000120b2>] rest_init+0x32/0x38) [<00000000004be614>] start_kernel+0x37c/0x410 [<0000000000012020>] _ehead+0x20/0x80 Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/Makefile | 2 +- arch/s390/kernel/dis.c | 1278 ++++++++++++++++++++++++++++++++++++++++++ arch/s390/kernel/traps.c | 55 +- include/asm-s390/processor.h | 1 + 4 files changed, 1297 insertions(+), 39 deletions(-) create mode 100644 arch/s390/kernel/dis.c (limited to 'include') diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 5492d25d7d69..3195d375bd51 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional obj-y := bitmap.o traps.o time.o process.o base.o early.o \ setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ - semaphore.o s390_ext.o debug.o irq.o ipl.o + semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c new file mode 100644 index 000000000000..dabaf98943d0 --- /dev/null +++ b/arch/s390/kernel/dis.c @@ -0,0 +1,1278 @@ +/* + * arch/s390/kernel/dis.c + * + * Disassemble s390 instructions. + * + * Copyright IBM Corp. 
2007 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_64BIT +#define ONELONG "%08lx: " +#else /* CONFIG_64BIT */ +#define ONELONG "%016lx: " +#endif /* CONFIG_64BIT */ + +#define OPERAND_GPR 0x1 /* Operand printed as %rx */ +#define OPERAND_FPR 0x2 /* Operand printed as %fx */ +#define OPERAND_AR 0x4 /* Operand printed as %ax */ +#define OPERAND_CR 0x8 /* Operand printed as %cx */ +#define OPERAND_DISP 0x10 /* Operand printed as displacement */ +#define OPERAND_BASE 0x20 /* Operand printed as base register */ +#define OPERAND_INDEX 0x40 /* Operand printed as index register */ +#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ +#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ +#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ + +enum { + UNUSED, /* Indicates the end of the operand list */ + R_8, /* GPR starting at position 8 */ + R_12, /* GPR starting at position 12 */ + R_16, /* GPR starting at position 16 */ + R_20, /* GPR starting at position 20 */ + R_24, /* GPR starting at position 24 */ + R_28, /* GPR starting at position 28 */ + R_32, /* GPR starting at position 32 */ + F_8, /* FPR starting at position 8 */ + F_12, /* FPR starting at position 12 */ + F_16, /* FPR starting at position 16 */ + F_20, /* FPR starting at position 16 */ + F_24, /* FPR starting at position 24 */ + F_28, /* FPR starting at position 28 */ + F_32, /* FPR starting at position 32 */ + A_8, /* Access reg. starting at position 8 */ + A_12, /* Access reg. starting at position 12 */ + A_24, /* Access reg. starting at position 24 */ + A_28, /* Access reg. starting at position 28 */ + C_8, /* Control reg. starting at position 8 */ + C_12, /* Control reg. starting at position 12 */ + B_16, /* Base register starting at position 16 */ + B_32, /* Base register starting at position 32 */ + X_12, /* Index register starting at position 12 */ + D_20, /* Displacement starting at position 20 */ + D_36, /* Displacement starting at position 36 */ + D20_20, /* 20 bit displacement starting at 20 */ + L4_8, /* 4 bit length starting at position 8 */ + L4_12, /* 4 bit length starting at position 12 */ + L8_8, /* 8 bit length starting at position 8 */ + U4_8, /* 4 bit unsigned value starting at 8 */ + U4_12, /* 4 bit unsigned value starting at 12 */ + U4_16, /* 4 bit unsigned value starting at 16 */ + U4_20, /* 4 bit unsigned value starting at 20 */ + U8_8, /* 8 bit unsigned value starting at 8 */ + U8_16, /* 8 bit unsigned value starting at 16 */ + I16_16, /* 16 bit signed value starting at 16 */ + U16_16, /* 16 bit unsigned value starting at 16 */ + J16_16, /* PC relative jump offset at 16 */ + J32_16, /* PC relative long offset at 16 */ + I32_16, /* 32 bit signed value starting at 16 */ + U32_16, /* 32 bit unsigned value starting at 16 */ + M_16, /* 4 bit optional mask starting at 16 */ + RO_28, /* optional GPR starting at position 28 */ +}; + +/* + * Enumeration of the different instruction formats. + * For details consult the principles of operation. 
+ */ +enum { + INSTR_INVALID, + INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, + INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, + INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, + INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, + INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR, + INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, + INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, + INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, + INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, + INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, + INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, + INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD, + INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD, + INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, + INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, + INSTR_S_00, INSTR_S_RD, +}; + +struct operand { + int bits; /* The number of bits in the operand. */ + int shift; /* The number of bits to shift. */ + int flags; /* One bit syntax flags. */ +}; + +struct insn { + const char name[5]; + unsigned char opfrag; + unsigned char format; +}; + +static const struct operand operands[] = +{ + [UNUSED] = { 0, 0, 0 }, + [R_8] = { 4, 8, OPERAND_GPR }, + [R_12] = { 4, 12, OPERAND_GPR }, + [R_16] = { 4, 16, OPERAND_GPR }, + [R_20] = { 4, 20, OPERAND_GPR }, + [R_24] = { 4, 24, OPERAND_GPR }, + [R_28] = { 4, 28, OPERAND_GPR }, + [R_32] = { 4, 32, OPERAND_GPR }, + [F_8] = { 4, 8, OPERAND_FPR }, + [F_12] = { 4, 12, OPERAND_FPR }, + [F_16] = { 4, 16, OPERAND_FPR }, + [F_20] = { 4, 16, OPERAND_FPR }, + [F_24] = { 4, 24, OPERAND_FPR }, + [F_28] = { 4, 28, OPERAND_FPR }, + [F_32] = { 4, 32, OPERAND_FPR }, + [A_8] = { 4, 8, OPERAND_AR }, + [A_12] = { 4, 12, OPERAND_AR }, + [A_24] = { 4, 24, OPERAND_AR }, + [A_28] = { 4, 28, OPERAND_AR }, + [C_8] = { 4, 8, OPERAND_CR }, + [C_12] = { 4, 12, OPERAND_CR }, + [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR }, + [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR }, + [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR }, + [D_20] = { 12, 20, OPERAND_DISP }, + [D_36] = { 12, 36, OPERAND_DISP }, + [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED }, + [L4_8] = { 4, 8, OPERAND_LENGTH }, + [L4_12] = { 4, 12, OPERAND_LENGTH }, + [L8_8] = { 8, 8, OPERAND_LENGTH }, + [U4_8] = { 4, 8, 0 }, + [U4_12] = { 4, 12, 0 }, + [U4_16] = { 4, 16, 0 }, + [U4_20] = { 4, 20, 0 }, + [U8_8] = { 8, 8, 0 }, + [U8_16] = { 8, 16, 0 }, + [I16_16] = { 16, 16, OPERAND_SIGNED }, + [U16_16] = { 16, 16, 0 }, + [J16_16] = { 16, 16, OPERAND_PCREL }, + [J32_16] = { 32, 16, OPERAND_PCREL }, + [I32_16] = { 32, 16, OPERAND_SIGNED }, + [U32_16] = { 32, 16, 0 }, + [M_16] = { 4, 16, 0 }, + [RO_28] = { 4, 28, OPERAND_GPR } +}; + +static const unsigned char formats[][7] = { + [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */ + [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */ + [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */ + [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */ + [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */ + [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */ + [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */ + [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */ + [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. 
tml */ + [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */ + [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */ + [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */ + [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */ + [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */ + [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */ + [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */ + [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */ + [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */ + [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */ + [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */ + [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */ + [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ + [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ + [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ + [INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */ + [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ + [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ + [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */ + [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */ + [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */ + [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */ + [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */ + [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */ + [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */ + [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */ + [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */ + [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */ + [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */ + [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */ + [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, + /* e.g. icmh */ + [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */ + [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */ + [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */ + [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */ + [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */ + [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */ + [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */ + [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */ + [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */ + [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, + /* e.g. madb */ + [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */ + [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */ + [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */ + [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */ + [INSTR_RX_URRD] = { 0x00, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */ + [INSTR_SI_URD] = { 0x00, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */ + [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */ + [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */ + [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, + /* e.g. mvc */ + [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, + /* e.g. srp */ + [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, + /* e.g. 
pack */ + [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, + /* e.g. mvck */ + [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, + /* e.g. plo */ + [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, + /* e.g. lmd */ + [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */ + [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */ + [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, + /* e.g. mvcos */ +}; + +static struct insn opcode[] = { +#ifdef CONFIG_64BIT + { "lmd", 0xef, INSTR_SS_RRRDRD3 }, +#endif + { "spm", 0x04, INSTR_RR_R0 }, + { "balr", 0x05, INSTR_RR_RR }, + { "bctr", 0x06, INSTR_RR_RR }, + { "bcr", 0x07, INSTR_RR_UR }, + { "svc", 0x0a, INSTR_RR_U0 }, + { "bsm", 0x0b, INSTR_RR_RR }, + { "bassm", 0x0c, INSTR_RR_RR }, + { "basr", 0x0d, INSTR_RR_RR }, + { "mvcl", 0x0e, INSTR_RR_RR }, + { "clcl", 0x0f, INSTR_RR_RR }, + { "lpr", 0x10, INSTR_RR_RR }, + { "lnr", 0x11, INSTR_RR_RR }, + { "ltr", 0x12, INSTR_RR_RR }, + { "lcr", 0x13, INSTR_RR_RR }, + { "nr", 0x14, INSTR_RR_RR }, + { "clr", 0x15, INSTR_RR_RR }, + { "or", 0x16, INSTR_RR_RR }, + { "xr", 0x17, INSTR_RR_RR }, + { "lr", 0x18, INSTR_RR_RR }, + { "cr", 0x19, INSTR_RR_RR }, + { "ar", 0x1a, INSTR_RR_RR }, + { "sr", 0x1b, INSTR_RR_RR }, + { "mr", 0x1c, INSTR_RR_RR }, + { "dr", 0x1d, INSTR_RR_RR }, + { "alr", 0x1e, INSTR_RR_RR }, + { "slr", 0x1f, INSTR_RR_RR }, + { "lpdr", 0x20, INSTR_RR_FF }, + { "lndr", 0x21, INSTR_RR_FF }, + { "ltdr", 0x22, INSTR_RR_FF }, + { "lcdr", 0x23, INSTR_RR_FF }, + { "hdr", 0x24, INSTR_RR_FF }, + { "ldxr", 0x25, INSTR_RR_FF }, + { "lrdr", 0x25, INSTR_RR_FF }, + { "mxr", 0x26, INSTR_RR_FF }, + { "mxdr", 0x27, INSTR_RR_FF }, + { "ldr", 0x28, INSTR_RR_FF }, + { "cdr", 0x29, INSTR_RR_FF }, + { "adr", 0x2a, INSTR_RR_FF }, + { "sdr", 0x2b, INSTR_RR_FF }, + { "mdr", 0x2c, INSTR_RR_FF }, + { "ddr", 0x2d, INSTR_RR_FF }, + { "awr", 0x2e, INSTR_RR_FF }, + { "swr", 0x2f, INSTR_RR_FF }, + { "lper", 0x30, INSTR_RR_FF }, + { "lner", 0x31, INSTR_RR_FF }, + { "lter", 0x32, INSTR_RR_FF }, + { "lcer", 0x33, INSTR_RR_FF }, + { "her", 0x34, INSTR_RR_FF }, + { "ledr", 0x35, INSTR_RR_FF }, + { "lrer", 0x35, INSTR_RR_FF }, + { "axr", 0x36, INSTR_RR_FF }, + { "sxr", 0x37, INSTR_RR_FF }, + { "ler", 0x38, INSTR_RR_FF }, + { "cer", 0x39, INSTR_RR_FF }, + { "aer", 0x3a, INSTR_RR_FF }, + { "ser", 0x3b, INSTR_RR_FF }, + { "mder", 0x3c, INSTR_RR_FF }, + { "mer", 0x3c, INSTR_RR_FF }, + { "der", 0x3d, INSTR_RR_FF }, + { "aur", 0x3e, INSTR_RR_FF }, + { "sur", 0x3f, INSTR_RR_FF }, + { "sth", 0x40, INSTR_RX_RRRD }, + { "la", 0x41, INSTR_RX_RRRD }, + { "stc", 0x42, INSTR_RX_RRRD }, + { "ic", 0x43, INSTR_RX_RRRD }, + { "ex", 0x44, INSTR_RX_RRRD }, + { "bal", 0x45, INSTR_RX_RRRD }, + { "bct", 0x46, INSTR_RX_RRRD }, + { "bc", 0x47, INSTR_RX_URRD }, + { "lh", 0x48, INSTR_RX_RRRD }, + { "ch", 0x49, INSTR_RX_RRRD }, + { "ah", 0x4a, INSTR_RX_RRRD }, + { "sh", 0x4b, INSTR_RX_RRRD }, + { "mh", 0x4c, INSTR_RX_RRRD }, + { "bas", 0x4d, INSTR_RX_RRRD }, + { "cvd", 0x4e, INSTR_RX_RRRD }, + { "cvb", 0x4f, INSTR_RX_RRRD }, + { "st", 0x50, INSTR_RX_RRRD }, + { "lae", 0x51, INSTR_RX_RRRD }, + { "n", 0x54, INSTR_RX_RRRD }, + { "cl", 0x55, INSTR_RX_RRRD }, + { "o", 0x56, INSTR_RX_RRRD }, + { "x", 0x57, INSTR_RX_RRRD }, + { "l", 0x58, INSTR_RX_RRRD }, + { "c", 0x59, INSTR_RX_RRRD }, + { "a", 0x5a, INSTR_RX_RRRD }, + { "s", 0x5b, INSTR_RX_RRRD }, + { "m", 0x5c, INSTR_RX_RRRD }, + { "d", 0x5d, INSTR_RX_RRRD }, + { "al", 0x5e, INSTR_RX_RRRD }, + { "sl", 0x5f, INSTR_RX_RRRD }, + { "std", 0x60, INSTR_RX_FRRD }, + { "mxd", 0x67, 
INSTR_RX_FRRD }, + { "ld", 0x68, INSTR_RX_FRRD }, + { "cd", 0x69, INSTR_RX_FRRD }, + { "ad", 0x6a, INSTR_RX_FRRD }, + { "sd", 0x6b, INSTR_RX_FRRD }, + { "md", 0x6c, INSTR_RX_FRRD }, + { "dd", 0x6d, INSTR_RX_FRRD }, + { "aw", 0x6e, INSTR_RX_FRRD }, + { "sw", 0x6f, INSTR_RX_FRRD }, + { "ste", 0x70, INSTR_RX_FRRD }, + { "ms", 0x71, INSTR_RX_RRRD }, + { "le", 0x78, INSTR_RX_FRRD }, + { "ce", 0x79, INSTR_RX_FRRD }, + { "ae", 0x7a, INSTR_RX_FRRD }, + { "se", 0x7b, INSTR_RX_FRRD }, + { "mde", 0x7c, INSTR_RX_FRRD }, + { "me", 0x7c, INSTR_RX_FRRD }, + { "de", 0x7d, INSTR_RX_FRRD }, + { "au", 0x7e, INSTR_RX_FRRD }, + { "su", 0x7f, INSTR_RX_FRRD }, + { "ssm", 0x80, INSTR_S_RD }, + { "lpsw", 0x82, INSTR_S_RD }, + { "diag", 0x83, INSTR_RS_RRRD }, + { "brxh", 0x84, INSTR_RSI_RRP }, + { "brxle", 0x85, INSTR_RSI_RRP }, + { "bxh", 0x86, INSTR_RS_RRRD }, + { "bxle", 0x87, INSTR_RS_RRRD }, + { "srl", 0x88, INSTR_RS_R0RD }, + { "sll", 0x89, INSTR_RS_R0RD }, + { "sra", 0x8a, INSTR_RS_R0RD }, + { "sla", 0x8b, INSTR_RS_R0RD }, + { "srdl", 0x8c, INSTR_RS_R0RD }, + { "sldl", 0x8d, INSTR_RS_R0RD }, + { "srda", 0x8e, INSTR_RS_R0RD }, + { "slda", 0x8f, INSTR_RS_R0RD }, + { "stm", 0x90, INSTR_RS_RRRD }, + { "tm", 0x91, INSTR_SI_URD }, + { "mvi", 0x92, INSTR_SI_URD }, + { "ts", 0x93, INSTR_S_RD }, + { "ni", 0x94, INSTR_SI_URD }, + { "cli", 0x95, INSTR_SI_URD }, + { "oi", 0x96, INSTR_SI_URD }, + { "xi", 0x97, INSTR_SI_URD }, + { "lm", 0x98, INSTR_RS_RRRD }, + { "trace", 0x99, INSTR_RS_RRRD }, + { "lam", 0x9a, INSTR_RS_AARD }, + { "stam", 0x9b, INSTR_RS_AARD }, + { "mvcle", 0xa8, INSTR_RS_RRRD }, + { "clcle", 0xa9, INSTR_RS_RRRD }, + { "stnsm", 0xac, INSTR_SI_URD }, + { "stosm", 0xad, INSTR_SI_URD }, + { "sigp", 0xae, INSTR_RS_RRRD }, + { "mc", 0xaf, INSTR_SI_URD }, + { "lra", 0xb1, INSTR_RX_RRRD }, + { "stctl", 0xb6, INSTR_RS_CCRD }, + { "lctl", 0xb7, INSTR_RS_CCRD }, + { "cs", 0xba, INSTR_RS_RRRD }, + { "cds", 0xbb, INSTR_RS_RRRD }, + { "clm", 0xbd, INSTR_RS_RURD }, + { "stcm", 0xbe, INSTR_RS_RURD }, + { "icm", 0xbf, INSTR_RS_RURD }, + { "mvn", 0xd1, INSTR_SS_L0RDRD }, + { "mvc", 0xd2, INSTR_SS_L0RDRD }, + { "mvz", 0xd3, INSTR_SS_L0RDRD }, + { "nc", 0xd4, INSTR_SS_L0RDRD }, + { "clc", 0xd5, INSTR_SS_L0RDRD }, + { "oc", 0xd6, INSTR_SS_L0RDRD }, + { "xc", 0xd7, INSTR_SS_L0RDRD }, + { "mvck", 0xd9, INSTR_SS_RRRDRD }, + { "mvcp", 0xda, INSTR_SS_RRRDRD }, + { "mvcs", 0xdb, INSTR_SS_RRRDRD }, + { "tr", 0xdc, INSTR_SS_L0RDRD }, + { "trt", 0xdd, INSTR_SS_L0RDRD }, + { "ed", 0xde, INSTR_SS_L0RDRD }, + { "edmk", 0xdf, INSTR_SS_L0RDRD }, + { "pku", 0xe1, INSTR_SS_L0RDRD }, + { "unpku", 0xe2, INSTR_SS_L0RDRD }, + { "mvcin", 0xe8, INSTR_SS_L0RDRD }, + { "pka", 0xe9, INSTR_SS_L0RDRD }, + { "unpka", 0xea, INSTR_SS_L0RDRD }, + { "plo", 0xee, INSTR_SS_RRRDRD2 }, + { "srp", 0xf0, INSTR_SS_LIRDRD }, + { "mvo", 0xf1, INSTR_SS_LLRDRD }, + { "pack", 0xf2, INSTR_SS_LLRDRD }, + { "unpk", 0xf3, INSTR_SS_LLRDRD }, + { "zap", 0xf8, INSTR_SS_LLRDRD }, + { "cp", 0xf9, INSTR_SS_LLRDRD }, + { "ap", 0xfa, INSTR_SS_LLRDRD }, + { "sp", 0xfb, INSTR_SS_LLRDRD }, + { "mp", 0xfc, INSTR_SS_LLRDRD }, + { "dp", 0xfd, INSTR_SS_LLRDRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_01[] = { +#ifdef CONFIG_64BIT + { "sam64", 0x0e, INSTR_E }, +#endif + { "pr", 0x01, INSTR_E }, + { "upt", 0x02, INSTR_E }, + { "sckpf", 0x07, INSTR_E }, + { "tam", 0x0b, INSTR_E }, + { "sam24", 0x0c, INSTR_E }, + { "sam31", 0x0d, INSTR_E }, + { "trap2", 0xff, INSTR_E }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_a5[] = { +#ifdef CONFIG_64BIT + { 
"iihh", 0x00, INSTR_RI_RU }, + { "iihl", 0x01, INSTR_RI_RU }, + { "iilh", 0x02, INSTR_RI_RU }, + { "iill", 0x03, INSTR_RI_RU }, + { "nihh", 0x04, INSTR_RI_RU }, + { "nihl", 0x05, INSTR_RI_RU }, + { "nilh", 0x06, INSTR_RI_RU }, + { "nill", 0x07, INSTR_RI_RU }, + { "oihh", 0x08, INSTR_RI_RU }, + { "oihl", 0x09, INSTR_RI_RU }, + { "oilh", 0x0a, INSTR_RI_RU }, + { "oill", 0x0b, INSTR_RI_RU }, + { "llihh", 0x0c, INSTR_RI_RU }, + { "llihl", 0x0d, INSTR_RI_RU }, + { "llilh", 0x0e, INSTR_RI_RU }, + { "llill", 0x0f, INSTR_RI_RU }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_a7[] = { +#ifdef CONFIG_64BIT + { "tmhh", 0x02, INSTR_RI_RU }, + { "tmhl", 0x03, INSTR_RI_RU }, + { "brctg", 0x07, INSTR_RI_RP }, + { "lghi", 0x09, INSTR_RI_RI }, + { "aghi", 0x0b, INSTR_RI_RI }, + { "mghi", 0x0d, INSTR_RI_RI }, + { "cghi", 0x0f, INSTR_RI_RI }, +#endif + { "tmlh", 0x00, INSTR_RI_RU }, + { "tmll", 0x01, INSTR_RI_RU }, + { "brc", 0x04, INSTR_RI_UP }, + { "bras", 0x05, INSTR_RI_RP }, + { "brct", 0x06, INSTR_RI_RP }, + { "lhi", 0x08, INSTR_RI_RI }, + { "ahi", 0x0a, INSTR_RI_RI }, + { "mhi", 0x0c, INSTR_RI_RI }, + { "chi", 0x0e, INSTR_RI_RI }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b2[] = { +#ifdef CONFIG_64BIT + { "sske", 0x2b, INSTR_RRF_M0RR }, + { "stckf", 0x7c, INSTR_S_RD }, + { "cu21", 0xa6, INSTR_RRF_M0RR }, + { "cuutf", 0xa6, INSTR_RRF_M0RR }, + { "cu12", 0xa7, INSTR_RRF_M0RR }, + { "cutfu", 0xa7, INSTR_RRF_M0RR }, + { "stfle", 0xb0, INSTR_S_RD }, + { "lpswe", 0xb2, INSTR_S_RD }, +#endif + { "stidp", 0x02, INSTR_S_RD }, + { "sck", 0x04, INSTR_S_RD }, + { "stck", 0x05, INSTR_S_RD }, + { "sckc", 0x06, INSTR_S_RD }, + { "stckc", 0x07, INSTR_S_RD }, + { "spt", 0x08, INSTR_S_RD }, + { "stpt", 0x09, INSTR_S_RD }, + { "spka", 0x0a, INSTR_S_RD }, + { "ipk", 0x0b, INSTR_S_00 }, + { "ptlb", 0x0d, INSTR_S_00 }, + { "spx", 0x10, INSTR_S_RD }, + { "stpx", 0x11, INSTR_S_RD }, + { "stap", 0x12, INSTR_S_RD }, + { "sie", 0x14, INSTR_S_RD }, + { "pc", 0x18, INSTR_S_RD }, + { "sac", 0x19, INSTR_S_RD }, + { "cfc", 0x1a, INSTR_S_RD }, + { "ipte", 0x21, INSTR_RRE_RR }, + { "ipm", 0x22, INSTR_RRE_R0 }, + { "ivsk", 0x23, INSTR_RRE_RR }, + { "iac", 0x24, INSTR_RRE_R0 }, + { "ssar", 0x25, INSTR_RRE_R0 }, + { "epar", 0x26, INSTR_RRE_R0 }, + { "esar", 0x27, INSTR_RRE_R0 }, + { "pt", 0x28, INSTR_RRE_RR }, + { "iske", 0x29, INSTR_RRE_RR }, + { "rrbe", 0x2a, INSTR_RRE_RR }, + { "sske", 0x2b, INSTR_RRE_RR }, + { "tb", 0x2c, INSTR_RRE_0R }, + { "dxr", 0x2d, INSTR_RRE_F0 }, + { "pgin", 0x2e, INSTR_RRE_RR }, + { "pgout", 0x2f, INSTR_RRE_RR }, + { "csch", 0x30, INSTR_S_00 }, + { "hsch", 0x31, INSTR_S_00 }, + { "msch", 0x32, INSTR_S_RD }, + { "ssch", 0x33, INSTR_S_RD }, + { "stsch", 0x34, INSTR_S_RD }, + { "tsch", 0x35, INSTR_S_RD }, + { "tpi", 0x36, INSTR_S_RD }, + { "sal", 0x37, INSTR_S_00 }, + { "rsch", 0x38, INSTR_S_00 }, + { "stcrw", 0x39, INSTR_S_RD }, + { "stcps", 0x3a, INSTR_S_RD }, + { "rchp", 0x3b, INSTR_S_00 }, + { "schm", 0x3c, INSTR_S_00 }, + { "bakr", 0x40, INSTR_RRE_RR }, + { "cksm", 0x41, INSTR_RRE_RR }, + { "sqdr", 0x44, INSTR_RRE_F0 }, + { "sqer", 0x45, INSTR_RRE_F0 }, + { "stura", 0x46, INSTR_RRE_RR }, + { "msta", 0x47, INSTR_RRE_R0 }, + { "palb", 0x48, INSTR_RRE_00 }, + { "ereg", 0x49, INSTR_RRE_RR }, + { "esta", 0x4a, INSTR_RRE_RR }, + { "lura", 0x4b, INSTR_RRE_RR }, + { "tar", 0x4c, INSTR_RRE_AR }, + { "cpya", INSTR_RRE_AA }, + { "sar", 0x4e, INSTR_RRE_AR }, + { "ear", 0x4f, INSTR_RRE_RA }, + { "csp", 0x50, INSTR_RRE_RR }, + { "msr", 0x52, INSTR_RRE_RR }, + { "mvpg", 0x54, 
INSTR_RRE_RR }, + { "mvst", 0x55, INSTR_RRE_RR }, + { "cuse", 0x57, INSTR_RRE_RR }, + { "bsg", 0x58, INSTR_RRE_RR }, + { "bsa", 0x5a, INSTR_RRE_RR }, + { "clst", 0x5d, INSTR_RRE_RR }, + { "srst", 0x5e, INSTR_RRE_RR }, + { "cmpsc", 0x63, INSTR_RRE_RR }, + { "cmpsc", 0x63, INSTR_RRE_RR }, + { "siga", 0x74, INSTR_S_RD }, + { "xsch", 0x76, INSTR_S_00 }, + { "rp", 0x77, INSTR_S_RD }, + { "stcke", 0x78, INSTR_S_RD }, + { "sacf", 0x79, INSTR_S_RD }, + { "stsi", 0x7d, INSTR_S_RD }, + { "srnm", 0x99, INSTR_S_RD }, + { "stfpc", 0x9c, INSTR_S_RD }, + { "lfpc", 0x9d, INSTR_S_RD }, + { "tre", 0xa5, INSTR_RRE_RR }, + { "cuutf", 0xa6, INSTR_RRE_RR }, + { "cutfu", 0xa7, INSTR_RRE_RR }, + { "stfl", 0xb1, INSTR_S_RD }, + { "trap4", 0xff, INSTR_S_RD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b3[] = { +#ifdef CONFIG_64BIT + { "maylr", 0x38, INSTR_RRF_F0FF }, + { "mylr", 0x39, INSTR_RRF_F0FF }, + { "mayr", 0x3a, INSTR_RRF_F0FF }, + { "myr", 0x3b, INSTR_RRF_F0FF }, + { "mayhr", 0x3c, INSTR_RRF_F0FF }, + { "myhr", 0x3d, INSTR_RRF_F0FF }, + { "cegbr", 0xa4, INSTR_RRE_RR }, + { "cdgbr", 0xa5, INSTR_RRE_RR }, + { "cxgbr", 0xa6, INSTR_RRE_RR }, + { "cgebr", 0xa8, INSTR_RRF_U0RF }, + { "cgdbr", 0xa9, INSTR_RRF_U0RF }, + { "cgxbr", 0xaa, INSTR_RRF_U0RF }, + { "cfer", 0xb8, INSTR_RRF_U0RF }, + { "cfdr", 0xb9, INSTR_RRF_U0RF }, + { "cfxr", 0xba, INSTR_RRF_U0RF }, + { "cegr", 0xc4, INSTR_RRE_RR }, + { "cdgr", 0xc5, INSTR_RRE_RR }, + { "cxgr", 0xc6, INSTR_RRE_RR }, + { "cger", 0xc8, INSTR_RRF_U0RF }, + { "cgdr", 0xc9, INSTR_RRF_U0RF }, + { "cgxr", 0xca, INSTR_RRF_U0RF }, +#endif + { "lpebr", 0x00, INSTR_RRE_FF }, + { "lnebr", 0x01, INSTR_RRE_FF }, + { "ltebr", 0x02, INSTR_RRE_FF }, + { "lcebr", 0x03, INSTR_RRE_FF }, + { "ldebr", 0x04, INSTR_RRE_FF }, + { "lxdbr", 0x05, INSTR_RRE_FF }, + { "lxebr", 0x06, INSTR_RRE_FF }, + { "mxdbr", 0x07, INSTR_RRE_FF }, + { "kebr", 0x08, INSTR_RRE_FF }, + { "cebr", 0x09, INSTR_RRE_FF }, + { "aebr", 0x0a, INSTR_RRE_FF }, + { "sebr", 0x0b, INSTR_RRE_FF }, + { "mdebr", 0x0c, INSTR_RRE_FF }, + { "debr", 0x0d, INSTR_RRE_FF }, + { "maebr", 0x0e, INSTR_RRF_F0FF }, + { "msebr", 0x0f, INSTR_RRF_F0FF }, + { "lpdbr", 0x10, INSTR_RRE_FF }, + { "lndbr", 0x11, INSTR_RRE_FF }, + { "ltdbr", 0x12, INSTR_RRE_FF }, + { "lcdbr", 0x13, INSTR_RRE_FF }, + { "sqebr", 0x14, INSTR_RRE_FF }, + { "sqdbr", 0x15, INSTR_RRE_FF }, + { "sqxbr", 0x16, INSTR_RRE_FF }, + { "meebr", 0x17, INSTR_RRE_FF }, + { "kdbr", 0x18, INSTR_RRE_FF }, + { "cdbr", 0x19, INSTR_RRE_FF }, + { "adbr", 0x1a, INSTR_RRE_FF }, + { "sdbr", 0x1b, INSTR_RRE_FF }, + { "mdbr", 0x1c, INSTR_RRE_FF }, + { "ddbr", 0x1d, INSTR_RRE_FF }, + { "madbr", 0x1e, INSTR_RRF_F0FF }, + { "msdbr", 0x1f, INSTR_RRF_F0FF }, + { "lder", 0x24, INSTR_RRE_FF }, + { "lxdr", 0x25, INSTR_RRE_FF }, + { "lxer", 0x26, INSTR_RRE_FF }, + { "maer", 0x2e, INSTR_RRF_F0FF }, + { "mser", 0x2f, INSTR_RRF_F0FF }, + { "sqxr", 0x36, INSTR_RRE_FF }, + { "meer", 0x37, INSTR_RRE_FF }, + { "madr", 0x3e, INSTR_RRF_F0FF }, + { "msdr", 0x3f, INSTR_RRF_F0FF }, + { "lpxbr", 0x40, INSTR_RRE_FF }, + { "lnxbr", 0x41, INSTR_RRE_FF }, + { "ltxbr", 0x42, INSTR_RRE_FF }, + { "lcxbr", 0x43, INSTR_RRE_FF }, + { "ledbr", 0x44, INSTR_RRE_FF }, + { "ldxbr", 0x45, INSTR_RRE_FF }, + { "lexbr", 0x46, INSTR_RRE_FF }, + { "fixbr", 0x47, INSTR_RRF_U0FF }, + { "kxbr", 0x48, INSTR_RRE_FF }, + { "cxbr", 0x49, INSTR_RRE_FF }, + { "axbr", 0x4a, INSTR_RRE_FF }, + { "sxbr", 0x4b, INSTR_RRE_FF }, + { "mxbr", 0x4c, INSTR_RRE_FF }, + { "dxbr", 0x4d, INSTR_RRE_FF }, + { "tbedr", 0x50, INSTR_RRF_U0FF }, + { 
"tbdr", 0x51, INSTR_RRF_U0FF }, + { "diebr", 0x53, INSTR_RRF_FUFF }, + { "fiebr", 0x57, INSTR_RRF_U0FF }, + { "thder", 0x58, INSTR_RRE_RR }, + { "thdr", 0x59, INSTR_RRE_RR }, + { "didbr", 0x5b, INSTR_RRF_FUFF }, + { "fidbr", 0x5f, INSTR_RRF_U0FF }, + { "lpxr", 0x60, INSTR_RRE_FF }, + { "lnxr", 0x61, INSTR_RRE_FF }, + { "ltxr", 0x62, INSTR_RRE_FF }, + { "lcxr", 0x63, INSTR_RRE_FF }, + { "lxr", 0x65, INSTR_RRE_RR }, + { "lexr", 0x66, INSTR_RRE_FF }, + { "fixr", 0x67, INSTR_RRF_U0FF }, + { "cxr", 0x69, INSTR_RRE_FF }, + { "lzer", 0x74, INSTR_RRE_R0 }, + { "lzdr", 0x75, INSTR_RRE_R0 }, + { "lzxr", 0x76, INSTR_RRE_R0 }, + { "fier", 0x77, INSTR_RRF_U0FF }, + { "fidr", 0x7f, INSTR_RRF_U0FF }, + { "sfpc", 0x84, INSTR_RRE_RR_OPT }, + { "efpc", 0x8c, INSTR_RRE_RR_OPT }, + { "cefbr", 0x94, INSTR_RRE_RF }, + { "cdfbr", 0x95, INSTR_RRE_RF }, + { "cxfbr", 0x96, INSTR_RRE_RF }, + { "cfebr", 0x98, INSTR_RRF_U0RF }, + { "cfdbr", 0x99, INSTR_RRF_U0RF }, + { "cfxbr", 0x9a, INSTR_RRF_U0RF }, + { "cefr", 0xb4, INSTR_RRE_RF }, + { "cdfr", 0xb5, INSTR_RRE_RF }, + { "cxfr", 0xb6, INSTR_RRE_RF }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b9[] = { +#ifdef CONFIG_64BIT + { "lpgr", 0x00, INSTR_RRE_RR }, + { "lngr", 0x01, INSTR_RRE_RR }, + { "ltgr", 0x02, INSTR_RRE_RR }, + { "lcgr", 0x03, INSTR_RRE_RR }, + { "lgr", 0x04, INSTR_RRE_RR }, + { "lurag", 0x05, INSTR_RRE_RR }, + { "lgbr", 0x06, INSTR_RRE_RR }, + { "lghr", 0x07, INSTR_RRE_RR }, + { "agr", 0x08, INSTR_RRE_RR }, + { "sgr", 0x09, INSTR_RRE_RR }, + { "algr", 0x0a, INSTR_RRE_RR }, + { "slgr", 0x0b, INSTR_RRE_RR }, + { "msgr", 0x0c, INSTR_RRE_RR }, + { "dsgr", 0x0d, INSTR_RRE_RR }, + { "eregg", 0x0e, INSTR_RRE_RR }, + { "lrvgr", 0x0f, INSTR_RRE_RR }, + { "lpgfr", 0x10, INSTR_RRE_RR }, + { "lngfr", 0x11, INSTR_RRE_RR }, + { "ltgfr", 0x12, INSTR_RRE_RR }, + { "lcgfr", 0x13, INSTR_RRE_RR }, + { "lgfr", 0x14, INSTR_RRE_RR }, + { "llgfr", 0x16, INSTR_RRE_RR }, + { "llgtr", 0x17, INSTR_RRE_RR }, + { "agfr", 0x18, INSTR_RRE_RR }, + { "sgfr", 0x19, INSTR_RRE_RR }, + { "algfr", 0x1a, INSTR_RRE_RR }, + { "slgfr", 0x1b, INSTR_RRE_RR }, + { "msgfr", 0x1c, INSTR_RRE_RR }, + { "dsgfr", 0x1d, INSTR_RRE_RR }, + { "cgr", 0x20, INSTR_RRE_RR }, + { "clgr", 0x21, INSTR_RRE_RR }, + { "sturg", 0x25, INSTR_RRE_RR }, + { "lbr", 0x26, INSTR_RRE_RR }, + { "lhr", 0x27, INSTR_RRE_RR }, + { "cgfr", 0x30, INSTR_RRE_RR }, + { "clgfr", 0x31, INSTR_RRE_RR }, + { "bctgr", 0x46, INSTR_RRE_RR }, + { "ngr", 0x80, INSTR_RRE_RR }, + { "ogr", 0x81, INSTR_RRE_RR }, + { "xgr", 0x82, INSTR_RRE_RR }, + { "flogr", 0x83, INSTR_RRE_RR }, + { "llgcr", 0x84, INSTR_RRE_RR }, + { "llghr", 0x85, INSTR_RRE_RR }, + { "mlgr", 0x86, INSTR_RRE_RR }, + { "dlgr", 0x87, INSTR_RRE_RR }, + { "alcgr", 0x88, INSTR_RRE_RR }, + { "slbgr", 0x89, INSTR_RRE_RR }, + { "cspg", 0x8a, INSTR_RRE_RR }, + { "idte", 0x8e, INSTR_RRF_R0RR }, + { "llcr", 0x94, INSTR_RRE_RR }, + { "llhr", 0x95, INSTR_RRE_RR }, + { "esea", 0x9d, INSTR_RRE_R0 }, + { "lptea", 0xaa, INSTR_RRF_RURR }, + { "cu14", 0xb0, INSTR_RRF_M0RR }, + { "cu24", 0xb1, INSTR_RRF_M0RR }, + { "cu41", 0xb2, INSTR_RRF_M0RR }, + { "cu42", 0xb3, INSTR_RRF_M0RR }, +#endif + { "kmac", 0x1e, INSTR_RRE_RR }, + { "lrvr", 0x1f, INSTR_RRE_RR }, + { "km", 0x2e, INSTR_RRE_RR }, + { "kmc", 0x2f, INSTR_RRE_RR }, + { "kimd", 0x3e, INSTR_RRE_RR }, + { "klmd", 0x3f, INSTR_RRE_RR }, + { "epsw", 0x8d, INSTR_RRE_RR }, + { "trtt", 0x90, INSTR_RRE_RR }, + { "trtt", 0x90, INSTR_RRF_M0RR }, + { "trto", 0x91, INSTR_RRE_RR }, + { "trto", 0x91, INSTR_RRF_M0RR }, + { "trot", 0x92, 
INSTR_RRE_RR }, + { "trot", 0x92, INSTR_RRF_M0RR }, + { "troo", 0x93, INSTR_RRE_RR }, + { "troo", 0x93, INSTR_RRF_M0RR }, + { "mlr", 0x96, INSTR_RRE_RR }, + { "dlr", 0x97, INSTR_RRE_RR }, + { "alcr", 0x98, INSTR_RRE_RR }, + { "slbr", 0x99, INSTR_RRE_RR }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c0[] = { +#ifdef CONFIG_64BIT + { "lgfi", 0x01, INSTR_RIL_RI }, + { "xihf", 0x06, INSTR_RIL_RU }, + { "xilf", 0x07, INSTR_RIL_RU }, + { "iihf", 0x08, INSTR_RIL_RU }, + { "iilf", 0x09, INSTR_RIL_RU }, + { "nihf", 0x0a, INSTR_RIL_RU }, + { "nilf", 0x0b, INSTR_RIL_RU }, + { "oihf", 0x0c, INSTR_RIL_RU }, + { "oilf", 0x0d, INSTR_RIL_RU }, + { "llihf", 0x0e, INSTR_RIL_RU }, + { "llilf", 0x0f, INSTR_RIL_RU }, +#endif + { "larl", 0x00, INSTR_RIL_RP }, + { "brcl", 0x04, INSTR_RIL_UP }, + { "brasl", 0x05, INSTR_RIL_RP }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c2[] = { +#ifdef CONFIG_64BIT + { "slgfi", 0x04, INSTR_RIL_RU }, + { "slfi", 0x05, INSTR_RIL_RU }, + { "agfi", 0x08, INSTR_RIL_RI }, + { "afi", 0x09, INSTR_RIL_RI }, + { "algfi", 0x0a, INSTR_RIL_RU }, + { "alfi", 0x0b, INSTR_RIL_RU }, + { "cgfi", 0x0c, INSTR_RIL_RI }, + { "cfi", 0x0d, INSTR_RIL_RI }, + { "clgfi", 0x0e, INSTR_RIL_RU }, + { "clfi", 0x0f, INSTR_RIL_RU }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c8[] = { +#ifdef CONFIG_64BIT + { "mvcos", 0x00, INSTR_SSF_RRDRD }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_e3[] = { +#ifdef CONFIG_64BIT + { "ltg", 0x02, INSTR_RXY_RRRD }, + { "lrag", 0x03, INSTR_RXY_RRRD }, + { "lg", 0x04, INSTR_RXY_RRRD }, + { "cvby", 0x06, INSTR_RXY_RRRD }, + { "ag", 0x08, INSTR_RXY_RRRD }, + { "sg", 0x09, INSTR_RXY_RRRD }, + { "alg", 0x0a, INSTR_RXY_RRRD }, + { "slg", 0x0b, INSTR_RXY_RRRD }, + { "msg", 0x0c, INSTR_RXY_RRRD }, + { "dsg", 0x0d, INSTR_RXY_RRRD }, + { "cvbg", 0x0e, INSTR_RXY_RRRD }, + { "lrvg", 0x0f, INSTR_RXY_RRRD }, + { "lt", 0x12, INSTR_RXY_RRRD }, + { "lray", 0x13, INSTR_RXY_RRRD }, + { "lgf", 0x14, INSTR_RXY_RRRD }, + { "lgh", 0x15, INSTR_RXY_RRRD }, + { "llgf", 0x16, INSTR_RXY_RRRD }, + { "llgt", 0x17, INSTR_RXY_RRRD }, + { "agf", 0x18, INSTR_RXY_RRRD }, + { "sgf", 0x19, INSTR_RXY_RRRD }, + { "algf", 0x1a, INSTR_RXY_RRRD }, + { "slgf", 0x1b, INSTR_RXY_RRRD }, + { "msgf", 0x1c, INSTR_RXY_RRRD }, + { "dsgf", 0x1d, INSTR_RXY_RRRD }, + { "cg", 0x20, INSTR_RXY_RRRD }, + { "clg", 0x21, INSTR_RXY_RRRD }, + { "stg", 0x24, INSTR_RXY_RRRD }, + { "cvdy", 0x26, INSTR_RXY_RRRD }, + { "cvdg", 0x2e, INSTR_RXY_RRRD }, + { "strvg", 0x2f, INSTR_RXY_RRRD }, + { "cgf", 0x30, INSTR_RXY_RRRD }, + { "clgf", 0x31, INSTR_RXY_RRRD }, + { "strvh", 0x3f, INSTR_RXY_RRRD }, + { "bctg", 0x46, INSTR_RXY_RRRD }, + { "sty", 0x50, INSTR_RXY_RRRD }, + { "msy", 0x51, INSTR_RXY_RRRD }, + { "ny", 0x54, INSTR_RXY_RRRD }, + { "cly", 0x55, INSTR_RXY_RRRD }, + { "oy", 0x56, INSTR_RXY_RRRD }, + { "xy", 0x57, INSTR_RXY_RRRD }, + { "ly", 0x58, INSTR_RXY_RRRD }, + { "cy", 0x59, INSTR_RXY_RRRD }, + { "ay", 0x5a, INSTR_RXY_RRRD }, + { "sy", 0x5b, INSTR_RXY_RRRD }, + { "aly", 0x5e, INSTR_RXY_RRRD }, + { "sly", 0x5f, INSTR_RXY_RRRD }, + { "sthy", 0x70, INSTR_RXY_RRRD }, + { "lay", 0x71, INSTR_RXY_RRRD }, + { "stcy", 0x72, INSTR_RXY_RRRD }, + { "icy", 0x73, INSTR_RXY_RRRD }, + { "lb", 0x76, INSTR_RXY_RRRD }, + { "lgb", 0x77, INSTR_RXY_RRRD }, + { "lhy", 0x78, INSTR_RXY_RRRD }, + { "chy", 0x79, INSTR_RXY_RRRD }, + { "ahy", 0x7a, INSTR_RXY_RRRD }, + { "shy", 0x7b, INSTR_RXY_RRRD }, + { "ng", 0x80, INSTR_RXY_RRRD }, + { "og", 0x81, INSTR_RXY_RRRD }, + { "xg", 0x82, 
INSTR_RXY_RRRD }, + { "mlg", 0x86, INSTR_RXY_RRRD }, + { "dlg", 0x87, INSTR_RXY_RRRD }, + { "alcg", 0x88, INSTR_RXY_RRRD }, + { "slbg", 0x89, INSTR_RXY_RRRD }, + { "stpq", 0x8e, INSTR_RXY_RRRD }, + { "lpq", 0x8f, INSTR_RXY_RRRD }, + { "llgc", 0x90, INSTR_RXY_RRRD }, + { "llgh", 0x91, INSTR_RXY_RRRD }, + { "llc", 0x94, INSTR_RXY_RRRD }, + { "llh", 0x95, INSTR_RXY_RRRD }, +#endif + { "lrv", 0x1e, INSTR_RXY_RRRD }, + { "lrvh", 0x1f, INSTR_RXY_RRRD }, + { "strv", 0x3e, INSTR_RXY_RRRD }, + { "ml", 0x96, INSTR_RXY_RRRD }, + { "dl", 0x97, INSTR_RXY_RRRD }, + { "alc", 0x98, INSTR_RXY_RRRD }, + { "slb", 0x99, INSTR_RXY_RRRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_e5[] = { +#ifdef CONFIG_64BIT + { "strag", 0x02, INSTR_SSE_RDRD }, +#endif + { "lasp", 0x00, INSTR_SSE_RDRD }, + { "tprot", 0x01, INSTR_SSE_RDRD }, + { "mvcsk", 0x0e, INSTR_SSE_RDRD }, + { "mvcdk", 0x0f, INSTR_SSE_RDRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_eb[] = { +#ifdef CONFIG_64BIT + { "lmg", 0x04, INSTR_RSY_RRRD }, + { "srag", 0x0a, INSTR_RSY_RRRD }, + { "slag", 0x0b, INSTR_RSY_RRRD }, + { "srlg", 0x0c, INSTR_RSY_RRRD }, + { "sllg", 0x0d, INSTR_RSY_RRRD }, + { "tracg", 0x0f, INSTR_RSY_RRRD }, + { "csy", 0x14, INSTR_RSY_RRRD }, + { "rllg", 0x1c, INSTR_RSY_RRRD }, + { "clmh", 0x20, INSTR_RSY_RURD }, + { "clmy", 0x21, INSTR_RSY_RURD }, + { "stmg", 0x24, INSTR_RSY_RRRD }, + { "stctg", 0x25, INSTR_RSY_CCRD }, + { "stmh", 0x26, INSTR_RSY_RRRD }, + { "stcmh", 0x2c, INSTR_RSY_RURD }, + { "stcmy", 0x2d, INSTR_RSY_RURD }, + { "lctlg", 0x2f, INSTR_RSY_CCRD }, + { "csg", 0x30, INSTR_RSY_RRRD }, + { "cdsy", 0x31, INSTR_RSY_RRRD }, + { "cdsg", 0x3e, INSTR_RSY_RRRD }, + { "bxhg", 0x44, INSTR_RSY_RRRD }, + { "bxleg", 0x45, INSTR_RSY_RRRD }, + { "tmy", 0x51, INSTR_SIY_URD }, + { "mviy", 0x52, INSTR_SIY_URD }, + { "niy", 0x54, INSTR_SIY_URD }, + { "cliy", 0x55, INSTR_SIY_URD }, + { "oiy", 0x56, INSTR_SIY_URD }, + { "xiy", 0x57, INSTR_SIY_URD }, + { "icmh", 0x80, INSTR_RSE_RURD }, + { "icmh", 0x80, INSTR_RSY_RURD }, + { "icmy", 0x81, INSTR_RSY_RURD }, + { "clclu", 0x8f, INSTR_RSY_RRRD }, + { "stmy", 0x90, INSTR_RSY_RRRD }, + { "lmh", 0x96, INSTR_RSY_RRRD }, + { "lmy", 0x98, INSTR_RSY_RRRD }, + { "lamy", 0x9a, INSTR_RSY_AARD }, + { "stamy", 0x9b, INSTR_RSY_AARD }, +#endif + { "rll", 0x1d, INSTR_RSY_RRRD }, + { "mvclu", 0x8e, INSTR_RSY_RRRD }, + { "tp", 0xc0, INSTR_RSL_R0RD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_ec[] = { +#ifdef CONFIG_64BIT + { "brxhg", 0x44, INSTR_RIE_RRP }, + { "brxlg", 0x45, INSTR_RIE_RRP }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_ed[] = { +#ifdef CONFIG_64BIT + { "mayl", 0x38, INSTR_RXF_FRRDF }, + { "myl", 0x39, INSTR_RXF_FRRDF }, + { "may", 0x3a, INSTR_RXF_FRRDF }, + { "my", 0x3b, INSTR_RXF_FRRDF }, + { "mayh", 0x3c, INSTR_RXF_FRRDF }, + { "myh", 0x3d, INSTR_RXF_FRRDF }, + { "ley", 0x64, INSTR_RXY_FRRD }, + { "ldy", 0x65, INSTR_RXY_FRRD }, + { "stey", 0x66, INSTR_RXY_FRRD }, + { "stdy", 0x67, INSTR_RXY_FRRD }, +#endif + { "ldeb", 0x04, INSTR_RXE_FRRD }, + { "lxdb", 0x05, INSTR_RXE_FRRD }, + { "lxeb", 0x06, INSTR_RXE_FRRD }, + { "mxdb", 0x07, INSTR_RXE_FRRD }, + { "keb", 0x08, INSTR_RXE_FRRD }, + { "ceb", 0x09, INSTR_RXE_FRRD }, + { "aeb", 0x0a, INSTR_RXE_FRRD }, + { "seb", 0x0b, INSTR_RXE_FRRD }, + { "mdeb", 0x0c, INSTR_RXE_FRRD }, + { "deb", 0x0d, INSTR_RXE_FRRD }, + { "maeb", 0x0e, INSTR_RXF_FRRDF }, + { "mseb", 0x0f, INSTR_RXF_FRRDF }, + { "tceb", 0x10, INSTR_RXE_FRRD }, + { "tcdb", 0x11, INSTR_RXE_FRRD }, + { "tcxb", 0x12, 
INSTR_RXE_FRRD }, + { "sqeb", 0x14, INSTR_RXE_FRRD }, + { "sqdb", 0x15, INSTR_RXE_FRRD }, + { "meeb", 0x17, INSTR_RXE_FRRD }, + { "kdb", 0x18, INSTR_RXE_FRRD }, + { "cdb", 0x19, INSTR_RXE_FRRD }, + { "adb", 0x1a, INSTR_RXE_FRRD }, + { "sdb", 0x1b, INSTR_RXE_FRRD }, + { "mdb", 0x1c, INSTR_RXE_FRRD }, + { "ddb", 0x1d, INSTR_RXE_FRRD }, + { "madb", 0x1e, INSTR_RXF_FRRDF }, + { "msdb", 0x1f, INSTR_RXF_FRRDF }, + { "lde", 0x24, INSTR_RXE_FRRD }, + { "lxd", 0x25, INSTR_RXE_FRRD }, + { "lxe", 0x26, INSTR_RXE_FRRD }, + { "mae", 0x2e, INSTR_RXF_FRRDF }, + { "mse", 0x2f, INSTR_RXF_FRRDF }, + { "sqe", 0x34, INSTR_RXE_FRRD }, + { "mee", 0x37, INSTR_RXE_FRRD }, + { "mad", 0x3e, INSTR_RXF_FRRDF }, + { "msd", 0x3f, INSTR_RXF_FRRDF }, + { "", 0, INSTR_INVALID } +}; + +/* Extracts an operand value from an instruction. */ +static unsigned int extract_operand(unsigned char *code, + const struct operand *operand) +{ + unsigned int val; + int bits; + + /* Extract fragments of the operand byte for byte. */ + code += operand->shift / 8; + bits = (operand->shift & 7) + operand->bits; + val = 0; + do { + val <<= 8; + val |= (unsigned int) *code++; + bits -= 8; + } while (bits > 0); + val >>= -bits; + val &= ((1U << (operand->bits - 1)) << 1) - 1; + + /* Check for special long displacement case. */ + if (operand->bits == 20 && operand->shift == 20) + val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; + + /* Sign extend value if the operand is signed or pc relative. */ + if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) && + (val & (1U << (operand->bits - 1)))) + val |= (-1U << (operand->bits - 1)) << 1; + + /* Double value if the operand is pc relative. */ + if (operand->flags & OPERAND_PCREL) + val <<= 1; + + /* Length x in an instructions has real length x + 1. */ + if (operand->flags & OPERAND_LENGTH) + val++; + return val; +} + +static inline int insn_length(unsigned char code) +{ + return ((((int) code + 64) >> 7) + 1) << 1; +} + +static struct insn *find_insn(unsigned char *code) +{ + unsigned char opfrag = code[1]; + unsigned char opmask; + struct insn *table; + + switch (code[0]) { + case 0x01: + table = opcode_01; + break; + case 0xa5: + table = opcode_a5; + break; + case 0xa7: + table = opcode_a7; + break; + case 0xb2: + table = opcode_b2; + break; + case 0xb3: + table = opcode_b3; + break; + case 0xb9: + table = opcode_b9; + break; + case 0xc0: + table = opcode_c0; + break; + case 0xc2: + table = opcode_c2; + break; + case 0xc8: + table = opcode_c8; + break; + case 0xe3: + table = opcode_e3; + opfrag = code[5]; + break; + case 0xe5: + table = opcode_e5; + break; + case 0xeb: + table = opcode_eb; + opfrag = code[5]; + break; + case 0xec: + table = opcode_ec; + opfrag = code[5]; + break; + case 0xed: + table = opcode_ed; + opfrag = code[5]; + break; + default: + table = opcode; + opfrag = code[0]; + break; + } + while (table->format != INSTR_INVALID) { + opmask = formats[table->format][0]; + if (table->opfrag == (opfrag & opmask)) + return table; + table++; + } + return NULL; +} + +static int print_insn(char *buffer, unsigned char *code, unsigned long addr) +{ + struct insn *insn; + const unsigned char *ops; + const struct operand *operand; + unsigned int value; + char separator; + char *ptr; + + ptr = buffer; + insn = find_insn(code); + if (insn) { + ptr += sprintf(ptr, "%.5s\t", insn->name); + /* Extract the operands. 
*/ + separator = 0; + for (ops = formats[insn->format] + 1; *ops != 0; ops++) { + operand = operands + *ops; + value = extract_operand(code, operand); + if ((operand->flags & OPERAND_INDEX) && value == 0) + continue; + if ((operand->flags & OPERAND_BASE) && + value == 0 && separator == '(') { + separator = ','; + continue; + } + if (separator) + ptr += sprintf(ptr, "%c", separator); + if (operand->flags & OPERAND_GPR) + ptr += sprintf(ptr, "%%r%i", value); + else if (operand->flags & OPERAND_FPR) + ptr += sprintf(ptr, "%%f%i", value); + else if (operand->flags & OPERAND_AR) + ptr += sprintf(ptr, "%%a%i", value); + else if (operand->flags & OPERAND_CR) + ptr += sprintf(ptr, "%%c%i", value); + else if (operand->flags & OPERAND_PCREL) + ptr += sprintf(ptr, "%lx", value + addr); + else if (operand->flags & OPERAND_SIGNED) + ptr += sprintf(ptr, "%i", value); + else + ptr += sprintf(ptr, "%u", value); + if (operand->flags & OPERAND_DISP) + separator = '('; + else if (operand->flags & OPERAND_BASE) { + ptr += sprintf(ptr, ")"); + separator = ','; + } else + separator = ','; + } + } else + ptr += sprintf(ptr, "unknown"); + return (int) (ptr - buffer); +} + +void show_code(struct pt_regs *regs) +{ + char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; + unsigned char code[64]; + char buffer[64], *ptr; + mm_segment_t old_fs; + unsigned long addr; + int start, end, opsize, hops, i; + + /* Get a snapshot of the 64 bytes surrounding the fault address. */ + old_fs = get_fs(); + set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS); + for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { + addr = regs->psw.addr - 34 + start; + if (__copy_from_user(code + start - 2, + (char __user *) addr, 2)) + break; + } + for (end = 32; end < 64; end += 2) { + addr = regs->psw.addr + end - 32; + if (__copy_from_user(code + end, + (char __user *) addr, 2)) + break; + } + set_fs(old_fs); + /* Code snapshot useable ? */ + if ((regs->psw.addr & 1) || start >= end) { + printk("%s Code: Bad PSW.\n", mode); + return; + } + /* Find a starting point for the disassembly. */ + while (start < 32) { + hops = 0; + for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) { + if (!find_insn(code + start + i)) + break; + i += insn_length(code[start + i]); + } + if (start + i == 32) + /* Looks good, sequence ends at PSW. */ + break; + start += 2; + } + /* Decode the instructions. */ + ptr = buffer; + ptr += sprintf(ptr, "%s Code:", mode); + hops = 0; + while (start < end && hops < 8) { + *ptr++ = (start == 32) ? '>' : ' '; + addr = regs->psw.addr + start - 32; + ptr += sprintf(ptr, ONELONG, addr); + opsize = insn_length(code[start]); + if (start + opsize >= end) + break; + for (i = 0; i < opsize; i++) + ptr += sprintf(ptr, "%02x", code[start + i]); + *ptr++ = '\t'; + if (i < 6) + *ptr++ = '\t'; + ptr += print_insn(ptr, code + start, addr); + start += opsize; + printk(buffer); + ptr = buffer; + ptr += sprintf(ptr, "\n "); + hops++; + } + printk("\n"); +} diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index f0e5a320e2ec..a6540940190b 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -188,18 +188,31 @@ void dump_stack(void) EXPORT_SYMBOL(dump_stack); +static inline int mask_bits(struct pt_regs *regs, unsigned long bits) +{ + return (regs->psw.mask & bits) / ((~bits + 1) & bits); +} + void show_registers(struct pt_regs *regs) { - mm_segment_t old_fs; char *mode; - int i; mode = (regs->psw.mask & PSW_MASK_PSTATE) ? 
"User" : "Krnl"; printk("%s PSW : %p %p", mode, (void *) regs->psw.mask, (void *) regs->psw.addr); print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); - printk("%s GPRS: " FOURLONG, mode, + printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " + "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), + mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), + mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), + mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), + mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), + mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); +#ifdef CONFIG_64BIT + printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); +#endif + printk("\n%s GPRS: " FOURLONG, mode, regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); printk(" " FOURLONG, regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); @@ -208,41 +221,7 @@ void show_registers(struct pt_regs *regs) printk(" " FOURLONG, regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); -#if 0 - /* FIXME: this isn't needed any more but it changes the ksymoops - * input. To remove or not to remove ... */ - save_access_regs(regs->acrs); - printk("%s ACRS: %08x %08x %08x %08x\n", mode, - regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]); -#endif - - /* - * Print the first 20 byte of the instruction stream at the - * time of the fault. - */ - old_fs = get_fs(); - if (regs->psw.mask & PSW_MASK_PSTATE) - set_fs(USER_DS); - else - set_fs(KERNEL_DS); - printk("%s Code: ", mode); - for (i = 0; i < 20; i++) { - unsigned char c; - if (__get_user(c, (char __user *)(regs->psw.addr + i))) { - printk(" Bad PSW."); - break; - } - printk("%02x ", c); - } - set_fs(old_fs); - - printk("\n"); + show_code(regs); } /* This is called from fs/proc/array.c */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 33b80ced4bc1..96a6f80953a9 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -196,6 +196,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t); extern char *task_show_regs(struct task_struct *task, char *buffer); extern void show_registers(struct pt_regs *regs); +extern void show_code(struct pt_regs *regs); extern void show_trace(struct task_struct *task, unsigned long *sp); unsigned long get_wchan(struct task_struct *p); -- cgit v1.2.3-59-g8ed1b From c0007f1a65762eaf55633d403b380130ec60adad Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 27 Apr 2007 16:01:42 +0200 Subject: [S390] Use generic bug. Generic bug implementation for s390. Will increase the value of the console output on BUG() statements since registers r0-r5,r14 will not be clobbered by a printk() call that was previously done before the illegal instruction of BUG() was hit. Also implements an architecture specific WARN_ON(). Output of that could be increased but requires common code change. 
Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/Kconfig | 5 +++ arch/s390/defconfig | 2 ++ arch/s390/kernel/module.c | 4 ++- arch/s390/kernel/traps.c | 17 +++++++++-- arch/s390/kernel/vmlinux.lds.S | 10 +++++- include/asm-s390/bug.h | 69 ++++++++++++++++++++++++++++++++++-------- 6 files changed, 89 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0f293aa7b0fa..0f9517bc8e70 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -41,6 +41,11 @@ config GENERIC_HWEIGHT config GENERIC_TIME def_bool y +config GENERIC_BUG + bool + depends on BUG + default y + config NO_IOMEM def_bool y diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 741d2bbb2b37..80046d9e2a3b 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -12,6 +12,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y # CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_BUG=y CONFIG_NO_IOMEM=y CONFIG_S390=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" @@ -705,6 +706,7 @@ CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_SPINLOCK_SLEEP=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_LIST is not set diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 39d1dd752529..59b4e796680a 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -31,6 +31,7 @@ #include #include #include +#include #if 0 #define DEBUGP printk @@ -398,9 +399,10 @@ int module_finalize(const Elf_Ehdr *hdr, struct module *me) { vfree(me->arch.syminfo); - return 0; + return module_bug_finalize(hdr, sechdrs, me); } void module_arch_cleanup(struct module *mod) { + module_bug_cleanup(mod); } diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a6540940190b..49dec830373a 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -30,7 +30,7 @@ #include #include #include - +#include #include #include #include @@ -297,6 +297,11 @@ report_user_fault(long interruption_code, struct pt_regs *regs) #endif } +int is_valid_bugaddr(unsigned long addr) +{ + return 1; +} + static void __kprobes inline do_trap(long interruption_code, int signr, char *str, struct pt_regs *regs, siginfo_t *info) @@ -323,8 +328,14 @@ static void __kprobes inline do_trap(long interruption_code, int signr, fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; - else - die(str, regs, interruption_code); + else { + enum bug_trap_type btt; + + btt = report_bug(regs->psw.addr & PSW_ADDR_INSN); + if (btt == BUG_TRAP_TYPE_WARN) + return; + die(str, regs, interruption_code); + } } } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index c30716ae130c..418f6426a949 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -45,6 +45,8 @@ SECTIONS __ex_table : { *(__ex_table) } __stop___ex_table = .; + BUG_TABLE + .data : { /* Data */ *(.data) CONSTRUCTORS @@ -77,6 +79,12 @@ SECTIONS *(.init.text) _einittext = .; } + /* + * .exit.text is discarded at runtime, not link time, + * to deal with references from __bug_table + */ + .exit.text : { *(.exit.text) } + .init.data : { *(.init.data) } . 
= ALIGN(256); __setup_start = .; @@ -116,7 +124,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) *(.exit.data) *(.exitcall.exit) + *(.exit.data) *(.exitcall.exit) } /* Stabs debugging sections. */ diff --git a/include/asm-s390/bug.h b/include/asm-s390/bug.h index 876898363944..838684dc6d35 100644 --- a/include/asm-s390/bug.h +++ b/include/asm-s390/bug.h @@ -1,27 +1,70 @@ -#ifndef _S390_BUG_H -#define _S390_BUG_H +#ifndef _ASM_S390_BUG_H +#define _ASM_S390_BUG_H #include #ifdef CONFIG_BUG -static inline __attribute__((noreturn)) void __do_illegal_op(void) -{ -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3) - __builtin_trap(); +#ifdef CONFIG_64BIT +#define S390_LONG ".quad" #else - asm volatile(".long 0"); +#define S390_LONG ".long" #endif -} -#define BUG() do { \ - printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ - __do_illegal_op(); \ +#ifdef CONFIG_DEBUG_BUGVERBOSE + +#define __EMIT_BUG(x) do { \ + asm volatile( \ + "0: j 0b+2\n" \ + "1:\n" \ + ".section .rodata.str,\"aMS\",@progbits,1\n" \ + "2: .asciz \""__FILE__"\"\n" \ + ".previous\n" \ + ".section __bug_table,\"a\"\n" \ + "3:\t" S390_LONG "\t1b,2b\n" \ + " .short %0,%1\n" \ + " .org 3b+%2\n" \ + ".previous\n" \ + : : "i" (__LINE__), \ + "i" (x), \ + "i" (sizeof(struct bug_entry))); \ } while (0) +#else /* CONFIG_DEBUG_BUGVERBOSE */ + +#define __EMIT_BUG(x) do { \ + asm volatile( \ + "0: j 0b+2\n" \ + "1:\n" \ + ".section __bug_table,\"a\"\n" \ + "2:\t" S390_LONG "\t1b\n" \ + " .short %0\n" \ + " .org 2b+%1\n" \ + ".previous\n" \ + : : "i" (x), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + +#define BUG() __EMIT_BUG(0) + +#define WARN_ON(x) ({ \ + typeof(x) __ret_warn_on = (x); \ + if (__builtin_constant_p(__ret_warn_on)) { \ + if (__ret_warn_on) \ + __EMIT_BUG(BUGFLAG_WARNING); \ + } else { \ + if (unlikely(__ret_warn_on)) \ + __EMIT_BUG(BUGFLAG_WARNING); \ + } \ + unlikely(__ret_warn_on); \ +}) + #define HAVE_ARCH_BUG -#endif +#define HAVE_ARCH_WARN_ON +#endif /* CONFIG_BUG */ #include -#endif +#endif /* _ASM_S390_BUG_H */ -- cgit v1.2.3-59-g8ed1b From 411ed3225733dbd83b4cbaaa992ef80d6ec1534e Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Fri, 27 Apr 2007 16:01:49 +0200 Subject: [S390] zfcpdump support. s390 machines provide hardware support for creating Linux dumps on SCSI disks. For creating a dump a special purpose dump Linux is used. The first 32 MB of memory are saved by the hardware before the dump Linux is booted. Via an SCLP interface, the saved memory can be accessed from Linux. This patch exports memory and registers of the crashed Linux to userspace via a debugfs file. For more information refer to Documentation/s390/zfcpdump.txt, which is included in this patch. 
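As a rough illustration of the user-space side described above, the following minimal C sketch streams the exported dump image from the zcore debugfs file into a regular file. The debugfs mount point and the output path are assumptions made for the example, not part of this patch; a real dump tool would write to the prepared SCSI disk instead.

  #include <stdio.h>
  #include <stdlib.h>

  /* Assumed locations: debugfs mounted at /sys/kernel/debug, dump written
   * to a file on an already mounted target. Adjust both for a real setup. */
  #define ZCORE_MEM "/sys/kernel/debug/zcore/mem"
  #define DUMP_FILE "/mnt/dump.s390"

  int main(void)
  {
          static char buf[1 << 20];   /* 1 MiB copy buffer */
          FILE *in, *out;
          size_t n;

          in = fopen(ZCORE_MEM, "rb");
          out = fopen(DUMP_FILE, "wb");
          if (!in || !out) {
                  perror("open");
                  return EXIT_FAILURE;
          }
          /* The exported image is a 4K header followed by uncompressed memory;
           * copying it verbatim keeps the s390 standalone dump format intact. */
          while ((n = fread(buf, 1, sizeof(buf), in)) > 0) {
                  if (fwrite(buf, 1, n, out) != n) {
                          perror("write");
                          return EXIT_FAILURE;
                  }
          }
          fclose(out);
          fclose(in);
          return EXIT_SUCCESS;
  }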
Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- Documentation/s390/zfcpdump.txt | 87 ++++++ arch/s390/Kconfig | 8 + arch/s390/Makefile | 3 + arch/s390/defconfig | 1 + arch/s390/kernel/head64.S | 72 ++++- arch/s390/kernel/ipl.c | 232 +++++++++----- arch/s390/kernel/setup.c | 38 ++- arch/s390/kernel/smp.c | 62 ++++ drivers/s390/char/Makefile | 3 + drivers/s390/char/sclp.h | 2 + drivers/s390/char/sclp_sdias.c | 255 ++++++++++++++++ drivers/s390/char/zcore.c | 651 ++++++++++++++++++++++++++++++++++++++++ include/asm-s390/ipl.h | 32 ++ include/asm-s390/lowcore.h | 46 +++ include/asm-s390/sclp.h | 2 + include/asm-s390/setup.h | 2 + include/asm-s390/smp.h | 1 + 17 files changed, 1411 insertions(+), 86 deletions(-) create mode 100644 Documentation/s390/zfcpdump.txt create mode 100644 drivers/s390/char/sclp_sdias.c create mode 100644 drivers/s390/char/zcore.c (limited to 'include') diff --git a/Documentation/s390/zfcpdump.txt b/Documentation/s390/zfcpdump.txt new file mode 100644 index 000000000000..cf45d27c4608 --- /dev/null +++ b/Documentation/s390/zfcpdump.txt @@ -0,0 +1,87 @@ +s390 SCSI dump tool (zfcpdump) + +System z machines (z900 or higher) provide hardware support for creating system +dumps on SCSI disks. The dump process is initiated by booting a dump tool, which +has to create a dump of the current (probably crashed) Linux image. In order to +not overwrite memory of the crashed Linux with data of the dump tool, the +hardware saves some memory plus the register sets of the boot cpu before the +dump tool is loaded. There exists an SCLP hardware interface to obtain the saved +memory afterwards. Currently 32 MB are saved. + +This zfcpdump implementation consists of a Linux dump kernel together with +a userspace dump tool, which are loaded together into the saved memory region +below 32 MB. zfcpdump is installed on a SCSI disk using zipl (as contained in +the s390-tools package) to make the device bootable. The operator of a Linux +system can then trigger a SCSI dump by booting the SCSI disk, where zfcpdump +resides on. + +The kernel part of zfcpdump is implemented as a debugfs file under "zcore/mem", +which exports memory and registers of the crashed Linux in an s390 +standalone dump format. It can be used in the same way as e.g. /dev/mem. The +dump format defines a 4K header followed by plain uncompressed memory. The +register sets are stored in the prefix pages of the respective cpus. To build a +dump enabled kernel with the zcore driver, the kernel config option +CONFIG_ZFCPDUMP has to be set. When reading from "zcore/mem", the part of +memory, which has been saved by hardware is read by the driver via the SCLP +hardware interface. The second part is just copied from the non overwritten real +memory. + +The userspace application of zfcpdump can reside e.g. in an intitramfs or an +initrd. It reads from zcore/mem and writes the system dump to a file on a +SCSI disk. + +To build a zfcpdump kernel use the following settings in your kernel +configuration: + * CONFIG_ZFCPDUMP=y + * Enable ZFCP driver + * Enable SCSI driver + * Enable ext2 and ext3 filesystems + * Disable as many features as possible to keep the kernel small. + E.g. network support is not needed at all. + +To use the zfcpdump userspace application in an initramfs you have to do the +following: + + * Copy the zfcpdump executable somewhere into your Linux tree. + E.g. to "arch/s390/boot/zfcpdump. 
If you do not want to include + shared libraries, compile the tool with the "-static" gcc option. + * If you want to include e2fsck, add it to your source tree, too. The zfcpdump + application attempts to start /sbin/e2fsck from the ramdisk. + * Use an initramfs config file like the following: + + dir /dev 755 0 0 + nod /dev/console 644 0 0 c 5 1 + nod /dev/null 644 0 0 c 1 3 + nod /dev/sda1 644 0 0 b 8 1 + nod /dev/sda2 644 0 0 b 8 2 + nod /dev/sda3 644 0 0 b 8 3 + nod /dev/sda4 644 0 0 b 8 4 + nod /dev/sda5 644 0 0 b 8 5 + nod /dev/sda6 644 0 0 b 8 6 + nod /dev/sda7 644 0 0 b 8 7 + nod /dev/sda8 644 0 0 b 8 8 + nod /dev/sda9 644 0 0 b 8 9 + nod /dev/sda10 644 0 0 b 8 10 + nod /dev/sda11 644 0 0 b 8 11 + nod /dev/sda12 644 0 0 b 8 12 + nod /dev/sda13 644 0 0 b 8 13 + nod /dev/sda14 644 0 0 b 8 14 + nod /dev/sda15 644 0 0 b 8 15 + file /init arch/s390/boot/zfcpdump 755 0 0 + file /sbin/e2fsck arch/s390/boot/e2fsck 755 0 0 + dir /proc 755 0 0 + dir /sys 755 0 0 + dir /mnt 755 0 0 + dir /sbin 755 0 0 + + * Issue "make image" to build the zfcpdump image with initramfs. + +In a Linux distribution the zfcpdump enabled kernel image must be copied to +/usr/share/zfcpdump/zfcpdump.image, where the s390 zipl tool is looking for the +dump kernel when preparing a SCSI dump disk. + +If you use a ramdisk copy it to "/usr/share/zfcpdump/zfcpdump.rd". + +For more information on how to use zfcpdump refer to the s390 'Using the Dump +Tools book', which is available from +http://www.ibm.com/developerworks/linux/linux390. diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0f9517bc8e70..e6ec418093e5 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -519,6 +519,14 @@ config KEXEC current kernel, and to start another kernel. It is like a reboot but is independent of hardware/microcode support. +config ZFCPDUMP + tristate "zfcpdump support" + select SMP + default n + help + Select this option if you want to build an zfcpdump enabled kernel. + Refer to "Documentation/s390/zfcpdump.txt" for more details on this. + endmenu source "net/Kconfig" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index ece5adc05606..68441e0e74b6 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -105,6 +105,9 @@ install: vmlinux image: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +zfcpdump: + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 80046d9e2a3b..0e4da8a7d826 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -167,6 +167,7 @@ CONFIG_NO_IDLE_HZ=y CONFIG_NO_IDLE_HZ_INIT=y CONFIG_S390_HYPFS_FS=y CONFIG_KEXEC=y +# CONFIG_ZFCPDUMP is not set # # Networking diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 37010709fe68..a87b1976d409 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -39,7 +39,69 @@ startup_continue: basr %r13,0 # get base .LPG1: sll %r13,1 # remove high order bit srl %r13,1 - lhi %r1,1 # mode 1 = esame + +#ifdef CONFIG_ZFCPDUMP + + # check if we have been ipled using zfcp dump: + + tm 0xb9,0x01 # test if subchannel is enabled + jno .nodump # subchannel disabled + l %r1,0xb8 + la %r5,.Lipl_schib-.LPG1(%r13) + stsch 0(%r5) # get schib of subchannel + jne .nodump # schib not available + tm 5(%r5),0x01 # devno valid? + jno .nodump + tm 4(%r5),0x80 # qdio capable device? + jno .nodump + l %r2,20(%r0) # address of ipl parameter block + lhi %r3,0 + ic %r3,0x148(%r2) # get opt field + chi %r3,0x20 # load with dump? 
+ jne .nodump + + # store all prefix registers in case of load with dump: + + la %r7,0 # base register for 0 page + la %r8,0 # first cpu + l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array + ahi %r11,4 # skip boot cpu + lr %r12,%r11 + ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array + stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr +1: + cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ? + je 4f # if yes get next cpu +2: + lr %r9,%r7 + sigp %r9,%r8,0x9 # stop & store status of cpu + brc 8,3f # accepted + brc 4,4f # status stored: next cpu + brc 2,2b # busy: try again + brc 1,4f # not op: next cpu +3: + mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array + ahi %r11,4 # next element in prefix array + clr %r11,%r12 + je 5f # no more space in prefix array +4: + ahi %r8,1 # next cpu (r8 += 1) + cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ? + jl 1b # jump if not last cpu +5: + lhi %r1,2 # mode 2 = esame (dump) + j 6f + .align 4 +.Lipl_schib: + .rept 13 + .long 0 + .endr +.nodump: + lhi %r1,1 # mode 1 = esame (normal ipl) +6: +#else + lhi %r1,1 # mode 1 = esame (normal ipl) +#endif /* CONFIG_ZFCPDUMP */ mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero sigp %r1,%r0,0x12 # switch to esame mode @@ -149,6 +211,14 @@ startup_continue: .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lnop: .long 0x07000700 +#ifdef CONFIG_ZFCPDUMP +.Lcurrent_cpu: + .long 0x0 +.Llast_cpu: + .long 0x0000ffff +.Lpref_arr_ptr: + .long zfcpdump_prefix_array +#endif /* CONFIG_ZFCPDUMP */ .Lparmaddr: .quad PARMAREA .align 64 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index a83cf1fdd8f5..06833ac2b115 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -29,36 +29,21 @@ #define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm) #define SCCB_FLAG (s390_readinfo_sccb.flags) -enum ipl_type { - IPL_TYPE_NONE = 1, - IPL_TYPE_UNKNOWN = 2, - IPL_TYPE_CCW = 4, - IPL_TYPE_FCP = 8, - IPL_TYPE_NSS = 16, -}; - -#define IPL_NONE_STR "none" -#define IPL_UNKNOWN_STR "unknown" -#define IPL_CCW_STR "ccw" -#define IPL_FCP_STR "fcp" -#define IPL_NSS_STR "nss" - -/* - * Must be in data section since the bss section - * is not cleared when these are accessed. - */ -static u16 ipl_devno __attribute__((__section__(".data"))) = 0; -u32 ipl_flags __attribute__((__section__(".data"))) = 0; +#define IPL_UNKNOWN_STR "unknown" +#define IPL_CCW_STR "ccw" +#define IPL_FCP_STR "fcp" +#define IPL_FCP_DUMP_STR "fcp_dump" +#define IPL_NSS_STR "nss" static char *ipl_type_str(enum ipl_type type) { switch (type) { - case IPL_TYPE_NONE: - return IPL_NONE_STR; case IPL_TYPE_CCW: return IPL_CCW_STR; case IPL_TYPE_FCP: return IPL_FCP_STR; + case IPL_TYPE_FCP_DUMP: + return IPL_FCP_DUMP_STR; case IPL_TYPE_NSS: return IPL_NSS_STR; case IPL_TYPE_UNKNOWN: @@ -67,15 +52,55 @@ static char *ipl_type_str(enum ipl_type type) } } +enum dump_type { + DUMP_TYPE_NONE = 1, + DUMP_TYPE_CCW = 2, + DUMP_TYPE_FCP = 4, +}; + +#define DUMP_NONE_STR "none" +#define DUMP_CCW_STR "ccw" +#define DUMP_FCP_STR "fcp" + +static char *dump_type_str(enum dump_type type) +{ + switch (type) { + case DUMP_TYPE_NONE: + return DUMP_NONE_STR; + case DUMP_TYPE_CCW: + return DUMP_CCW_STR; + case DUMP_TYPE_FCP: + return DUMP_FCP_STR; + default: + return NULL; + } +} + +/* + * Must be in data section since the bss section + * is not cleared when these are accessed. 
+ */ +static u16 ipl_devno __attribute__((__section__(".data"))) = 0; +u32 ipl_flags __attribute__((__section__(".data"))) = 0; + enum ipl_method { - IPL_METHOD_NONE, - IPL_METHOD_CCW_CIO, - IPL_METHOD_CCW_DIAG, - IPL_METHOD_CCW_VM, - IPL_METHOD_FCP_RO_DIAG, - IPL_METHOD_FCP_RW_DIAG, - IPL_METHOD_FCP_RO_VM, - IPL_METHOD_NSS, + REIPL_METHOD_CCW_CIO, + REIPL_METHOD_CCW_DIAG, + REIPL_METHOD_CCW_VM, + REIPL_METHOD_FCP_RO_DIAG, + REIPL_METHOD_FCP_RW_DIAG, + REIPL_METHOD_FCP_RO_VM, + REIPL_METHOD_FCP_DUMP, + REIPL_METHOD_NSS, + REIPL_METHOD_DEFAULT, +}; + +enum dump_method { + DUMP_METHOD_NONE, + DUMP_METHOD_CCW_CIO, + DUMP_METHOD_CCW_DIAG, + DUMP_METHOD_CCW_VM, + DUMP_METHOD_FCP_DIAG, }; enum shutdown_action { @@ -107,15 +132,15 @@ static int diag308_set_works = 0; static int reipl_capabilities = IPL_TYPE_UNKNOWN; static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; -static enum ipl_method reipl_method = IPL_METHOD_NONE; +static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; static struct ipl_parameter_block *reipl_block_fcp; static struct ipl_parameter_block *reipl_block_ccw; static char reipl_nss_name[NSS_NAME_SIZE + 1]; -static int dump_capabilities = IPL_TYPE_NONE; -static enum ipl_type dump_type = IPL_TYPE_NONE; -static enum ipl_method dump_method = IPL_METHOD_NONE; +static int dump_capabilities = DUMP_TYPE_NONE; +static enum dump_type dump_type = DUMP_TYPE_NONE; +static enum dump_method dump_method = DUMP_METHOD_NONE; static struct ipl_parameter_block *dump_block_fcp; static struct ipl_parameter_block *dump_block_ccw; @@ -134,6 +159,7 @@ int diag308(unsigned long subcode, void *addr) : "d" (subcode) : "cc", "memory"); return _rc; } +EXPORT_SYMBOL_GPL(diag308); /* SYSFS */ @@ -197,7 +223,7 @@ static void make_attrs_ro(struct attribute **attrs) * ipl section */ -static enum ipl_type ipl_get_type(void) +static __init enum ipl_type get_ipl_type(void) { struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; @@ -211,12 +237,44 @@ static enum ipl_type ipl_get_type(void) return IPL_TYPE_UNKNOWN; if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP) return IPL_TYPE_UNKNOWN; + if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) + return IPL_TYPE_FCP_DUMP; return IPL_TYPE_FCP; } +void __init setup_ipl_info(void) +{ + ipl_info.type = get_ipl_type(); + switch (ipl_info.type) { + case IPL_TYPE_CCW: + ipl_info.data.ccw.dev_id.devno = ipl_devno; + ipl_info.data.ccw.dev_id.ssid = 0; + break; + case IPL_TYPE_FCP: + case IPL_TYPE_FCP_DUMP: + ipl_info.data.fcp.dev_id.devno = + IPL_PARMBLOCK_START->ipl_info.fcp.devno; + ipl_info.data.fcp.dev_id.ssid = 0; + ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; + ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; + break; + case IPL_TYPE_NSS: + strncpy(ipl_info.data.nss.name, kernel_nss_name, + sizeof(ipl_info.data.nss.name)); + break; + case IPL_TYPE_UNKNOWN: + default: + /* We have no info to copy */ + break; + } +} + +struct ipl_info ipl_info; +EXPORT_SYMBOL_GPL(ipl_info); + static ssize_t ipl_type_show(struct subsystem *subsys, char *page) { - return sprintf(page, "%s\n", ipl_type_str(ipl_get_type())); + return sprintf(page, "%s\n", ipl_type_str(ipl_info.type)); } static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); @@ -225,10 +283,11 @@ static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page) { struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - switch (ipl_get_type()) { + switch (ipl_info.type) { case IPL_TYPE_CCW: return sprintf(page, "0.0.%04x\n", ipl_devno); case IPL_TYPE_FCP: + case 
IPL_TYPE_FCP_DUMP: return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); default: return 0; @@ -485,23 +544,29 @@ static int reipl_set_type(enum ipl_type type) switch(type) { case IPL_TYPE_CCW: if (MACHINE_IS_VM) - reipl_method = IPL_METHOD_CCW_VM; + reipl_method = REIPL_METHOD_CCW_VM; else - reipl_method = IPL_METHOD_CCW_CIO; + reipl_method = REIPL_METHOD_CCW_CIO; break; case IPL_TYPE_FCP: if (diag308_set_works) - reipl_method = IPL_METHOD_FCP_RW_DIAG; + reipl_method = REIPL_METHOD_FCP_RW_DIAG; else if (MACHINE_IS_VM) - reipl_method = IPL_METHOD_FCP_RO_VM; + reipl_method = REIPL_METHOD_FCP_RO_VM; else - reipl_method = IPL_METHOD_FCP_RO_DIAG; + reipl_method = REIPL_METHOD_FCP_RO_DIAG; + break; + case IPL_TYPE_FCP_DUMP: + reipl_method = REIPL_METHOD_FCP_DUMP; break; case IPL_TYPE_NSS: - reipl_method = IPL_METHOD_NSS; + reipl_method = REIPL_METHOD_NSS; + break; + case IPL_TYPE_UNKNOWN: + reipl_method = REIPL_METHOD_DEFAULT; break; default: - reipl_method = IPL_METHOD_NONE; + BUG(); } reipl_type = type; return 0; @@ -579,22 +644,22 @@ static struct attribute_group dump_ccw_attr_group = { /* dump type */ -static int dump_set_type(enum ipl_type type) +static int dump_set_type(enum dump_type type) { if (!(dump_capabilities & type)) return -EINVAL; switch(type) { - case IPL_TYPE_CCW: + case DUMP_TYPE_CCW: if (MACHINE_IS_VM) - dump_method = IPL_METHOD_CCW_VM; + dump_method = DUMP_METHOD_CCW_VM; else - dump_method = IPL_METHOD_CCW_CIO; + dump_method = DUMP_METHOD_CCW_CIO; break; - case IPL_TYPE_FCP: - dump_method = IPL_METHOD_FCP_RW_DIAG; + case DUMP_TYPE_FCP: + dump_method = DUMP_METHOD_FCP_DIAG; break; default: - dump_method = IPL_METHOD_NONE; + dump_method = DUMP_METHOD_NONE; } dump_type = type; return 0; @@ -602,7 +667,7 @@ static int dump_set_type(enum ipl_type type) static ssize_t dump_type_show(struct subsystem *subsys, char *page) { - return sprintf(page, "%s\n", ipl_type_str(dump_type)); + return sprintf(page, "%s\n", dump_type_str(dump_type)); } static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, @@ -610,12 +675,12 @@ static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, { int rc = -EINVAL; - if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0) - rc = dump_set_type(IPL_TYPE_NONE); - else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0) - rc = dump_set_type(IPL_TYPE_CCW); - else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) - rc = dump_set_type(IPL_TYPE_FCP); + if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_NONE); + else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_CCW); + else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_FCP); return (rc != 0) ? 
rc : len; } @@ -664,14 +729,14 @@ void do_reipl(void) char loadparm[LOADPARM_LEN + 1]; switch (reipl_method) { - case IPL_METHOD_CCW_CIO: + case REIPL_METHOD_CCW_CIO: devid.devno = reipl_block_ccw->ipl_info.ccw.devno; - if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno) + if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno) diag308(DIAG308_IPL, NULL); devid.ssid = 0; reipl_ccw_dev(&devid); break; - case IPL_METHOD_CCW_VM: + case REIPL_METHOD_CCW_VM: reipl_get_ascii_loadparm(loadparm); if (strlen(loadparm) == 0) sprintf(buf, "IPL %X", @@ -681,30 +746,32 @@ void do_reipl(void) reipl_block_ccw->ipl_info.ccw.devno, loadparm); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_CCW_DIAG: + case REIPL_METHOD_CCW_DIAG: diag308(DIAG308_SET, reipl_block_ccw); diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RW_DIAG: + case REIPL_METHOD_FCP_RW_DIAG: diag308(DIAG308_SET, reipl_block_fcp); diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RO_DIAG: + case REIPL_METHOD_FCP_RO_DIAG: diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RO_VM: + case REIPL_METHOD_FCP_RO_VM: __cpcmd("IPL", NULL, 0, NULL); break; - case IPL_METHOD_NSS: + case REIPL_METHOD_NSS: sprintf(buf, "IPL %s", reipl_nss_name); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_NONE: - default: + case REIPL_METHOD_DEFAULT: if (MACHINE_IS_VM) __cpcmd("IPL", NULL, 0, NULL); diag308(DIAG308_IPL, NULL); break; + case REIPL_METHOD_FCP_DUMP: + default: + break; } signal_processor(smp_processor_id(), sigp_stop_and_store_status); } @@ -715,28 +782,28 @@ static void do_dump(void) static char buf[100]; switch (dump_method) { - case IPL_METHOD_CCW_CIO: + case DUMP_METHOD_CCW_CIO: smp_send_stop(); devid.devno = dump_block_ccw->ipl_info.ccw.devno; devid.ssid = 0; reipl_ccw_dev(&devid); break; - case IPL_METHOD_CCW_VM: + case DUMP_METHOD_CCW_VM: smp_send_stop(); sprintf(buf, "STORE STATUS"); __cpcmd(buf, NULL, 0, NULL); sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_CCW_DIAG: + case DUMP_METHOD_CCW_DIAG: diag308(DIAG308_SET, dump_block_ccw); diag308(DIAG308_DUMP, NULL); break; - case IPL_METHOD_FCP_RW_DIAG: + case DUMP_METHOD_FCP_DIAG: diag308(DIAG308_SET, dump_block_fcp); diag308(DIAG308_DUMP, NULL); break; - case IPL_METHOD_NONE: + case DUMP_METHOD_NONE: default: return; } @@ -777,12 +844,13 @@ static int __init ipl_init(void) rc = firmware_register(&ipl_subsys); if (rc) return rc; - switch (ipl_get_type()) { + switch (ipl_info.type) { case IPL_TYPE_CCW: rc = sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group); break; case IPL_TYPE_FCP: + case IPL_TYPE_FCP_DUMP: rc = ipl_register_fcp_files(); break; case IPL_TYPE_NSS: @@ -852,7 +920,7 @@ static int __init reipl_ccw_init(void) /* FIXME: check for diag308_set_works when enabling diag ccw reipl */ if (!MACHINE_IS_VM) sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; - if (ipl_get_type() == IPL_TYPE_CCW) + if (ipl_info.type == IPL_TYPE_CCW) reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; reipl_capabilities |= IPL_TYPE_CCW; return 0; @@ -862,9 +930,9 @@ static int __init reipl_fcp_init(void) { int rc; - if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP)) + if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP)) return 0; - if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP)) + if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP)) make_attrs_ro(reipl_fcp_attrs); reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); @@ -875,7 +943,7 @@ static int 
__init reipl_fcp_init(void) free_page((unsigned long)reipl_block_fcp); return rc; } - if (ipl_get_type() == IPL_TYPE_FCP) { + if (ipl_info.type == IPL_TYPE_FCP) { memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); } else { reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; @@ -909,7 +977,7 @@ static int __init reipl_init(void) rc = reipl_nss_init(); if (rc) return rc; - rc = reipl_set_type(ipl_get_type()); + rc = reipl_set_type(ipl_info.type); if (rc) return rc; return 0; @@ -931,7 +999,7 @@ static int __init dump_ccw_init(void) dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; - dump_capabilities |= IPL_TYPE_CCW; + dump_capabilities |= DUMP_TYPE_CCW; return 0; } @@ -956,7 +1024,7 @@ static int __init dump_fcp_init(void) dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP; dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP; - dump_capabilities |= IPL_TYPE_FCP; + dump_capabilities |= DUMP_TYPE_FCP; return 0; } @@ -995,7 +1063,7 @@ static int __init dump_init(void) rc = dump_fcp_init(); if (rc) return rc; - dump_set_type(IPL_TYPE_NONE); + dump_set_type(DUMP_TYPE_NONE); return 0; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 863c8d08c026..3dfd0985861c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -285,6 +285,26 @@ static void __init conmode_default(void) } } +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +static void __init setup_zfcpdump(unsigned int console_devno) +{ + static char str[64]; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + if (console_devno != -1) + sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x", + ipl_info.data.fcp.dev_id.devno, console_devno); + else + sprintf(str, "cio_ignore=all,!0.0.%04x", + ipl_info.data.fcp.dev_id.devno); + strcat(COMMAND_LINE, str); + console_loglevel = 2; +} +#else +static inline void setup_zfcpdump(unsigned int console_devno) {} +#endif /* CONFIG_ZFCPDUMP */ + #ifdef CONFIG_SMP void (*_machine_restart)(char *command) = machine_restart_smp; void (*_machine_halt)(void) = machine_halt_smp; @@ -586,13 +606,20 @@ setup_resources(void) } } +unsigned long real_memory_size; +EXPORT_SYMBOL_GPL(real_memory_size); + static void __init setup_memory_end(void) { - unsigned long real_size, memory_size; + unsigned long memory_size; unsigned long max_mem, max_phys; int i; - memory_size = real_size = 0; +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) + if (ipl_info.type == IPL_TYPE_FCP_DUMP) + memory_end = ZFCPDUMP_HSA_SIZE; +#endif + memory_size = 0; max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE; memory_end &= PAGE_MASK; @@ -601,7 +628,8 @@ static void __init setup_memory_end(void) for (i = 0; i < MEMORY_CHUNKS; i++) { struct mem_chunk *chunk = &memory_chunk[i]; - real_size = max(real_size, chunk->addr + chunk->size); + real_memory_size = max(real_memory_size, + chunk->addr + chunk->size); if (chunk->addr >= max_mem) { memset(chunk, 0, sizeof(*chunk)); continue; @@ -765,6 +793,7 @@ setup_arch(char **cmdline_p) parse_early_param(); + setup_ipl_info(); setup_memory_end(); setup_addressing_mode(); setup_memory(); @@ -782,6 +811,9 @@ setup_arch(char **cmdline_p) /* Setup default console */ conmode_default(); + + /* Setup zfcpdump support */ + setup_zfcpdump(console_devno); } void print_cpu_info(struct cpuinfo_S390 *cpuinfo) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 
97764f710bb7..7c0143fdf710 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include extern volatile int __cpu_logical_map[]; @@ -395,6 +397,65 @@ void smp_ctl_clear_bit(int cr, int bit) on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) + +/* + * zfcpdump_prefix_array holds prefix registers for the following scenario: + * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to + * save its prefix registers, since they get lost, when switching from 31 bit + * to 64 bit. + */ +unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ + __attribute__((__section__(".data"))); + +static void __init smp_get_save_areas(void) +{ + unsigned int cpu, cpu_num, rc; + __u16 boot_cpu_addr; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; + cpu_num = 1; + for (cpu = 0; cpu <= 65535; cpu++) { + if ((u16) cpu == boot_cpu_addr) + continue; + __cpu_logical_map[1] = (__u16) cpu; + if (signal_processor(1, sigp_sense) == sigp_not_operational) + continue; + if (cpu_num >= NR_CPUS) { + printk("WARNING: Registers for cpu %i are not " + "saved, since dump kernel was compiled with" + "NR_CPUS=%i!\n", cpu_num, NR_CPUS); + continue; + } + zfcpdump_save_areas[cpu_num] = + alloc_bootmem(sizeof(union save_area)); + while (1) { + rc = signal_processor(1, sigp_stop_and_store_status); + if (rc != sigp_busy) + break; + cpu_relax(); + } + memcpy(zfcpdump_save_areas[cpu_num], + (void *)(unsigned long) store_prefix() + + SAVE_AREA_BASE, SAVE_AREA_SIZE); +#ifdef __s390x__ + /* copy original prefix register */ + zfcpdump_save_areas[cpu_num]->s390x.pref_reg = + zfcpdump_prefix_array[cpu_num]; +#endif + cpu_num++; + } +} + +union save_area *zfcpdump_save_areas[NR_CPUS + 1]; +EXPORT_SYMBOL_GPL(zfcpdump_save_areas); + +#else +#define smp_get_save_areas() do { } while (0) +#endif + /* * Lets check how many CPUs we have. 
*/ @@ -589,6 +650,7 @@ void __init smp_setup_cpu_possible_map(void) { unsigned int phy_cpus, pos_cpus, cpu; + smp_get_save_areas(); phy_cpus = smp_count_cpus(); pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 5fd581c22db3..a0f6db21855a 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o obj-$(CONFIG_MONREADER) += monreader.o obj-$(CONFIG_MONWRITER) += monwriter.o + +zcore_mod-objs := sclp_sdias.o zcore.o +obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 7d29ab45a6ed..6402e943436f 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -27,6 +27,7 @@ #define EvTyp_CntlProgIdent 0x0B #define EvTyp_SigQuiesce 0x1D #define EvTyp_VT220Msg 0x1A +#define EvTyp_SDIAS 0x1C #define EvTyp_OpCmd_Mask 0x80000000 #define EvTyp_Msg_Mask 0x40000000 @@ -36,6 +37,7 @@ #define EvTyp_CtlProgIdent_Mask 0x00200000 #define EvTyp_SigQuiesce_Mask 0x00000008 #define EvTyp_VT220Msg_Mask 0x00000040 +#define EvTyp_SDIAS_Mask 0x00000010 #define GnrlMsgFlgs_DOM 0x8000 #define GnrlMsgFlgs_SndAlrm 0x4000 diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c new file mode 100644 index 000000000000..06a4d0897232 --- /dev/null +++ b/drivers/s390/char/sclp_sdias.c @@ -0,0 +1,255 @@ +/* + * Sclp "store data in absolut storage" + * + * Copyright IBM Corp. 2003,2007 + * Author(s): Michael Holzheu + */ + +#include +#include +#include +#include +#include "sclp.h" +#include "sclp_rw.h" + +#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) +#define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x ) + +#define SDIAS_RETRIES 300 +#define SDIAS_SLEEP_TICKS 50 + +#define EQ_STORE_DATA 0x0 +#define EQ_SIZE 0x1 +#define DI_FCP_DUMP 0x0 +#define ASA_SIZE_32 0x0 +#define ASA_SIZE_64 0x1 +#define EVSTATE_ALL_STORED 0x0 +#define EVSTATE_NO_DATA 0x3 +#define EVSTATE_PART_STORED 0x10 + +static struct debug_info *sdias_dbf; + +static struct sclp_register sclp_sdias_register = { + .send_mask = EvTyp_SDIAS_Mask, +}; + +struct sdias_evbuf { + struct evbuf_header hdr; + u8 event_qual; + u8 data_id; + u64 reserved2; + u32 event_id; + u16 reserved3; + u8 asa_size; + u8 event_status; + u32 reserved4; + u32 blk_cnt; + u64 asa; + u32 reserved5; + u32 fbn; + u32 reserved6; + u32 lbn; + u16 reserved7; + u16 dbs; +} __attribute__((packed)); + +struct sdias_sccb { + struct sccb_header hdr; + struct sdias_evbuf evbuf; +} __attribute__((packed)); + +static struct sdias_sccb sccb __attribute__((aligned(4096))); + +static int sclp_req_done; +static wait_queue_head_t sdias_wq; +static DEFINE_MUTEX(sdias_mutex); + +static void sdias_callback(struct sclp_req *request, void *data) +{ + struct sdias_sccb *sccb; + + sccb = (struct sdias_sccb *) request->sccb; + sclp_req_done = 1; + wake_up(&sdias_wq); /* Inform caller, that request is complete */ + TRACE("callback done\n"); +} + +static int sdias_sclp_send(struct sclp_req *req) +{ + int retries; + int rc; + + for (retries = SDIAS_RETRIES; retries; retries--) { + sclp_req_done = 0; + TRACE("add request\n"); + rc = sclp_add_request(req); + if (rc) { + /* not initiated, wait some time and retry */ + set_current_state(TASK_INTERRUPTIBLE); + TRACE("add request failed: rc = %i\n",rc); + schedule_timeout(SDIAS_SLEEP_TICKS); + continue; + } + /* initiated, wait for completion of service call */ + 
wait_event(sdias_wq, (sclp_req_done == 1)); + if (req->status == SCLP_REQ_FAILED) { + TRACE("sclp request failed\n"); + rc = -EIO; + continue; + } + TRACE("request done\n"); + break; + } + return rc; +} + +/* + * Get number of blocks (4K) available in the HSA + */ +int sclp_sdias_blk_count(void) +{ + struct sclp_req request; + int rc; + + mutex_lock(&sdias_mutex); + + memset(&sccb, 0, sizeof(sccb)); + memset(&request, 0, sizeof(request)); + + sccb.hdr.length = sizeof(sccb); + sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb.evbuf.hdr.type = EvTyp_SDIAS; + sccb.evbuf.event_qual = EQ_SIZE; + sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_id = 4712; + sccb.evbuf.dbs = 1; + + request.sccb = &sccb; + request.command = SCLP_CMDW_WRITE_EVENT_DATA; + request.status = SCLP_REQ_FILLED; + request.callback = sdias_callback; + + rc = sdias_sclp_send(&request); + if (rc) { + ERROR_MSG("sclp_send failed for get_nr_blocks\n"); + goto out; + } + if (sccb.hdr.response_code != 0x0020) { + TRACE("send failed: %x\n", sccb.hdr.response_code); + rc = -EIO; + goto out; + } + + switch (sccb.evbuf.event_status) { + case 0: + rc = sccb.evbuf.blk_cnt; + break; + default: + ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status); + rc = -EIO; + goto out; + } + TRACE("%i blocks\n", rc); +out: + mutex_unlock(&sdias_mutex); + return rc; +} + +/* + * Copy from HSA to absolute storage (not reentrant): + * + * @dest : Address of buffer where data should be copied + * @start_blk: Start Block (beginning with 1) + * @nr_blks : Number of 4K blocks to copy + * + * Return Value: 0 : Requested 'number' of blocks of data copied + * <0: ERROR - negative event status + */ +int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) +{ + struct sclp_req request; + int rc; + + mutex_lock(&sdias_mutex); + + memset(&sccb, 0, sizeof(sccb)); + memset(&request, 0, sizeof(request)); + + sccb.hdr.length = sizeof(sccb); + sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb.evbuf.hdr.type = EvTyp_SDIAS; + sccb.evbuf.hdr.flags = 0; + sccb.evbuf.event_qual = EQ_STORE_DATA; + sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_id = 4712; +#ifdef __s390x__ + sccb.evbuf.asa_size = ASA_SIZE_64; +#else + sccb.evbuf.asa_size = ASA_SIZE_32; +#endif + sccb.evbuf.event_status = 0; + sccb.evbuf.blk_cnt = nr_blks; + sccb.evbuf.asa = (unsigned long)dest; + sccb.evbuf.fbn = start_blk; + sccb.evbuf.lbn = 0; + sccb.evbuf.dbs = 1; + + request.sccb = &sccb; + request.command = SCLP_CMDW_WRITE_EVENT_DATA; + request.status = SCLP_REQ_FILLED; + request.callback = sdias_callback; + + rc = sdias_sclp_send(&request); + if (rc) { + ERROR_MSG("sclp_send failed: %x\n", rc); + goto out; + } + if (sccb.hdr.response_code != 0x0020) { + TRACE("copy failed: %x\n", sccb.hdr.response_code); + rc = -EIO; + goto out; + } + + switch (sccb.evbuf.event_status) { + case EVSTATE_ALL_STORED: + TRACE("all stored\n"); + case EVSTATE_PART_STORED: + TRACE("part stored: %i\n", sccb.evbuf.blk_cnt); + break; + case EVSTATE_NO_DATA: + TRACE("no data\n"); + default: + ERROR_MSG("Error from SCLP while copying hsa. 
" + "Event status = %x\n", + sccb.evbuf.event_status); + rc = -EIO; + } +out: + mutex_unlock(&sdias_mutex); + return rc; +} + +int __init sdias_init(void) +{ + int rc; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return 0; + sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long)); + debug_register_view(sdias_dbf, &debug_sprintf_view); + debug_set_level(sdias_dbf, 6); + rc = sclp_register(&sclp_sdias_register); + if (rc) { + ERROR_MSG("sclp register failed\n"); + return rc; + } + init_waitqueue_head(&sdias_wq); + TRACE("init done\n"); + return 0; +} + +void __exit sdias_exit(void) +{ + debug_unregister(sdias_dbf); + sclp_unregister(&sclp_sdias_register); +} diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c new file mode 100644 index 000000000000..89d439316a53 --- /dev/null +++ b/drivers/s390/char/zcore.c @@ -0,0 +1,651 @@ +/* + * zcore module to export memory content and register sets for creating system + * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same + * dump format as s390 standalone dumps. + * + * For more information please refer to Documentation/s390/zfcpdump.txt + * + * Copyright IBM Corp. 2003,2007 + * Author(s): Michael Holzheu + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) +#define MSG(x...) printk( KERN_ALERT x ) +#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x ) + +#define TO_USER 0 +#define TO_KERNEL 1 + +enum arch_id { + ARCH_S390 = 0, + ARCH_S390X = 1, +}; + +/* dump system info */ + +struct sys_info { + enum arch_id arch; + unsigned long sa_base; + u32 sa_size; + int cpu_map[NR_CPUS]; + unsigned long mem_size; + union save_area lc_mask; +}; + +static struct sys_info sys_info; +static struct debug_info *zcore_dbf; +static int hsa_available; +static struct dentry *zcore_dir; +static struct dentry *zcore_file; + +/* + * Copy memory from HSA to kernel or user memory (not reentrant): + * + * @dest: Kernel or user buffer where memory should be copied to + * @src: Start address within HSA where data should be copied + * @count: Size of buffer, which should be copied + * @mode: Either TO_KERNEL or TO_USER + */ +static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) +{ + int offs, blk_num; + static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); + + if (count == 0) + return 0; + + /* copy first block */ + offs = 0; + if ((src % PAGE_SIZE) != 0) { + blk_num = src / PAGE_SIZE + 2; + if (sclp_sdias_copy(buf, blk_num, 1)) { + TRACE("sclp_sdias_copy() failed\n"); + return -EIO; + } + offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count); + if (mode == TO_USER) { + if (copy_to_user((__force __user void*) dest, + buf + (src % PAGE_SIZE), offs)) + return -EFAULT; + } else + memcpy(dest, buf + (src % PAGE_SIZE), offs); + } + if (offs == count) + goto out; + + /* copy middle */ + for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) { + blk_num = (src + offs) / PAGE_SIZE + 2; + if (sclp_sdias_copy(buf, blk_num, 1)) { + TRACE("sclp_sdias_copy() failed\n"); + return -EIO; + } + if (mode == TO_USER) { + if (copy_to_user((__force __user void*) dest + offs, + buf, PAGE_SIZE)) + return -EFAULT; + } else + memcpy(dest + offs, buf, PAGE_SIZE); + } + if (offs == count) + goto out; + + /* copy last block */ + blk_num = (src + offs) / PAGE_SIZE + 2; + if (sclp_sdias_copy(buf, blk_num, 1)) { + TRACE("sclp_sdias_copy() failed\n"); + return -EIO; + } + if (mode == TO_USER) 
{ + if (copy_to_user((__force __user void*) dest + offs, buf, + PAGE_SIZE)) + return -EFAULT; + } else + memcpy(dest + offs, buf, count - offs); +out: + return 0; +} + +static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) +{ + return memcpy_hsa((void __force *) dest, src, count, TO_USER); +} + +static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) +{ + return memcpy_hsa(dest, src, count, TO_KERNEL); +} + +static int memcpy_real(void *dest, unsigned long src, size_t count) +{ + unsigned long flags; + int rc = -EFAULT; + register unsigned long _dest asm("2") = (unsigned long) dest; + register unsigned long _len1 asm("3") = (unsigned long) count; + register unsigned long _src asm("4") = src; + register unsigned long _len2 asm("5") = (unsigned long) count; + + if (count == 0) + return 0; + flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */ + asm volatile ( + "0: mvcle %1,%2,0x0\n" + "1: jo 0b\n" + " lhi %0,0x0\n" + "2:\n" + EX_TABLE(1b,2b) + : "+d" (rc) + : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2) + : "cc", "memory"); + __raw_local_irq_ssm(flags); + + return rc; +} + +static int memcpy_real_user(__user void *dest, unsigned long src, size_t count) +{ + static char buf[4096]; + int offs = 0, size; + + while (offs < count) { + size = min(sizeof(buf), count - offs); + if (memcpy_real(buf, src + offs, size)) + return -EFAULT; + if (copy_to_user(dest + offs, buf, size)) + return -EFAULT; + offs += size; + } + return 0; +} + +#ifdef __s390x__ +/* + * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info + */ +static void __init s390x_to_s390_regs(union save_area *out, union save_area *in, + int cpu) +{ + int i; + + for (i = 0; i < 16; i++) { + out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff; + out->s390.acc_regs[i] = in->s390x.acc_regs[i]; + out->s390.ctrl_regs[i] = + in->s390x.ctrl_regs[i] & 0x00000000ffffffff; + } + /* locore for 31 bit has only space for fpregs 0,2,4,6 */ + out->s390.fp_regs[0] = in->s390x.fp_regs[0]; + out->s390.fp_regs[1] = in->s390x.fp_regs[2]; + out->s390.fp_regs[2] = in->s390x.fp_regs[4]; + out->s390.fp_regs[3] = in->s390x.fp_regs[6]; + memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4); + out->s390.psw[1] |= 0x8; /* set bit 12 */ + memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4); + out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */ + out->s390.pref_reg = in->s390x.pref_reg; + out->s390.timer = in->s390x.timer; + out->s390.clk_cmp = in->s390x.clk_cmp; +} + +static void __init s390x_to_s390_save_areas(void) +{ + int i = 1; + static union save_area tmp; + + while (zfcpdump_save_areas[i]) { + s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i); + memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp)); + i++; + } +} + +#endif /* __s390x__ */ + +static int __init init_cpu_info(enum arch_id arch) +{ + union save_area *sa; + + /* get info for boot cpu from lowcore, stored in the HSA */ + + sa = kmalloc(sizeof(*sa), GFP_KERNEL); + if (!sa) { + ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__); + return -ENOMEM; + } + if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { + ERROR_MSG("could not copy from HSA\n"); + kfree(sa); + return -EIO; + } + zfcpdump_save_areas[0] = sa; + +#ifdef __s390x__ + /* convert s390x regs to s390, if we are dumping an s390 Linux */ + + if (arch == ARCH_S390) + s390x_to_s390_save_areas(); +#endif + + return 0; +} + +static DEFINE_MUTEX(zcore_mutex); + +#define DUMP_VERSION 0x3 +#define DUMP_MAGIC 0xa8190173618f23fdULL +#define 
DUMP_ARCH_S390X 2 +#define DUMP_ARCH_S390 1 +#define HEADER_SIZE 4096 + +/* dump header dumped according to s390 crash dump format */ + +struct zcore_header { + u64 magic; + u32 version; + u32 header_size; + u32 dump_level; + u32 page_size; + u64 mem_size; + u64 mem_start; + u64 mem_end; + u32 num_pages; + u32 pad1; + u64 tod; + cpuid_t cpu_id; + u32 arch_id; + u32 build_arch; + char pad2[4016]; +} __attribute__((packed,__aligned__(16))); + +static struct zcore_header zcore_header = { + .magic = DUMP_MAGIC, + .version = DUMP_VERSION, + .header_size = 4096, + .dump_level = 0, + .page_size = PAGE_SIZE, + .mem_start = 0, +#ifdef __s390x__ + .build_arch = DUMP_ARCH_S390X, +#else + .build_arch = DUMP_ARCH_S390, +#endif +}; + +/* + * Copy lowcore info to buffer. Use map in order to copy only register parts. + * + * @buf: User buffer + * @sa: Pointer to save area + * @sa_off: Offset in save area to copy + * @len: Number of bytes to copy + */ +static int copy_lc(void __user *buf, void *sa, int sa_off, int len) +{ + int i; + char *lc_mask = (char*)&sys_info.lc_mask; + + for (i = 0; i < len; i++) { + if (!lc_mask[i + sa_off]) + continue; + if (copy_to_user(buf + i, sa + sa_off + i, 1)) + return -EFAULT; + } + return 0; +} + +/* + * Copy lowcores info to memory, if necessary + * + * @buf: User buffer + * @addr: Start address of buffer in dump memory + * @count: Size of buffer + */ +static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) +{ + unsigned long end; + int i = 0; + + if (count == 0) + return 0; + + end = start + count; + while (zfcpdump_save_areas[i]) { + unsigned long cp_start, cp_end; /* copy range */ + unsigned long sa_start, sa_end; /* save area range */ + unsigned long prefix; + unsigned long sa_off, len, buf_off; + + if (sys_info.arch == ARCH_S390) + prefix = zfcpdump_save_areas[i]->s390.pref_reg; + else + prefix = zfcpdump_save_areas[i]->s390x.pref_reg; + + sa_start = prefix + sys_info.sa_base; + sa_end = prefix + sys_info.sa_base + sys_info.sa_size; + + if ((end < sa_start) || (start > sa_end)) + goto next; + cp_start = max(start, sa_start); + cp_end = min(end, sa_end); + + buf_off = cp_start - start; + sa_off = cp_start - sa_start; + len = cp_end - cp_start; + + TRACE("copy_lc for: %lx\n", start); + if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len)) + return -EFAULT; +next: + i++; + } + return 0; +} + +/* + * Read routine for zcore character device + * First 4K are dump header + * Next 32MB are HSA Memory + * Rest is read from absolute Memory + */ +static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned long mem_start; /* Start address in memory */ + size_t mem_offs; /* Offset in dump memory */ + size_t hdr_count; /* Size of header part of output buffer */ + size_t size; + int rc; + + mutex_lock(&zcore_mutex); + + if (*ppos > (sys_info.mem_size + HEADER_SIZE)) { + rc = -EINVAL; + goto fail; + } + + count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos)); + + /* Copy dump header */ + if (*ppos < HEADER_SIZE) { + size = min(count, (size_t) (HEADER_SIZE - *ppos)); + if (copy_to_user(buf, &zcore_header + *ppos, size)) { + rc = -EFAULT; + goto fail; + } + hdr_count = size; + mem_start = 0; + } else { + hdr_count = 0; + mem_start = *ppos - HEADER_SIZE; + } + + mem_offs = 0; + + /* Copy from HSA data */ + if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { + size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE + - mem_start)); + rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); + 
if (rc) + goto fail; + + mem_offs += size; + } + + /* Copy from real mem */ + size = count - mem_offs - hdr_count; + rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs, + size); + if (rc) + goto fail; + + /* + * Since s390 dump analysis tools like lcrash or crash + * expect register sets in the prefix pages of the cpus, + * we copy them into the read buffer, if necessary. + * buf + hdr_count: Start of memory part of output buffer + * mem_start: Start memory address to copy from + * count - hdr_count: Size of memory area to copy + */ + if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) { + rc = -EFAULT; + goto fail; + } + *ppos += count; +fail: + mutex_unlock(&zcore_mutex); + return (rc < 0) ? rc : count; +} + +static int zcore_open(struct inode *inode, struct file *filp) +{ + if (!hsa_available) + return -ENODATA; + else + return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; +} + +static int zcore_release(struct inode *inode, struct file *filep) +{ + diag308(DIAG308_REL_HSA, NULL); + hsa_available = 0; + return 0; +} + +static loff_t zcore_lseek(struct file *file, loff_t offset, int orig) +{ + loff_t rc; + + mutex_lock(&zcore_mutex); + switch (orig) { + case 0: + file->f_pos = offset; + rc = file->f_pos; + break; + case 1: + file->f_pos += offset; + rc = file->f_pos; + break; + default: + rc = -EINVAL; + } + mutex_unlock(&zcore_mutex); + return rc; +} + +static struct file_operations zcore_fops = { + .owner = THIS_MODULE, + .llseek = zcore_lseek, + .read = zcore_read, + .open = zcore_open, + .release = zcore_release, +}; + + +static void __init set_s390_lc_mask(union save_area *map) +{ + memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); + memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); + memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); + memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); + memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); + memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); + memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); + memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); + memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); +} + +static void __init set_s390x_lc_mask(union save_area *map) +{ + memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); + memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); + memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); + memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); + memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); + memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); + memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); + memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); + memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); + memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); +} + +/* + * Initialize dump globals for a given architecture + */ +static int __init sys_info_init(enum arch_id arch) +{ + switch (arch) { + case ARCH_S390X: + MSG("DETECTED 'S390X (64 bit) OS'\n"); + sys_info.sa_base = SAVE_AREA_BASE_S390X; + sys_info.sa_size = sizeof(struct save_area_s390x); + set_s390x_lc_mask(&sys_info.lc_mask); + break; + case ARCH_S390: + MSG("DETECTED 'S390 (32 bit) OS'\n"); + sys_info.sa_base = SAVE_AREA_BASE_S390; + sys_info.sa_size = sizeof(struct save_area_s390); + set_s390_lc_mask(&sys_info.lc_mask); + break; + default: + ERROR_MSG("unknown architecture 0x%x.\n",arch); + return -EINVAL; + } + 
sys_info.arch = arch; + if (init_cpu_info(arch)) { + ERROR_MSG("get cpu info failed\n"); + return -ENOMEM; + } + sys_info.mem_size = real_memory_size; + + return 0; +} + +static int __init check_sdias(void) +{ + int rc, act_hsa_size; + + rc = sclp_sdias_blk_count(); + if (rc < 0) { + ERROR_MSG("Could not determine HSA size\n"); + return rc; + } + act_hsa_size = (rc - 1) * PAGE_SIZE; + if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { + ERROR_MSG("HSA size too small: %i\n", act_hsa_size); + return -EINVAL; + } + return 0; +} + +static void __init zcore_header_init(int arch, struct zcore_header *hdr) +{ + if (arch == ARCH_S390X) + hdr->arch_id = DUMP_ARCH_S390X; + else + hdr->arch_id = DUMP_ARCH_S390; + hdr->mem_size = sys_info.mem_size; + hdr->mem_end = sys_info.mem_size; + hdr->num_pages = sys_info.mem_size / PAGE_SIZE; + hdr->tod = get_clock(); + get_cpu_id(&hdr->cpu_id); +} + +extern int sdias_init(void); + +static int __init zcore_init(void) +{ + unsigned char arch; + int rc; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return -ENODATA; + + zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); + debug_register_view(zcore_dbf, &debug_sprintf_view); + debug_set_level(zcore_dbf, 6); + + TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno); + TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); + TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); + + rc = sdias_init(); + if (rc) + goto fail; + + rc = check_sdias(); + if (rc) { + ERROR_MSG("Dump initialization failed\n"); + goto fail; + } + + rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); + if (rc) { + ERROR_MSG("sdial memcpy for arch id failed\n"); + goto fail; + } + +#ifndef __s390x__ + if (arch == ARCH_S390X) { + ERROR_MSG("32 bit dumper can't dump 64 bit system!\n"); + rc = -EINVAL; + goto fail; + } +#endif + + rc = sys_info_init(arch); + if (rc) { + ERROR_MSG("arch init failed\n"); + goto fail; + } + + zcore_header_init(arch, &zcore_header); + + zcore_dir = debugfs_create_dir("zcore" , NULL); + if (!zcore_dir) { + rc = -ENOMEM; + goto fail; + } + zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL, + &zcore_fops); + if (!zcore_file) { + debugfs_remove(zcore_dir); + rc = -ENOMEM; + goto fail; + } + hsa_available = 1; + return 0; + +fail: + diag308(DIAG308_REL_HSA, NULL); + return rc; +} + +extern void sdias_exit(void); + +static void __exit zcore_exit(void) +{ + debug_unregister(zcore_dbf); + sdias_exit(); + diag308(DIAG308_REL_HSA, NULL); +} + +MODULE_AUTHOR("Copyright IBM Corp. 
2003,2007"); +MODULE_DESCRIPTION("zcore module for zfcpdump support"); +MODULE_LICENSE("GPL"); + +subsys_initcall(zcore_init); +module_exit(zcore_exit); diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h index 15bb0b529551..bdcd448d43fb 100644 --- a/include/asm-s390/ipl.h +++ b/include/asm-s390/ipl.h @@ -8,6 +8,8 @@ #define _ASM_S390_IPL_H #include +#include +#include #define IPL_PARMBLOCK_ORIGIN 0x2000 @@ -79,6 +81,7 @@ struct ipl_parameter_block { extern u32 ipl_flags; extern u32 dump_prefix_page; + extern void do_reipl(void); extern void ipl_save_parameters(void); @@ -88,6 +91,35 @@ enum { IPL_NSS_VALID = 4, }; +enum ipl_type { + IPL_TYPE_UNKNOWN = 1, + IPL_TYPE_CCW = 2, + IPL_TYPE_FCP = 4, + IPL_TYPE_FCP_DUMP = 8, + IPL_TYPE_NSS = 16, +}; + +struct ipl_info +{ + enum ipl_type type; + union { + struct { + struct ccw_dev_id dev_id; + } ccw; + struct { + struct ccw_dev_id dev_id; + u64 wwpn; + u64 lun; + } fcp; + struct { + char name[NSS_NAME_SIZE + 1]; + } nss; + } data; +}; + +extern struct ipl_info ipl_info; +extern void setup_ipl_info(void); + /* * DIAG 308 support */ diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 4a31d0a7ee83..ffc9788a21a7 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -147,6 +147,52 @@ void pgm_check_handler(void); void mcck_int_handler(void); void io_int_handler(void); +struct save_area_s390 { + u32 ext_save; + u64 timer; + u64 clk_cmp; + u8 pad1[24]; + u8 psw[8]; + u32 pref_reg; + u8 pad2[20]; + u32 acc_regs[16]; + u64 fp_regs[4]; + u32 gp_regs[16]; + u32 ctrl_regs[16]; +} __attribute__((packed)); + +struct save_area_s390x { + u64 fp_regs[16]; + u64 gp_regs[16]; + u8 psw[16]; + u8 pad1[8]; + u32 pref_reg; + u32 fp_ctrl_reg; + u8 pad2[4]; + u32 tod_reg; + u64 timer; + u64 clk_cmp; + u8 pad3[8]; + u32 acc_regs[16]; + u64 ctrl_regs[16]; +} __attribute__((packed)); + +union save_area { + struct save_area_s390 s390; + struct save_area_s390x s390x; +}; + +#define SAVE_AREA_BASE_S390 0xd4 +#define SAVE_AREA_BASE_S390X 0x1200 + +#ifndef __s390x__ +#define SAVE_AREA_SIZE sizeof(struct save_area_s390) +#define SAVE_AREA_BASE SAVE_AREA_BASE_S390 +#else +#define SAVE_AREA_SIZE sizeof(struct save_area_s390x) +#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X +#endif + struct _lowcore { #ifndef __s390x__ diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h index 3996daaa8f54..21ed64773210 100644 --- a/include/asm-s390/sclp.h +++ b/include/asm-s390/sclp.h @@ -44,6 +44,8 @@ struct sclp_chp_info { extern struct sclp_readinfo_sccb s390_readinfo_sccb; extern void sclp_readinfo_early(void); +extern int sclp_sdias_blk_count(void); +extern int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); extern int sclp_chp_configure(struct chp_id chpid); extern int sclp_chp_deconfigure(struct chp_id chpid); extern int sclp_chp_read_info(struct sclp_chp_info *info); diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h index 44c7aee2bd34..a76a6b8fd887 100644 --- a/include/asm-s390/setup.h +++ b/include/asm-s390/setup.h @@ -40,6 +40,7 @@ struct mem_chunk { }; extern struct mem_chunk memory_chunk[]; +extern unsigned long real_memory_size; #ifdef CONFIG_S390_SWITCH_AMODE extern unsigned int switch_amode; @@ -77,6 +78,7 @@ extern unsigned long machine_flags; #endif /* __s390x__ */ #define MACHINE_HAS_SCLP (!MACHINE_IS_P390) +#define ZFCPDUMP_HSA_SIZE (32UL<<20) /* * Console mode. 
Override with conmode= diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index b957e4cda464..676e94ee15f0 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -119,4 +119,5 @@ static inline void smp_send_stop(void) #define smp_setup_cpu_possible_map() do { } while (0) #endif +extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; #endif -- cgit v1.2.3-59-g8ed1b From 2fc2d1e9ffcde78af7ab63ed640d9a4901797de2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 27 Apr 2007 16:01:56 +0200 Subject: [S390] Processor degradation notification. Generate uevents for all cpus if cpu capability changes. This can happen e.g. because the cpus are overheating. The cpu capability can be read via /sys/devices/system/cpu/cpuN/capability. Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/smp.c | 48 +++++++++++++++++++++++--- drivers/s390/char/Makefile | 2 +- drivers/s390/char/sclp.h | 2 ++ drivers/s390/char/sclp_config.c | 75 +++++++++++++++++++++++++++++++++++++++++ drivers/s390/sysinfo.c | 18 ++++++++++ include/asm-s390/processor.h | 1 + 6 files changed, 140 insertions(+), 6 deletions(-) create mode 100644 drivers/s390/char/sclp_config.c (limited to 'include') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 7c0143fdf710..2c5de92958dd 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -821,19 +821,57 @@ int setup_profiling_timer(unsigned int multiplier) static DEFINE_PER_CPU(struct cpu, cpu_devices); +static ssize_t show_capability(struct sys_device *dev, char *buf) +{ + unsigned int capability; + int rc; + + rc = get_cpu_capability(&capability); + if (rc) + return rc; + return sprintf(buf, "%u\n", capability); +} +static SYSDEV_ATTR(capability, 0444, show_capability, NULL); + +static int __cpuinit smp_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; + struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; + + switch (action) { + case CPU_ONLINE: + if (sysdev_create_file(s, &attr_capability)) + return NOTIFY_BAD; + break; + case CPU_DEAD: + sysdev_remove_file(s, &attr_capability); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata smp_cpu_nb = { + .notifier_call = smp_cpu_notify, +}; + static int __init topology_init(void) { int cpu; - int ret; + + register_cpu_notifier(&smp_cpu_nb); for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; c->hotpluggable = 1; - ret = register_cpu(c, cpu); - if (ret) - printk(KERN_WARNING "topology_init: register_cpu %d " - "failed (%d)\n", cpu, ret); + register_cpu(c, cpu); + if (!cpu_online(cpu)) + continue; + s = &c->sysdev; + sysdev_create_file(s, &attr_capability); } return 0; } diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index a0f6db21855a..c210784bdf46 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -3,7 +3,7 @@ # obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ - sclp_info.o sclp_chp.o + sclp_info.o sclp_config.o sclp_chp.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index e03dcf4c5fca..87ac4a3ad49d 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -27,6 +27,7 @@ #define EVTYP_CNTLPROGIDENT 0x0B #define EVTYP_SIGQUIESCE 0x1D #define EVTYP_VT220MSG 0x1A +#define EVTYP_CONFMGMDATA 0x04 
#define EVTYP_SDIAS 0x1C #define EVTYP_OPCMD_MASK 0x80000000 @@ -37,6 +38,7 @@ #define EVTYP_CTLPROGIDENT_MASK 0x00200000 #define EVTYP_SIGQUIESCE_MASK 0x00000008 #define EVTYP_VT220MSG_MASK 0x00000040 +#define EVTYP_CONFMGMDATA_MASK 0x10000000 #define EVTYP_SDIAS_MASK 0x00000010 #define GNRLMSGFLGS_DOM 0x8000 diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c new file mode 100644 index 000000000000..5322e5e54a98 --- /dev/null +++ b/drivers/s390/char/sclp_config.c @@ -0,0 +1,75 @@ +/* + * drivers/s390/char/sclp_config.c + * + * Copyright IBM Corp. 2007 + * Author(s): Heiko Carstens + */ + +#include +#include +#include +#include +#include +#include "sclp.h" + +#define TAG "sclp_config: " + +struct conf_mgm_data { + u8 reserved; + u8 ev_qualifier; +} __attribute__((packed)); + +#define EV_QUAL_CAP_CHANGE 3 + +static struct work_struct sclp_cpu_capability_work; + +static void sclp_cpu_capability_notify(struct work_struct *work) +{ + int cpu; + struct sys_device *sysdev; + + printk(KERN_WARNING TAG "cpu capability changed.\n"); + lock_cpu_hotplug(); + for_each_online_cpu(cpu) { + sysdev = get_cpu_sysdev(cpu); + kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); + } + unlock_cpu_hotplug(); +} + +static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) +{ + struct conf_mgm_data *cdata; + + cdata = (struct conf_mgm_data *)(evbuf + 1); + if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE) + schedule_work(&sclp_cpu_capability_work); +} + +static struct sclp_register sclp_conf_register = +{ + .receive_mask = EVTYP_CONFMGMDATA_MASK, + .receiver_fn = sclp_conf_receiver_fn, +}; + +static int __init sclp_conf_init(void) +{ + int rc; + + INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); + + rc = sclp_register(&sclp_conf_register); + if (rc) { + printk(KERN_ERR TAG "failed to register (%d).\n", rc); + return rc; + } + + if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) { + printk(KERN_WARNING TAG "no configuration management.\n"); + sclp_unregister(&sclp_conf_register); + rc = -ENOSYS; + } + return rc; +} + +__initcall(sclp_conf_init); diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 090743d2f914..19343f9675c3 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c @@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void) __initcall(create_proc_sysinfo); +int get_cpu_capability(unsigned int *capability) +{ + struct sysinfo_1_2_2 *info; + int rc; + + info = (void *) get_zeroed_page(GFP_KERNEL); + if (!info) + return -ENOMEM; + rc = stsi(info, 1, 2, 2); + if (rc == -ENOSYS) + goto out; + rc = 0; + *capability = info->capability; +out: + free_page((unsigned long) info); + return rc; +} + /* * CPU capability might have changed. Therefore recalculate loops_per_jiffy. */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 96a6f80953a9..e0fcea8c64c3 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -57,6 +57,7 @@ struct cpuinfo_S390 extern void s390_adjust_jiffies(void); extern void print_cpu_info(struct cpuinfo_S390 *); +extern int get_cpu_capability(unsigned int *); /* Lazy FPU handling on uni-processor */ extern struct task_struct *last_task_used_math; -- cgit v1.2.3-59-g8ed1b From 6c210482ae4a9a5bb9377ad250feaacec3faa3cd Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 27 Apr 2007 16:01:57 +0200 Subject: [S390] split page_test_and_clear_dirty. 
The page_test_and_clear_dirty primitive really consists of two operations, page_test_dirty and the page_clear_dirty. The combination of the two is not an atomic operation, so it makes more sense to have two separate operations instead of one. In addition to the improved readability of the s390 version of SetPageUptodate, it now avoids the page_test_dirty operation which is an insert-storage-key-extended (iske) instruction which is an expensive operation. Signed-off-by: Martin Schwidefsky --- include/asm-generic/pgtable.h | 11 +++++++++-- include/asm-s390/pgtable.h | 15 ++++++++------- include/linux/page-flags.h | 2 +- mm/rmap.c | 8 ++++++-- 4 files changed, 24 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 6d7e279b1490..dc8f99ee305f 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -139,8 +139,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres #define pte_same(A,B) (pte_val(A) == pte_val(B)) #endif -#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY -#define page_test_and_clear_dirty(page) (0) +#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY +#define page_test_dirty(page) (0) +#endif + +#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY +#define page_clear_dirty(page) do { } while (0) +#endif + +#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY #define pte_maybe_dirty(pte) pte_dirty(pte) #else #define pte_maybe_dirty(pte) (1) diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index 13c16546eff5..8fe8d42e64c3 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h @@ -753,14 +753,14 @@ ptep_establish(struct vm_area_struct *vma, * should therefore only be called if it is not mapped in any * address space. */ -static inline int page_test_and_clear_dirty(struct page *page) +static inline int page_test_dirty(struct page *page) { - unsigned long physpage = page_to_phys(page); - int skey = page_get_storage_key(physpage); + return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0; +} - if (skey & _PAGE_CHANGED) - page_set_storage_key(physpage, skey & ~_PAGE_CHANGED); - return skey & _PAGE_CHANGED; +static inline void page_clear_dirty(struct page *page) +{ + page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); } /* @@ -953,7 +953,8 @@ extern void memmap_init(unsigned long, int, unsigned long, unsigned long); #define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME -#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY +#define __HAVE_ARCH_PAGE_TEST_DIRTY +#define __HAVE_ARCH_PAGE_CLEAR_DIRTY #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG #include diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 9cd0d0eaf523..96326594e55d 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -133,7 +133,7 @@ static inline void SetPageUptodate(struct page *page) { if (!test_and_set_bit(PG_uptodate, &page->flags)) - page_test_and_clear_dirty(page); + page_clear_dirty(page); } #else #define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags) diff --git a/mm/rmap.c b/mm/rmap.c index b82146e6dfc9..59da5b734c80 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -498,8 +498,10 @@ int page_mkclean(struct page *page) struct address_space *mapping = page_mapping(page); if (mapping) ret = page_mkclean_file(mapping, page); - if (page_test_and_clear_dirty(page)) + if (page_test_dirty(page)) { + page_clear_dirty(page); ret = 1; + } } return ret; @@ -605,8 +607,10 @@ void 
page_remove_rmap(struct page *page, struct vm_area_struct *vma) * Leaving it set also helps swapoff to reinstate ptes * faster for those pages still in swapcache. */ - if (page_test_and_clear_dirty(page)) + if (page_test_dirty(page)) { + page_clear_dirty(page); set_page_dirty(page); + } __dec_zone_page_state(page, PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); } -- cgit v1.2.3-59-g8ed1b From 39ce010d38bf6703b49f59eb73bef030b1d659f2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 27 Apr 2007 16:02:00 +0200 Subject: [S390] Clean up smp code in preparation for some larger changes. Signed-off-by: Martin Schwidefsky Signed-off-by: Heiko Carstens --- arch/s390/kernel/smp.c | 261 +++++++++++++++++++------------------------------ include/asm-s390/smp.h | 5 - 2 files changed, 100 insertions(+), 166 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2c5de92958dd..3754e2031b39 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -1,12 +1,12 @@ /* * arch/s390/kernel/smp.c * - * Copyright (C) IBM Corp. 1999,2006 + * Copyright IBM Corp. 1999,2007 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * Heiko Carstens (heiko.carstens@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Heiko Carstens (heiko.carstens@de.ibm.com) * - * based on other smp stuff by + * based on other smp stuff by * (c) 1995 Alan Cox, CymruNET Ltd * (c) 1998 Ingo Molnar * @@ -43,16 +43,17 @@ #include #include -extern volatile int __cpu_logical_map[]; - /* * An array with a pointer the lowcore of every CPU. */ - struct _lowcore *lowcore_ptr[NR_CPUS]; +EXPORT_SYMBOL(lowcore_ptr); cpumask_t cpu_online_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_online_map); + cpumask_t cpu_possible_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_possible_map); static struct task_struct *current_set[NR_CPUS]; @@ -72,7 +73,7 @@ struct call_data_struct { int wait; }; -static struct call_data_struct * call_data; +static struct call_data_struct *call_data; /* * 'Call function' interrupt callback @@ -152,8 +153,8 @@ out: * * Run a function on all other CPUs. * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. You may call it from a bottom half. + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. */ int smp_call_function(void (*func) (void *info), void *info, int nonatomic, int wait) @@ -179,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function); * * Run a function on one processor. * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. You may call it from a bottom half. + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. 
*/ int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, - int wait, int cpu) + int wait, int cpu) { cpumask_t map = CPU_MASK_NONE; @@ -197,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on); static void do_send_stop(void) { - int cpu, rc; + int cpu, rc; - /* stop all processors */ + /* stop all processors */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -211,9 +212,9 @@ static void do_send_stop(void) static void do_store_status(void) { - int cpu, rc; + int cpu, rc; - /* store status of all processors in their lowcores (real 0) */ + /* store status of all processors in their lowcores (real 0) */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -221,8 +222,8 @@ static void do_store_status(void) rc = signal_processor_p( (__u32)(unsigned long) lowcore_ptr[cpu], cpu, sigp_store_status_at_address); - } while(rc == sigp_busy); - } + } while (rc == sigp_busy); + } } static void do_wait_for_stop(void) @@ -233,7 +234,7 @@ static void do_wait_for_stop(void) for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; - while(!smp_cpu_not_running(cpu)) + while (!smp_cpu_not_running(cpu)) cpu_relax(); } } @@ -247,7 +248,7 @@ void smp_send_stop(void) /* Disable all interrupts/machine checks */ __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); - /* write magic number to zero page (absolute 0) */ + /* write magic number to zero page (absolute 0) */ lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; /* stop other processors. */ @@ -263,8 +264,7 @@ void smp_send_stop(void) /* * Reboot, halt and power_off routines for SMP. */ - -void machine_restart_smp(char * __unused) +void machine_restart_smp(char *__unused) { smp_send_stop(); do_reipl(); @@ -295,17 +295,17 @@ void machine_power_off_smp(void) static void do_ext_call_interrupt(__u16 code) { - unsigned long bits; + unsigned long bits; - /* - * handle bit signal external calls - * - * For the ec_schedule signal we have to do nothing. All the work - * is done automatically when we return from the interrupt. - */ + /* + * handle bit signal external calls + * + * For the ec_schedule signal we have to do nothing. All the work + * is done automatically when we return from the interrupt. + */ bits = xchg(&S390_lowcore.ext_call_fast, 0); - if (test_bit(ec_call_function, &bits)) + if (test_bit(ec_call_function, &bits)) do_call_function(); } @@ -315,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code) */ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) { - /* - * Set signaling bit in lowcore of target cpu and kick it - */ + /* + * Set signaling bit in lowcore of target cpu and kick it + */ set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); - while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy) + while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) udelay(10); } @@ -334,7 +334,7 @@ void smp_ptlb_callback(void *info) void smp_ptlb_all(void) { - on_each_cpu(smp_ptlb_callback, NULL, 0, 1); + on_each_cpu(smp_ptlb_callback, NULL, 0, 1); } EXPORT_SYMBOL(smp_ptlb_all); #endif /* ! 
CONFIG_64BIT */ @@ -346,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all); */ void smp_send_reschedule(int cpu) { - smp_ext_bitcall(cpu, ec_schedule); + smp_ext_bitcall(cpu, ec_schedule); } /* @@ -360,11 +360,12 @@ struct ec_creg_mask_parms { /* * callback for setting/clearing control bits */ -static void smp_ctl_bit_callback(void *info) { +static void smp_ctl_bit_callback(void *info) +{ struct ec_creg_mask_parms *pp = info; unsigned long cregs[16]; int i; - + __ctl_store(cregs, 0, 15); for (i = 0; i <= 15; i++) cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; @@ -383,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit) parms.orvals[cr] = 1 << bit; on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_set_bit); /* * Clear a bit in a control register of all cpus @@ -396,6 +398,7 @@ void smp_ctl_clear_bit(int cr, int bit) parms.andvals[cr] = ~(1L << bit); on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_clear_bit); #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) @@ -460,8 +463,7 @@ EXPORT_SYMBOL_GPL(zfcpdump_save_areas); * Lets check how many CPUs we have. */ -static unsigned int -__init smp_count_cpus(void) +static unsigned int __init smp_count_cpus(void) { unsigned int cpu, num_cpus; __u16 boot_cpu_addr; @@ -477,31 +479,30 @@ __init smp_count_cpus(void) if ((__u16) cpu == boot_cpu_addr) continue; __cpu_logical_map[1] = (__u16) cpu; - if (signal_processor(1, sigp_sense) == - sigp_not_operational) + if (signal_processor(1, sigp_sense) == sigp_not_operational) continue; num_cpus++; } - printk("Detected %d CPU's\n",(int) num_cpus); + printk("Detected %d CPU's\n", (int) num_cpus); printk("Boot cpu address %2X\n", boot_cpu_addr); return num_cpus; } /* - * Activate a secondary processor. + * Activate a secondary processor. */ int __devinit start_secondary(void *cpuvoid) { - /* Setup the cpu */ - cpu_init(); + /* Setup the cpu */ + cpu_init(); preempt_disable(); /* Enable TOD clock interrupts on the secondary cpu. */ - init_cpu_timer(); + init_cpu_timer(); #ifdef CONFIG_VIRT_TIMER /* Enable cpu timer interrupts on the secondary cpu. */ - init_cpu_vtimer(); + init_cpu_vtimer(); #endif /* Enable pfault pseudo page faults on this cpu. */ pfault_init(); @@ -510,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid) cpu_set(smp_processor_id(), cpu_online_map); /* Switch on interrupts */ local_irq_enable(); - /* Print info about this processor */ - print_cpu_info(&S390_lowcore.cpu_data); - /* cpu_idle will call schedule for us */ - cpu_idle(); - return 0; + /* Print info about this processor */ + print_cpu_info(&S390_lowcore.cpu_data); + /* cpu_idle will call schedule for us */ + cpu_idle(); + return 0; } static void __init smp_create_idle(unsigned int cpu) @@ -531,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu) current_set[cpu] = p; } -/* Reserving and releasing of CPUs */ - -static DEFINE_SPINLOCK(smp_reserve_lock); -static int smp_cpu_reserved[NR_CPUS]; - -int -smp_get_cpu(cpumask_t cpu_mask) -{ - unsigned long flags; - int cpu; - - spin_lock_irqsave(&smp_reserve_lock, flags); - /* Try to find an already reserved cpu. */ - for_each_cpu_mask(cpu, cpu_mask) { - if (smp_cpu_reserved[cpu] != 0) { - smp_cpu_reserved[cpu]++; - /* Found one. */ - goto out; - } - } - /* Reserve a new cpu from cpu_mask. 
*/ - for_each_cpu_mask(cpu, cpu_mask) { - if (cpu_online(cpu)) { - smp_cpu_reserved[cpu]++; - goto out; - } - } - cpu = -ENODEV; -out: - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return cpu; -} - -void -smp_put_cpu(int cpu) -{ - unsigned long flags; - - spin_lock_irqsave(&smp_reserve_lock, flags); - smp_cpu_reserved[cpu]--; - spin_unlock_irqrestore(&smp_reserve_lock, flags); -} - -static int -cpu_stopped(int cpu) +static int cpu_stopped(int cpu) { __u32 status; /* Check for stopped state */ - if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) { + if (signal_processor_ps(&status, 0, cpu, sigp_sense) == + sigp_status_stored) { if (status & 0x40) return 1; } @@ -589,14 +547,13 @@ cpu_stopped(int cpu) /* Upping and downing of CPUs */ -int -__cpu_up(unsigned int cpu) +int __cpu_up(unsigned int cpu) { struct task_struct *idle; - struct _lowcore *cpu_lowcore; + struct _lowcore *cpu_lowcore; struct stack_frame *sf; - sigp_ccode ccode; - int curr_cpu; + sigp_ccode ccode; + int curr_cpu; for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { __cpu_logical_map[cpu] = (__u16) curr_cpu; @@ -609,7 +566,7 @@ __cpu_up(unsigned int cpu) ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), cpu, sigp_set_prefix); - if (ccode){ + if (ccode) { printk("sigp_set_prefix failed for cpu %d " "with condition code %d\n", (int) cpu, (int) ccode); @@ -617,9 +574,9 @@ __cpu_up(unsigned int cpu) } idle = current_set[cpu]; - cpu_lowcore = lowcore_ptr[cpu]; + cpu_lowcore = lowcore_ptr[cpu]; cpu_lowcore->kernel_stack = (unsigned long) - task_stack_page(idle) + (THREAD_SIZE); + task_stack_page(idle) + THREAD_SIZE; sf = (struct stack_frame *) (cpu_lowcore->kernel_stack - sizeof(struct pt_regs) - sizeof(struct stack_frame)); @@ -631,11 +588,11 @@ __cpu_up(unsigned int cpu) " stam 0,15,0(%0)" : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; - cpu_lowcore->current_task = (unsigned long) idle; - cpu_lowcore->cpu_data.cpu_nr = cpu; + cpu_lowcore->current_task = (unsigned long) idle; + cpu_lowcore->cpu_data.cpu_nr = cpu; eieio(); - while (signal_processor(cpu,sigp_restart) == sigp_busy) + while (signal_processor(cpu, sigp_restart) == sigp_busy) udelay(10); while (!cpu_online(cpu)) @@ -682,18 +639,11 @@ static int __init setup_possible_cpus(char *s) } early_param("possible_cpus", setup_possible_cpus); -int -__cpu_disable(void) +int __cpu_disable(void) { - unsigned long flags; struct ec_creg_mask_parms cr_parms; int cpu = smp_processor_id(); - spin_lock_irqsave(&smp_reserve_lock, flags); - if (smp_cpu_reserved[cpu] != 0) { - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return -EBUSY; - } cpu_clear(cpu, cpu_online_map); /* Disable pfault pseudo page faults on this cpu. 
*/ @@ -704,24 +654,23 @@ __cpu_disable(void) /* disable all external interrupts */ cr_parms.orvals[0] = 0; - cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | - 1<<11 | 1<<10 | 1<< 6 | 1<< 4); + cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | + 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); /* disable all I/O interrupts */ cr_parms.orvals[6] = 0; - cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | - 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | + 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); /* disable most machine checks */ cr_parms.orvals[14] = 0; - cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | + 1 << 25 | 1 << 24); smp_ctl_bit_callback(&cr_parms); - spin_unlock_irqrestore(&smp_reserve_lock, flags); return 0; } -void -__cpu_die(unsigned int cpu) +void __cpu_die(unsigned int cpu) { /* Wait until target cpu is down */ while (!smp_cpu_not_running(cpu)) @@ -729,13 +678,12 @@ __cpu_die(unsigned int cpu) printk("Processor %d spun down\n", cpu); } -void -cpu_die(void) +void cpu_die(void) { idle_task_exit(); signal_processor(smp_processor_id(), sigp_stop); BUG(); - for(;;); + for (;;); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -748,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned long stack; unsigned int cpu; - int i; - - /* request the 0x1201 emergency signal external interrupt */ - if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) - panic("Couldn't request external interrupt 0x1201"); - memset(lowcore_ptr,0,sizeof(lowcore_ptr)); - /* - * Initialize prefix pages and stacks for all possible cpus - */ + int i; + + /* request the 0x1201 emergency signal external interrupt */ + if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) + panic("Couldn't request external interrupt 0x1201"); + memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); + /* + * Initialize prefix pages and stacks for all possible cpus + */ print_cpu_info(&S390_lowcore.cpu_data); - for_each_possible_cpu(i) { + for_each_possible_cpu(i) { lowcore_ptr[i] = (struct _lowcore *) - __get_free_pages(GFP_KERNEL|GFP_DMA, - sizeof(void*) == 8 ? 1 : 0); - stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER); - if (lowcore_ptr[i] == NULL || stack == 0ULL) + __get_free_pages(GFP_KERNEL | GFP_DMA, + sizeof(void*) == 8 ? 
1 : 0); + stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); + if (!lowcore_ptr[i] || !stack) panic("smp_boot_cpus failed to allocate memory\n"); *(lowcore_ptr[i]) = S390_lowcore; - lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE); - stack = __get_free_pages(GFP_KERNEL,0); - if (stack == 0ULL) + lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE; + stack = __get_free_pages(GFP_KERNEL, 0); + if (!stack) panic("smp_boot_cpus failed to allocate memory\n"); - lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); + lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lowcore_ptr[i]->extended_save_area_addr = - (__u32) __get_free_pages(GFP_KERNEL,0); - if (lowcore_ptr[i]->extended_save_area_addr == 0) + (__u32) __get_free_pages(GFP_KERNEL, 0); + if (!lowcore_ptr[i]->extended_save_area_addr) panic("smp_boot_cpus failed to " "allocate memory\n"); } @@ -816,7 +764,7 @@ void smp_cpus_done(unsigned int max_cpus) */ int setup_profiling_timer(unsigned int multiplier) { - return 0; + return 0; } static DEFINE_PER_CPU(struct cpu, cpu_devices); @@ -853,7 +801,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, } static struct notifier_block __cpuinitdata smp_cpu_nb = { - .notifier_call = smp_cpu_notify, + .notifier_call = smp_cpu_notify, }; static int __init topology_init(void) @@ -875,13 +823,4 @@ static int __init topology_init(void) } return 0; } - subsys_initcall(topology_init); - -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); -EXPORT_SYMBOL(lowcore_ptr); -EXPORT_SYMBOL(smp_ctl_set_bit); -EXPORT_SYMBOL(smp_ctl_clear_bit); -EXPORT_SYMBOL(smp_get_cpu); -EXPORT_SYMBOL(smp_put_cpu); diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index 676e94ee15f0..0a28e6d6ef40 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -54,9 +54,6 @@ extern int smp_call_function_on(void (*func) (void *info), void *info, #define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr) -extern int smp_get_cpu(cpumask_t cpu_map); -extern void smp_put_cpu(int cpu); - static inline __u16 hard_smp_processor_id(void) { __u16 cpu_address; @@ -114,8 +111,6 @@ static inline void smp_send_stop(void) } #define smp_cpu_not_running(cpu) 1 -#define smp_get_cpu(cpu) ({ 0; }) -#define smp_put_cpu(cpu) ({ 0; }) #define smp_setup_cpu_possible_map() do { } while (0) #endif -- cgit v1.2.3-59-g8ed1b From 2127435e57a15f1fea8d6969e264eeb05b28ba4b Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Thu, 15 Mar 2007 00:58:28 +0900 Subject: [MIPS] JMR3927 cleanup * Kill dead codes * Rearrange irq chip handlers * Minimize defconfig Signed-off-by: Atsushi Nemoto Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 1 + arch/mips/configs/jmr3927_defconfig | 254 ++++++++------------------- arch/mips/jmr3927/common/prom.c | 12 +- arch/mips/jmr3927/common/puts.c | 122 +------------ arch/mips/jmr3927/rbhma3100/Makefile | 1 - arch/mips/jmr3927/rbhma3100/init.c | 16 +- arch/mips/jmr3927/rbhma3100/irq.c | 312 ++++------------------------------ arch/mips/jmr3927/rbhma3100/kgdb_io.c | 54 +----- arch/mips/jmr3927/rbhma3100/setup.c | 157 +++-------------- arch/mips/pci/fixup-jmr3927.c | 11 +- arch/mips/pci/ops-tx3927.c | 232 ------------------------- include/asm-mips/jmr3927/irq.h | 57 ------- include/asm-mips/jmr3927/jmr3927.h | 130 +------------- include/asm-mips/jmr3927/tx3927.h | 8 - include/asm-mips/jmr3927/txx927.h | 5 - 15 files changed, 145 insertions(+), 1227 deletions(-) delete mode 100644 include/asm-mips/jmr3927/irq.h (limited to 'include') diff --git 
a/arch/mips/Kconfig b/arch/mips/Kconfig index 2fd82e548187..acceae0e8319 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -779,6 +779,7 @@ config TOSHIBA_JMR3927 select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_BIG_ENDIAN select TOSHIBA_BOARDS + select GENERIC_HARDIRQS_NO__DO_IRQ config TOSHIBA_RBTX4927 bool "Toshiba TBTX49[23]7 board" diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig index 21a094752dab..068e48ec7093 100644 --- a/arch/mips/configs/jmr3927_defconfig +++ b/arch/mips/configs/jmr3927_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.20 -# Tue Feb 20 21:47:34 2007 +# Linux kernel version: 2.6.21-rc3 +# Thu Mar 15 00:40:40 2007 # CONFIG_MIPS=y @@ -70,7 +70,7 @@ CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_TIME=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y -# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_DMA_NONCOHERENT=y CONFIG_DMA_NEED_PCI_MAP_STATE=y CONFIG_CPU_BIG_ENDIAN=y @@ -138,12 +138,12 @@ CONFIG_ZONE_DMA_FLAG=1 # CONFIG_HZ_48 is not set # CONFIG_HZ_100 is not set # CONFIG_HZ_128 is not set -# CONFIG_HZ_250 is not set +CONFIG_HZ_250=y # CONFIG_HZ_256 is not set -CONFIG_HZ_1000=y +# CONFIG_HZ_1000 is not set # CONFIG_HZ_1024 is not set CONFIG_SYS_SUPPORTS_ARBIT_HZ=y -CONFIG_HZ=1000 +CONFIG_HZ=250 CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set @@ -175,14 +175,15 @@ CONFIG_SYSVIPC_SYSCTL=y # CONFIG_AUDIT is not set # CONFIG_IKCONFIG is not set CONFIG_SYSFS_DEPRECATED=y -CONFIG_RELAY=y +# CONFIG_RELAY is not set +# CONFIG_BLK_DEV_INITRD is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_EMBEDDED=y CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y # CONFIG_KALLSYMS_EXTRA_PASS is not set -CONFIG_HOTPLUG=y +# CONFIG_HOTPLUG is not set CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y @@ -217,11 +218,11 @@ CONFIG_IOSCHED_NOOP=y CONFIG_IOSCHED_AS=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y -CONFIG_DEFAULT_AS=y +# CONFIG_DEFAULT_AS is not set # CONFIG_DEFAULT_DEADLINE is not set -# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="anticipatory" +CONFIG_DEFAULT_IOSCHED="cfq" # # Bus options (PCI, PCMCIA, EISA, ISA, TC) @@ -233,12 +234,10 @@ CONFIG_MMU=y # # PCCARD (PCMCIA/CardBus) support # -# CONFIG_PCCARD is not set # # PCI Hotplug Support # -# CONFIG_HOTPLUG_PCI is not set # # Executable file formats @@ -250,10 +249,7 @@ CONFIG_TRAD_SIGNALS=y # # Power management options # -CONFIG_PM=y -# CONFIG_PM_LEGACY is not set -# CONFIG_PM_DEBUG is not set -# CONFIG_PM_SYSFS_DEPRECATED is not set +# CONFIG_PM is not set # # Networking @@ -267,12 +263,7 @@ CONFIG_NET=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y -CONFIG_XFRM=y -CONFIG_XFRM_USER=y -# CONFIG_XFRM_SUB_POLICY is not set -CONFIG_XFRM_MIGRATE=y -CONFIG_NET_KEY=y -CONFIG_NET_KEY_MIGRATE=y +# CONFIG_NET_KEY is not set CONFIG_INET=y # CONFIG_IP_MULTICAST is not set # CONFIG_IP_ADVANCED_ROUTER is not set @@ -290,19 +281,18 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_INET_IPCOMP is not set # CONFIG_INET_XFRM_TUNNEL is not set # CONFIG_INET_TUNNEL is not set -CONFIG_INET_XFRM_MODE_TRANSPORT=y -CONFIG_INET_XFRM_MODE_TUNNEL=y -CONFIG_INET_XFRM_MODE_BEET=y -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# 
CONFIG_INET_DIAG is not set # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y +# CONFIG_TCP_MD5SIG is not set # CONFIG_IPV6 is not set # CONFIG_INET6_XFRM_TUNNEL is not set # CONFIG_INET6_TUNNEL is not set -CONFIG_NETWORK_SECMARK=y +# CONFIG_NETWORK_SECMARK is not set # CONFIG_NETFILTER is not set # @@ -343,13 +333,7 @@ CONFIG_NETWORK_SECMARK=y # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set -CONFIG_IEEE80211=y -# CONFIG_IEEE80211_DEBUG is not set -CONFIG_IEEE80211_CRYPT_WEP=y -CONFIG_IEEE80211_CRYPT_CCMP=y -CONFIG_IEEE80211_SOFTMAC=y -# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set -CONFIG_WIRELESS_EXT=y +# CONFIG_IEEE80211 is not set # # Device Drivers @@ -360,14 +344,12 @@ CONFIG_WIRELESS_EXT=y # CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y # CONFIG_SYS_HYPERVISOR is not set # # Connector - unified userspace <-> kernelspace linker # -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y +# CONFIG_CONNECTOR is not set # # Memory Technology Devices (MTD) @@ -396,16 +378,13 @@ CONFIG_PROC_EVENTS=y # CONFIG_BLK_DEV_NBD is not set # CONFIG_BLK_DEV_SX8 is not set # CONFIG_BLK_DEV_RAM is not set -# CONFIG_BLK_DEV_INITRD is not set -CONFIG_CDROM_PKTCDVD=y -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -# CONFIG_CDROM_PKTCDVD_WCACHE is not set -CONFIG_ATA_OVER_ETH=y +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set # # Misc devices # -CONFIG_SGI_IOC4=y +# CONFIG_SGI_IOC4 is not set # CONFIG_TIFM_CORE is not set # @@ -416,7 +395,7 @@ CONFIG_SGI_IOC4=y # # SCSI device support # -CONFIG_RAID_ATTRS=y +# CONFIG_RAID_ATTRS is not set # CONFIG_SCSI is not set # CONFIG_SCSI_NETLINK is not set @@ -462,26 +441,13 @@ CONFIG_NETDEVICES=y # # PHY device support # -CONFIG_PHYLIB=y - -# -# MII PHY device drivers -# -CONFIG_MARVELL_PHY=y -CONFIG_DAVICOM_PHY=y -CONFIG_QSEMI_PHY=y -CONFIG_LXT_PHY=y -CONFIG_CICADA_PHY=y -CONFIG_VITESSE_PHY=y -CONFIG_SMSC_PHY=y -# CONFIG_BROADCOM_PHY is not set -# CONFIG_FIXED_PHY is not set +# CONFIG_PHYLIB is not set # # Ethernet (10 or 100Mbit) # CONFIG_NET_ETHERNET=y -# CONFIG_MII is not set +CONFIG_MII=y # CONFIG_HAPPYMEAL is not set # CONFIG_SUNGEM is not set # CONFIG_CASSINI is not set @@ -493,7 +459,27 @@ CONFIG_NET_ETHERNET=y # # CONFIG_NET_TULIP is not set # CONFIG_HP100 is not set -# CONFIG_NET_PCI is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_B44 is not set +# CONFIG_FORCEDETH is not set +CONFIG_TC35815=y +# CONFIG_DGRS is not set +# CONFIG_EEPRO100 is not set +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_SIS900 is not set +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set +# CONFIG_SC92031 is not set # # Ethernet (1000 Mbit) @@ -509,20 +495,21 @@ CONFIG_NET_ETHERNET=y # CONFIG_SKGE is not set # CONFIG_SKY2 is not set # CONFIG_SK98LIN is not set +# CONFIG_VIA_VELOCITY is not set # CONFIG_TIGON3 is not set # CONFIG_BNX2 is not set -CONFIG_QLA3XXX=y +# CONFIG_QLA3XXX is not set # CONFIG_ATL1 is not set # # Ethernet (10000 Mbit) # # CONFIG_CHELSIO_T1 is not set -CONFIG_CHELSIO_T3=y +# CONFIG_CHELSIO_T3 is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set # CONFIG_MYRI10GE is not set -CONFIG_NETXEN_NIC=y +# CONFIG_NETXEN_NIC is not set # # Token Ring devices @@ -566,10 
+553,7 @@ CONFIG_INPUT=y # # Userland interfaces # -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_JOYDEV is not set # CONFIG_INPUT_TSDEV is not set # CONFIG_INPUT_EVDEV is not set @@ -587,21 +571,13 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 # # Hardware I/O ports # -CONFIG_SERIO=y -# CONFIG_SERIO_I8042 is not set -CONFIG_SERIO_SERPORT=y -# CONFIG_SERIO_PCIPS2 is not set -# CONFIG_SERIO_LIBPS2 is not set -CONFIG_SERIO_RAW=y +# CONFIG_SERIO is not set # CONFIG_GAMEPORT is not set # # Character devices # -CONFIG_VT=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y +# CONFIG_VT is not set CONFIG_SERIAL_NONSTANDARD=y # CONFIG_COMPUTONE is not set # CONFIG_ROCKETPORT is not set @@ -609,7 +585,7 @@ CONFIG_SERIAL_NONSTANDARD=y # CONFIG_DIGIEPCA is not set # CONFIG_MOXA_INTELLIO is not set # CONFIG_MOXA_SMARTIO is not set -CONFIG_MOXA_SMARTIO_NEW=y +# CONFIG_MOXA_SMARTIO_NEW is not set # CONFIG_ISI is not set # CONFIG_SYNCLINKMP is not set # CONFIG_SYNCLINK_GT is not set @@ -629,11 +605,12 @@ CONFIG_MOXA_SMARTIO_NEW=y # Non-8250 serial port support # CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_TXX9=y CONFIG_HAS_TXX9_SERIAL=y CONFIG_SERIAL_TXX9_NR_UARTS=6 -# CONFIG_SERIAL_TXX9_CONSOLE is not set -# CONFIG_SERIAL_TXX9_STDSERIAL is not set +CONFIG_SERIAL_TXX9_CONSOLE=y +CONFIG_SERIAL_TXX9_STDSERIAL=y # CONFIG_SERIAL_JSM is not set # CONFIG_UNIX98_PTYS is not set CONFIG_LEGACY_PTYS=y @@ -684,6 +661,11 @@ CONFIG_LEGACY_PTY_COUNT=256 # CONFIG_HWMON is not set # CONFIG_HWMON_VID is not set +# +# Multifunction device drivers +# +# CONFIG_MFD_SM501 is not set + # # Multimedia devices # @@ -697,51 +679,8 @@ CONFIG_LEGACY_PTY_COUNT=256 # # Graphics support # -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB=y -# CONFIG_FB_CFB_FILLRECT is not set -# CONFIG_FB_CFB_COPYAREA is not set -# CONFIG_FB_CFB_IMAGEBLIT is not set -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -# CONFIG_FB_MODE_HELPERS is not set -# CONFIG_FB_TILEBLITTING is not set -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_SMIVGX is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_VIRTUAL is not set - -# -# Console display driver support -# -# CONFIG_VGA_CONSOLE is not set -CONFIG_DUMMY_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE is not set - -# -# Logo configuration -# -# CONFIG_LOGO is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set +# CONFIG_FB is not set # # Sound @@ -864,7 +803,7 @@ CONFIG_INOTIFY_USER=y CONFIG_DNOTIFY=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set -CONFIG_FUSE_FS=y +# CONFIG_FUSE_FS is not set # # CD-ROM/DVD Filesystems @@ -889,14 +828,13 @@ CONFIG_SYSFS=y # CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set CONFIG_RAMFS=y -CONFIG_CONFIGFS_FS=y +# CONFIG_CONFIGFS_FS is not set # # Miscellaneous filesystems # # CONFIG_ADFS_FS is not set # 
CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set # CONFIG_HFS_FS is not set # CONFIG_HFSPLUS_FS is not set # CONFIG_BEFS_FS is not set @@ -944,10 +882,7 @@ CONFIG_MSDOS_PARTITION=y # # Distributed Lock Manager # -CONFIG_DLM=y -CONFIG_DLM_TCP=y -# CONFIG_DLM_SCTP is not set -# CONFIG_DLM_DEBUG is not set +# CONFIG_DLM is not set # # Profiling support @@ -972,65 +907,22 @@ CONFIG_CMDLINE="" # # Security options # -CONFIG_KEYS=y -CONFIG_KEYS_DEBUG_PROC_KEYS=y +# CONFIG_KEYS is not set # CONFIG_SECURITY is not set # # Cryptographic options # -CONFIG_CRYPTO=y -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_WP512=y -CONFIG_CRYPTO_TGR192=y -CONFIG_CRYPTO_GF128MUL=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_LRW=y -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_FCRYPT=y -CONFIG_CRYPTO_BLOWFISH=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_TWOFISH_COMMON=y -CONFIG_CRYPTO_SERPENT=y -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_CAST5=y -CONFIG_CRYPTO_CAST6=y -CONFIG_CRYPTO_TEA=y -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_KHAZAD=y -CONFIG_CRYPTO_ANUBIS=y -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_MICHAEL_MIC=y -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CAMELLIA=y - -# -# Hardware crypto devices -# +# CONFIG_CRYPTO is not set # # Library routines # CONFIG_BITREVERSE=y # CONFIG_CRC_CCITT is not set -CONFIG_CRC16=y +# CONFIG_CRC16 is not set CONFIG_CRC32=y -CONFIG_LIBCRC32C=y -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y +# CONFIG_LIBCRC32C is not set CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y diff --git a/arch/mips/jmr3927/common/prom.c b/arch/mips/jmr3927/common/prom.c index aa481b774c42..5398813e50e6 100644 --- a/arch/mips/jmr3927/common/prom.c +++ b/arch/mips/jmr3927/common/prom.c @@ -41,16 +41,6 @@ #include -extern int prom_argc; -extern char **prom_argv, **prom_envp; - -typedef struct -{ - char *name; -/* char *val; */ -}t_env_var; - - char * __init prom_getcmdline(void) { return &(arcs_cmdline[0]); @@ -60,6 +50,8 @@ void __init prom_init_cmdline(void) { char *cp; int actr; + int prom_argc = fw_arg0; + char **prom_argv = (char **) fw_arg1; actr = 1; /* Always ignore argv[0] */ diff --git a/arch/mips/jmr3927/common/puts.c b/arch/mips/jmr3927/common/puts.c index 1c1cad9cd078..c611ab497888 100644 --- a/arch/mips/jmr3927/common/puts.c +++ b/arch/mips/jmr3927/common/puts.c @@ -32,137 +32,29 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include -#include #include -#include #define TIMEOUT 0xffffff -#define SLOW_DOWN - -static const char digits[16] = "0123456789abcdef"; - -#ifdef SLOW_DOWN -#define slow_down() { int k; for (k=0; k<10000; k++); } -#else -#define slow_down() -#endif void -putch(const unsigned char c) +prom_putchar(char c) { int i = 0; do { - slow_down(); i++; - if (i>TIMEOUT) { + if (i>TIMEOUT) break; - } } while (!(tx3927_sioptr(1)->cisr & TXx927_SICISR_TXALS)); tx3927_sioptr(1)->tfifo = c; return; } -unsigned char getch(void) -{ - int i = 0; - int dicr; - char c; - - /* diable RX int. */ - dicr = tx3927_sioptr(1)->dicr; - tx3927_sioptr(1)->dicr = 0; - - do { - slow_down(); - i++; - if (i>TIMEOUT) { - break; - } - } while (tx3927_sioptr(1)->disr & TXx927_SIDISR_UVALID) - ; - c = tx3927_sioptr(1)->rfifo; - - /* clear RX int. status */ - tx3927_sioptr(1)->disr &= ~TXx927_SIDISR_RDIS; - /* enable RX int. 
*/ - tx3927_sioptr(1)->dicr = dicr; - - return c; -} -void -do_jmr3927_led_set(char n) -{ - /* and with current leds */ - jmr3927_led_and_set(n); -} - void -puts(unsigned char *cp) +puts(const char *cp) { - int i = 0; - - while (*cp) { - do { - slow_down(); - i++; - if (i>TIMEOUT) { - break; - } - } while (!(tx3927_sioptr(1)->cisr & TXx927_SICISR_TXALS)); - tx3927_sioptr(1)->tfifo = *cp++; - } - putch('\r'); - putch('\n'); -} - -void -fputs(unsigned char *cp) -{ - int i = 0; - - while (*cp) { - do { - slow_down(); - i++; - if (i>TIMEOUT) { - break; - } - } while (!(tx3927_sioptr(1)->cisr & TXx927_SICISR_TXALS)); - tx3927_sioptr(1)->tfifo = *cp++; - } -} - - -void -put64(uint64_t ul) -{ - int cnt; - unsigned ch; - - cnt = 16; /* 16 nibbles in a 64 bit long */ - putch('0'); - putch('x'); - do { - cnt--; - ch = (unsigned char)(ul >> cnt * 4) & 0x0F; - putch(digits[ch]); - } while (cnt > 0); -} - -void -put32(unsigned u) -{ - int cnt; - unsigned ch; - - cnt = 8; /* 8 nibbles in a 32 bit long */ - putch('0'); - putch('x'); - do { - cnt--; - ch = (unsigned char)(u >> cnt * 4) & 0x0F; - putch(digits[ch]); - } while (cnt > 0); + while (*cp) + prom_putchar(*cp++); + prom_putchar('\r'); + prom_putchar('\n'); } diff --git a/arch/mips/jmr3927/rbhma3100/Makefile b/arch/mips/jmr3927/rbhma3100/Makefile index 18fe9a898cb7..8d00ba460cef 100644 --- a/arch/mips/jmr3927/rbhma3100/Makefile +++ b/arch/mips/jmr3927/rbhma3100/Makefile @@ -3,5 +3,4 @@ # obj-y += init.o irq.o setup.o -obj-$(CONFIG_RUNTIME_DEBUG) += debug.o obj-$(CONFIG_KGDB) += kgdb_io.o diff --git a/arch/mips/jmr3927/rbhma3100/init.c b/arch/mips/jmr3927/rbhma3100/init.c index a55cb4572ded..9169fab1773a 100644 --- a/arch/mips/jmr3927/rbhma3100/init.c +++ b/arch/mips/jmr3927/rbhma3100/init.c @@ -28,20 +28,10 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include -#include -#include -#include - -#include #include -#include #include -int prom_argc; -char **prom_argv, **prom_envp; extern void __init prom_init_cmdline(void); -extern char *prom_getenv(char *envname); -unsigned long mips_nofpu = 0; const char *get_system_type(void) { @@ -52,7 +42,7 @@ const char *get_system_type(void) ; } -extern void puts(unsigned char *cp); +extern void puts(const char *cp); void __init prom_init(void) { @@ -61,10 +51,6 @@ void __init prom_init(void) if ((tx3927_ccfgptr->ccfg & TX3927_CCFG_TLBOFF) == 0) puts("Warning: TX3927 TLB off\n"); #endif - prom_argc = fw_arg0; - prom_argv = (char **) fw_arg1; - prom_envp = (char **) fw_arg2; - mips_machgroup = MACH_GROUP_TOSHIBA; #ifdef CONFIG_TOSHIBA_JMR3927 diff --git a/arch/mips/jmr3927/rbhma3100/irq.c b/arch/mips/jmr3927/rbhma3100/irq.c index 7d2c203cb406..1187b44a3dd4 100644 --- a/arch/mips/jmr3927/rbhma3100/irq.c +++ b/arch/mips/jmr3927/rbhma3100/irq.c @@ -30,53 +30,21 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include - -#include -#include -#include -#include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include -#include #include -#include -#include #include #if JMR3927_IRQ_END > NR_IRQS #error JMR3927_IRQ_END > NR_IRQS #endif -struct tb_irq_space* tb_irq_spaces; - -static int jmr3927_irq_base = -1; - -#ifdef CONFIG_PCI -static int jmr3927_gen_iack(void) -{ - /* generate ACK cycle */ -#ifdef __BIG_ENDIAN - return (tx3927_pcicptr->iiadp >> 24) & 0xff; -#else - return tx3927_pcicptr->iiadp & 0xff; -#endif -} -#endif - #define irc_dlevel 0 #define irc_elevel 1 @@ -87,89 +55,24 @@ static unsigned char irc_level[TX3927_NUM_IR] = { 6, 6, 6 /* TMR */ }; -static void jmr3927_irq_disable(unsigned int irq_nr); -static void jmr3927_irq_enable(unsigned int irq_nr); - -static void jmr3927_irq_ack(unsigned int irq) -{ - if (irq == JMR3927_IRQ_IRC_TMR0) - jmr3927_tmrptr->tisr = 0; /* ack interrupt */ - - jmr3927_irq_disable(irq); -} - -static void jmr3927_irq_end(unsigned int irq) -{ - if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) - jmr3927_irq_enable(irq); -} - -static void jmr3927_irq_disable(unsigned int irq_nr) -{ - struct tb_irq_space* sp; - - for (sp = tb_irq_spaces; sp; sp = sp->next) { - if (sp->start_irqno <= irq_nr && - irq_nr < sp->start_irqno + sp->nr_irqs) { - if (sp->mask_func) - sp->mask_func(irq_nr - sp->start_irqno, - sp->space_id); - break; - } - } -} - -static void jmr3927_irq_enable(unsigned int irq_nr) -{ - struct tb_irq_space* sp; - - for (sp = tb_irq_spaces; sp; sp = sp->next) { - if (sp->start_irqno <= irq_nr && - irq_nr < sp->start_irqno + sp->nr_irqs) { - if (sp->unmask_func) - sp->unmask_func(irq_nr - sp->start_irqno, - sp->space_id); - break; - } - } -} - /* * CP0_STATUS is a thread's resource (saved/restored on context switch). - * So disable_irq/enable_irq MUST handle IOC/ISAC/IRC registers. + * So disable_irq/enable_irq MUST handle IOC/IRC registers. 
*/ -static void mask_irq_isac(int irq_nr, int space_id) -{ - /* 0: mask */ - unsigned char imask = - jmr3927_isac_reg_in(JMR3927_ISAC_INTM_ADDR); - unsigned int bit = 1 << irq_nr; - jmr3927_isac_reg_out(imask & ~bit, JMR3927_ISAC_INTM_ADDR); - /* flush write buffer */ - (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR); -} -static void unmask_irq_isac(int irq_nr, int space_id) -{ - /* 0: mask */ - unsigned char imask = jmr3927_isac_reg_in(JMR3927_ISAC_INTM_ADDR); - unsigned int bit = 1 << irq_nr; - jmr3927_isac_reg_out(imask | bit, JMR3927_ISAC_INTM_ADDR); - /* flush write buffer */ - (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR); -} - -static void mask_irq_ioc(int irq_nr, int space_id) +static void mask_irq_ioc(unsigned int irq) { /* 0: mask */ + unsigned int irq_nr = irq - JMR3927_IRQ_IOC; unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR); unsigned int bit = 1 << irq_nr; jmr3927_ioc_reg_out(imask & ~bit, JMR3927_IOC_INTM_ADDR); /* flush write buffer */ (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR); } -static void unmask_irq_ioc(int irq_nr, int space_id) +static void unmask_irq_ioc(unsigned int irq) { /* 0: mask */ + unsigned int irq_nr = irq - JMR3927_IRQ_IOC; unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR); unsigned int bit = 1 << irq_nr; jmr3927_ioc_reg_out(imask | bit, JMR3927_IOC_INTM_ADDR); @@ -177,8 +80,9 @@ static void unmask_irq_ioc(int irq_nr, int space_id) (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR); } -static void mask_irq_irc(int irq_nr, int space_id) +static void mask_irq_irc(unsigned int irq) { + unsigned int irq_nr = irq - JMR3927_IRQ_IRC; volatile unsigned long *ilrp = &tx3927_ircptr->ilr[irq_nr / 2]; if (irq_nr & 1) *ilrp = (*ilrp & 0x00ff) | (irc_dlevel << 8); @@ -191,8 +95,9 @@ static void mask_irq_irc(int irq_nr, int space_id) (void)tx3927_ircptr->ssr; } -static void unmask_irq_irc(int irq_nr, int space_id) +static void unmask_irq_irc(unsigned int irq) { + unsigned int irq_nr = irq - JMR3927_IRQ_IRC; volatile unsigned long *ilrp = &tx3927_ircptr->ilr[irq_nr / 2]; if (irq_nr & 1) *ilrp = (*ilrp & 0x00ff) | (irc_level[irq_nr] << 8); @@ -203,98 +108,14 @@ static void unmask_irq_irc(int irq_nr, int space_id) tx3927_ircptr->imr = irc_elevel; } -struct tb_irq_space jmr3927_isac_irqspace = { - .next = NULL, - .start_irqno = JMR3927_IRQ_ISAC, - nr_irqs : JMR3927_NR_IRQ_ISAC, - .mask_func = mask_irq_isac, - .unmask_func = unmask_irq_isac, - .name = "ISAC", - .space_id = 0, - can_share : 0 -}; -struct tb_irq_space jmr3927_ioc_irqspace = { - .next = NULL, - .start_irqno = JMR3927_IRQ_IOC, - nr_irqs : JMR3927_NR_IRQ_IOC, - .mask_func = mask_irq_ioc, - .unmask_func = unmask_irq_ioc, - .name = "IOC", - .space_id = 0, - can_share : 1 -}; - -struct tb_irq_space jmr3927_irc_irqspace = { - .next = NULL, - .start_irqno = JMR3927_IRQ_IRC, - .nr_irqs = JMR3927_NR_IRQ_IRC, - .mask_func = mask_irq_irc, - .unmask_func = unmask_irq_irc, - .name = "on-chip", - .space_id = 0, - .can_share = 0 -}; - - -#ifdef CONFIG_TX_BRANCH_LIKELY_BUG_WORKAROUND -static int tx_branch_likely_bug_count = 0; -static int have_tx_branch_likely_bug = 0; - -static void tx_branch_likely_bug_fixup(void) -{ - struct pt_regs *regs = get_irq_regs(); - - /* TX39/49-BUG: Under this condition, the insn in delay slot - of the branch likely insn is executed (not nullified) even - the branch condition is false. 
*/ - if (!have_tx_branch_likely_bug) - return; - if ((regs->cp0_epc & 0xfff) == 0xffc && - KSEGX(regs->cp0_epc) != KSEG0 && - KSEGX(regs->cp0_epc) != KSEG1) { - unsigned int insn = *(unsigned int*)(regs->cp0_epc - 4); - /* beql,bnel,blezl,bgtzl */ - /* bltzl,bgezl,blezall,bgezall */ - /* bczfl, bcztl */ - if ((insn & 0xf0000000) == 0x50000000 || - (insn & 0xfc0e0000) == 0x04020000 || - (insn & 0xf3fe0000) == 0x41020000) { - regs->cp0_epc -= 4; - tx_branch_likely_bug_count++; - printk(KERN_INFO - "fix branch-likery bug in %s (insn %08x)\n", - current->comm, insn); - } - } -} -#endif - -static void jmr3927_spurious(void) -{ - struct pt_regs * regs = get_irq_regs(); - -#ifdef CONFIG_TX_BRANCH_LIKELY_BUG_WORKAROUND - tx_branch_likely_bug_fixup(); -#endif - printk(KERN_WARNING "spurious interrupt (cause 0x%lx, pc 0x%lx, ra 0x%lx).\n", - regs->cp0_cause, regs->cp0_epc, regs->regs[31]); -} - asmlinkage void plat_irq_dispatch(void) { - struct pt_regs * regs = get_irq_regs(); + unsigned long cp0_cause = read_c0_cause(); int irq; -#ifdef CONFIG_TX_BRANCH_LIKELY_BUG_WORKAROUND - tx_branch_likely_bug_fixup(); -#endif - if ((regs->cp0_cause & CAUSEF_IP7) == 0) { -#if 0 - jmr3927_spurious(); -#endif + if ((cp0_cause & CAUSEF_IP7) == 0) return; - } - irq = (regs->cp0_cause >> CAUSEB_IP2) & 0x0f; + irq = (cp0_cause >> CAUSEB_IP2) & 0x0f; do_IRQ(irq + JMR3927_IRQ_IRC); } @@ -317,35 +138,6 @@ static struct irqaction ioc_action = { jmr3927_ioc_interrupt, 0, CPU_MASK_NONE, "IOC", NULL, NULL, }; -static irqreturn_t jmr3927_isac_interrupt(int irq, void *dev_id) -{ - unsigned char istat = jmr3927_isac_reg_in(JMR3927_ISAC_INTS2_ADDR); - int i; - - for (i = 0; i < JMR3927_NR_IRQ_ISAC; i++) { - if (istat & (1 << i)) { - irq = JMR3927_IRQ_ISAC + i; - do_IRQ(irq); - } - } - return IRQ_HANDLED; -} - -static struct irqaction isac_action = { - jmr3927_isac_interrupt, 0, CPU_MASK_NONE, "ISAC", NULL, NULL, -}; - - -static irqreturn_t jmr3927_isaerr_interrupt(int irq, void *dev_id) -{ - printk(KERN_WARNING "ISA error interrupt (irq 0x%x).\n", irq); - - return IRQ_HANDLED; -} -static struct irqaction isaerr_action = { - jmr3927_isaerr_interrupt, 0, CPU_MASK_NONE, "ISA error", NULL, NULL, -}; - static irqreturn_t jmr3927_pcierr_interrupt(int irq, void *dev_id) { printk(KERN_WARNING "PCI error interrupt (irq 0x%x).\n", irq); @@ -358,54 +150,19 @@ static struct irqaction pcierr_action = { jmr3927_pcierr_interrupt, 0, CPU_MASK_NONE, "PCI error", NULL, NULL, }; -int jmr3927_ether1_irq = 0; - -void jmr3927_irq_init(u32 irq_base); +static void __init jmr3927_irq_init(void); void __init arch_init_irq(void) { - /* look for io board's presence */ - int have_isac = jmr3927_have_isac(); - /* Now, interrupt control disabled, */ /* all IRC interrupts are masked, */ /* all IRC interrupt mode are Low Active. */ - if (have_isac) { - - /* ETHER1 (NE2000 compatible 10M-Ether) parameter setup */ - /* temporary enable interrupt control */ - tx3927_ircptr->cer = 1; - /* ETHER1 Int. Is High-Active. */ - if (tx3927_ircptr->ssr & (1 << 0)) - jmr3927_ether1_irq = JMR3927_IRQ_IRC_INT0; -#if 0 /* INT3 may be asserted by ether0 (even after reboot...) 
*/ - else if (tx3927_ircptr->ssr & (1 << 3)) - jmr3927_ether1_irq = JMR3927_IRQ_IRC_INT3; -#endif - /* disable interrupt control */ - tx3927_ircptr->cer = 0; - - /* Ether1: High Active */ - if (jmr3927_ether1_irq) { - int ether1_irc = jmr3927_ether1_irq - JMR3927_IRQ_IRC; - tx3927_ircptr->cr[ether1_irc / 8] |= - TX3927_IRCR_HIGH << ((ether1_irc % 8) * 2); - } - } - /* mask all IOC interrupts */ jmr3927_ioc_reg_out(0, JMR3927_IOC_INTM_ADDR); /* setup IOC interrupt mode (SOFT:High Active, Others:Low Active) */ jmr3927_ioc_reg_out(JMR3927_IOC_INTF_SOFT, JMR3927_IOC_INTP_ADDR); - if (have_isac) { - /* mask all ISAC interrupts */ - jmr3927_isac_reg_out(0, JMR3927_ISAC_INTM_ADDR); - /* setup ISAC interrupt mode (ISAIRQ3,ISAIRQ5:Low Active ???) */ - jmr3927_isac_reg_out(JMR3927_ISAC_INTF_IRQ3|JMR3927_ISAC_INTF_IRQ5, JMR3927_ISAC_INTP_ADDR); - } - /* clear PCI Soft interrupts */ jmr3927_ioc_reg_out(0, JMR3927_IOC_INTS1_ADDR); /* clear PCI Reset interrupts */ @@ -415,21 +172,11 @@ void __init arch_init_irq(void) tx3927_ircptr->cer = TX3927_IRCER_ICE; tx3927_ircptr->imr = irc_elevel; - jmr3927_irq_init(NR_ISA_IRQS); - - /* setup irq space */ - add_tb_irq_space(&jmr3927_isac_irqspace); - add_tb_irq_space(&jmr3927_ioc_irqspace); - add_tb_irq_space(&jmr3927_irc_irqspace); + jmr3927_irq_init(); /* setup IOC interrupt 1 (PCI, MODEM) */ setup_irq(JMR3927_IRQ_IOCINT, &ioc_action); - if (have_isac) { - setup_irq(JMR3927_IRQ_ISACINT, &isac_action); - setup_irq(JMR3927_IRQ_ISAC_ISAER, &isaerr_action); - } - #ifdef CONFIG_PCI setup_irq(JMR3927_IRQ_IRC_PCI, &pcierr_action); #endif @@ -438,21 +185,28 @@ void __init arch_init_irq(void) set_c0_status(ST0_IM); /* IE bit is still 0. */ } -static struct irq_chip jmr3927_irq_controller = { - .name = "jmr3927_irq", - .ack = jmr3927_irq_ack, - .mask = jmr3927_irq_disable, - .mask_ack = jmr3927_irq_ack, - .unmask = jmr3927_irq_enable, - .end = jmr3927_irq_end, +static struct irq_chip jmr3927_irq_ioc = { + .name = "jmr3927_ioc", + .ack = mask_irq_ioc, + .mask = mask_irq_ioc, + .mask_ack = mask_irq_ioc, + .unmask = unmask_irq_ioc, }; -void jmr3927_irq_init(u32 irq_base) +static struct irq_chip jmr3927_irq_irc = { + .name = "jmr3927_irc", + .ack = mask_irq_irc, + .mask = mask_irq_irc, + .mask_ack = mask_irq_irc, + .unmask = unmask_irq_irc, +}; + +static void __init jmr3927_irq_init(void) { u32 i; - for (i= irq_base; i< irq_base + JMR3927_NR_IRQ_IRC + JMR3927_NR_IRQ_IOC; i++) - set_irq_chip(i, &jmr3927_irq_controller); - - jmr3927_irq_base = irq_base; + for (i = JMR3927_IRQ_IRC; i < JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC; i++) + set_irq_chip_and_handler(i, &jmr3927_irq_irc, handle_level_irq); + for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++) + set_irq_chip_and_handler(i, &jmr3927_irq_ioc, handle_level_irq); } diff --git a/arch/mips/jmr3927/rbhma3100/kgdb_io.c b/arch/mips/jmr3927/rbhma3100/kgdb_io.c index 269a42deae06..2604f2c9a96e 100644 --- a/arch/mips/jmr3927/rbhma3100/kgdb_io.c +++ b/arch/mips/jmr3927/rbhma3100/kgdb_io.c @@ -31,23 +31,12 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#include -#include -#include #include #define TIMEOUT 0xffffff -#define SLOW_DOWN - -static const char digits[16] = "0123456789abcdef"; - -#ifdef SLOW_DOWN -#define slow_down() { int k; for (k=0; k<10000; k++); } -#else -#define slow_down() -#endif static int remoteDebugInitialized = 0; +static void debugInit(int baud) int putDebugChar(unsigned char c) { @@ -103,20 +92,8 @@ unsigned char getDebugChar(void) return c; } -void debugInit(int baud) +static void debugInit(int baud) { - /* - volatile unsigned long lcr; - volatile unsigned long dicr; - volatile unsigned long disr; - volatile unsigned long cisr; - volatile unsigned long fcr; - volatile unsigned long flcr; - volatile unsigned long bgr; - volatile unsigned long tfifo; - volatile unsigned long rfifo; - */ - tx3927_sioptr(0)->lcr = 0x020; tx3927_sioptr(0)->dicr = 0; tx3927_sioptr(0)->disr = 0x4100; @@ -125,31 +102,4 @@ void debugInit(int baud) tx3927_sioptr(0)->flcr = 0x02; tx3927_sioptr(0)->bgr = ((JMR3927_BASE_BAUD + baud / 2) / baud) | TXx927_SIBGR_BCLK_T0; -#if 0 - /* - * Reset the UART. - */ - tx3927_sioptr(0)->fcr = TXx927_SIFCR_SWRST; - while (tx3927_sioptr(0)->fcr & TXx927_SIFCR_SWRST) - ; - - /* - * and set the speed of the serial port - * (currently hardwired to 9600 8N1 - */ - - tx3927_sioptr(0)->lcr = TXx927_SILCR_UMODE_8BIT | - TXx927_SILCR_USBL_1BIT | - TXx927_SILCR_SCS_IMCLK_BG; - tx3927_sioptr(0)->bgr = - ((JMR3927_BASE_BAUD + baud / 2) / baud) | - TXx927_SIBGR_BCLK_T0; - - /* HW RTS/CTS control */ - if (ser->flags & ASYNC_HAVE_CTS_LINE) - tx3927_sioptr(0)->flcr = TXx927_SIFLCR_RCS | TXx927_SIFLCR_TES | - TXx927_SIFLCR_RTSTL_MAX /* 15 */; - /* Enable RX/TX */ - tx3927_sioptr(0)->flcr &= ~(TXx927_SIFLCR_RSDE | TXx927_SIFLCR_TSDE); -#endif } diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c index fc523bda068f..d1ef2895d564 100644 --- a/arch/mips/jmr3927/rbhma3100/setup.c +++ b/arch/mips/jmr3927/rbhma3100/setup.c @@ -54,87 +54,18 @@ #include #include -#include -#include #include -#include #include #include -#include -extern void puts(unsigned char *cp); +extern void puts(const char *cp); /* Tick Timer divider */ #define JMR3927_TIMER_CCD 0 /* 1/2 */ #define JMR3927_TIMER_CLK (JMR3927_IMCLK / (2 << JMR3927_TIMER_CCD)) -unsigned char led_state = 0xf; - -struct { - struct resource ram0; - struct resource ram1; - struct resource pcimem; - struct resource iob; - struct resource ioc; - struct resource pciio; - struct resource jmy1394; - struct resource rom1; - struct resource rom0; - struct resource sio0; - struct resource sio1; -} jmr3927_resources = { - { - .start = 0, - .end = 0x01FFFFFF, - .name = "RAM0", - .flags = IORESOURCE_MEM - }, { - .start = 0x02000000, - .end = 0x03FFFFFF, - .name = "RAM1", - .flags = IORESOURCE_MEM - }, { - .start = 0x08000000, - .end = 0x07FFFFFF, - .name = "PCIMEM", - .flags = IORESOURCE_MEM - }, { - .start = 0x10000000, - .end = 0x13FFFFFF, - .name = "IOB" - }, { - .start = 0x14000000, - .end = 0x14FFFFFF, - .name = "IOC" - }, { - .start = 0x15000000, - .end = 0x15FFFFFF, - .name = "PCIIO" - }, { - .start = 0x1D000000, - .end = 0x1D3FFFFF, - .name = "JMY1394" - }, { - .start = 0x1E000000, - .end = 0x1E3FFFFF, - .name = "ROM1" - }, { - .start = 0x1FC00000, - .end = 0x1FFFFFFF, - .name = "ROM0" - }, { - .start = 0xFFFEF300, - .end = 0xFFFEF3FF, - .name = "SIO0" - }, { - .start = 0xFFFEF400, - .end = 0xFFFEF4FF, - .name = "SIO1" - }, -}; - /* don't enable - see errata */ -int jmr3927_ccfg_toeon = 0; +static int jmr3927_ccfg_toeon; static inline void 
do_reset(void) { @@ -173,9 +104,15 @@ static cycle_t jmr3927_hpt_read(void) return jiffies * (JMR3927_TIMER_CLK / HZ) + jmr3927_tmrptr->trr; } +static void jmr3927_timer_ack(void) +{ + jmr3927_tmrptr->tisr = 0; /* ack interrupt */ +} + static void __init jmr3927_time_init(void) { clocksource_mips.read = jmr3927_hpt_read; + mips_timer_ack = jmr3927_timer_ack; mips_hpt_frequency = JMR3927_TIMER_CLK; } @@ -190,9 +127,6 @@ void __init plat_timer_setup(struct irqaction *irq) setup_irq(JMR3927_IRQ_TICK, irq); } -#define USECS_PER_JIFFY (1000000/HZ) - -//#undef DO_WRITE_THROUGH #define DO_WRITE_THROUGH #define DO_ENABLE_CACHE @@ -224,12 +158,6 @@ void __init plat_mem_setup(void) /* Reboot on panic */ panic_timeout = 180; - { - unsigned int conf; - conf = read_c0_conf(); - } - -#if 1 /* cache setup */ { unsigned int conf; @@ -256,16 +184,14 @@ void __init plat_mem_setup(void) write_c0_conf(conf); write_c0_cache(0); } -#endif /* initialize board */ jmr3927_board_init(); argptr = prom_getcmdline(); - if ((argptr = strstr(argptr, "toeon")) != NULL) { - jmr3927_ccfg_toeon = 1; - } + if ((argptr = strstr(argptr, "toeon")) != NULL) + jmr3927_ccfg_toeon = 1; argptr = prom_getcmdline(); if ((argptr = strstr(argptr, "ip=")) == NULL) { argptr = prom_getcmdline(); @@ -281,7 +207,7 @@ void __init plat_mem_setup(void) memset(&req, 0, sizeof(req)); req.line = i; req.iotype = UPIO_MEM; - req.membase = (char *)TX3927_SIO_REG(i); + req.membase = (unsigned char __iomem *)TX3927_SIO_REG(i); req.mapbase = TX3927_SIO_REG(i); req.irq = i == 0 ? JMR3927_IRQ_IRC_SIO0 : JMR3927_IRQ_IRC_SIO1; @@ -303,65 +229,33 @@ void __init plat_mem_setup(void) static void tx3927_setup(void); -#ifdef CONFIG_PCI -unsigned long mips_pci_io_base; -unsigned long mips_pci_io_size; -unsigned long mips_pci_mem_base; -unsigned long mips_pci_mem_size; -/* for legacy I/O, PCI I/O PCI Bus address must be 0 */ -unsigned long mips_pci_io_pciaddr = 0; -#endif - static void __init jmr3927_board_init(void) { - char *argptr; - -#ifdef CONFIG_PCI - mips_pci_io_base = JMR3927_PCIIO; - mips_pci_io_size = JMR3927_PCIIO_SIZE; - mips_pci_mem_base = JMR3927_PCIMEM; - mips_pci_mem_size = JMR3927_PCIMEM_SIZE; -#endif - tx3927_setup(); - if (jmr3927_have_isac()) { - -#ifdef CONFIG_FB_E1355 - argptr = prom_getcmdline(); - if ((argptr = strstr(argptr, "video=")) == NULL) { - argptr = prom_getcmdline(); - strcat(argptr, " video=e1355fb:crt16h"); - } -#endif - -#ifdef CONFIG_BLK_DEV_IDE - /* overrides PCI-IDE */ -#endif - } - /* SIO0 DTR on */ jmr3927_ioc_reg_out(0, JMR3927_IOC_DTR_ADDR); jmr3927_led_set(0); - - if (jmr3927_have_isac()) - jmr3927_io_led_set(0); printk("JMR-TX3927 (Rev %d) --- IOC(Rev %d) DIPSW:%d,%d,%d,%d\n", jmr3927_ioc_reg_in(JMR3927_IOC_BREV_ADDR) & JMR3927_REV_MASK, jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR) & JMR3927_REV_MASK, jmr3927_dipsw1(), jmr3927_dipsw2(), jmr3927_dipsw3(), jmr3927_dipsw4()); - if (jmr3927_have_isac()) - printk("JMI-3927IO2 --- ISAC(Rev %d) DIPSW:%01x\n", - jmr3927_isac_reg_in(JMR3927_ISAC_REV_ADDR) & JMR3927_REV_MASK, - jmr3927_io_dipsw()); } -void __init tx3927_setup(void) +static void __init tx3927_setup(void) { int i; +#ifdef CONFIG_PCI + unsigned long mips_pci_io_base = JMR3927_PCIIO; + unsigned long mips_pci_io_size = JMR3927_PCIIO_SIZE; + unsigned long mips_pci_mem_base = JMR3927_PCIMEM; + unsigned long mips_pci_mem_size = JMR3927_PCIMEM_SIZE; + /* for legacy I/O, PCI I/O PCI Bus address must be 0 */ + unsigned long mips_pci_io_pciaddr = 0; +#endif /* SDRAMC are configured by PROM */ @@ -475,10 +369,8 @@ void __init 
tx3927_setup(void) tx3927_pcicptr->mbas = ~(mips_pci_mem_size - 1); tx3927_pcicptr->mba = 0; tx3927_pcicptr->tlbmma = 0; -#ifndef JMR3927_INIT_INDIRECT_PCI /* Enable Direct mapping Address Space Decoder */ tx3927_pcicptr->lbc |= TX3927_PCIC_LBC_ILMDE | TX3927_PCIC_LBC_ILIDE; -#endif /* Clear All Local Bus Status */ tx3927_pcicptr->lbstat = TX3927_PCIC_LBIM_ALL; @@ -491,22 +383,15 @@ void __init tx3927_setup(void) /* PCIC Int => IRC IRQ10 */ tx3927_pcicptr->il = TX3927_IR_PCI; -#if 1 /* Target Control (per errata) */ tx3927_pcicptr->tc = TX3927_PCIC_TC_OF8E | TX3927_PCIC_TC_IF8E; -#endif /* Enable Bus Arbiter */ -#if 0 - tx3927_pcicptr->req_trace = 0x73737373; -#endif tx3927_pcicptr->pbapmc = TX3927_PCIC_PBAPMC_PBAEN; tx3927_pcicptr->pcicmd = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | -#if 1 PCI_COMMAND_IO | -#endif PCI_COMMAND_PARITY | PCI_COMMAND_SERR; } #endif /* CONFIG_PCI */ @@ -555,8 +440,6 @@ static int __init jmr3927_rtc_init(void) .flags = IORESOURCE_MEM, }; struct platform_device *dev; - if (!jmr3927_have_nvram()) - return -ENODEV; dev = platform_device_register_simple("ds1742", -1, &res, 1); return IS_ERR(dev) ? PTR_ERR(dev) : 0; } diff --git a/arch/mips/pci/fixup-jmr3927.c b/arch/mips/pci/fixup-jmr3927.c index 6e72d213f4cd..73d18503517c 100644 --- a/arch/mips/pci/fixup-jmr3927.c +++ b/arch/mips/pci/fixup-jmr3927.c @@ -29,7 +29,6 @@ */ #include #include -#include #include #include @@ -81,14 +80,8 @@ int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) /* Check OnBoard Ethernet (IDSEL=A24, DevNu=13) */ if (dev->bus->parent == NULL && - slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(24)) { - extern int jmr3927_ether1_irq; - /* check this irq line was reserved for ether1 */ - if (jmr3927_ether1_irq != JMR3927_IRQ_ETHER0) - irq = JMR3927_IRQ_ETHER0; - else - irq = 0; /* disable */ - } + slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(24)) + irq = JMR3927_IRQ_ETHER0; return irq; } diff --git a/arch/mips/pci/ops-tx3927.c b/arch/mips/pci/ops-tx3927.c index 42530a0b84b3..aa698bd0d5e3 100644 --- a/arch/mips/pci/ops-tx3927.c +++ b/arch/mips/pci/ops-tx3927.c @@ -40,7 +40,6 @@ #include #include -#include static inline int mkaddr(unsigned char bus, unsigned char dev_fn, unsigned char where) @@ -130,234 +129,3 @@ struct pci_ops jmr3927_pci_ops = { jmr3927_pci_read_config, jmr3927_pci_write_config, }; - - -#ifndef JMR3927_INIT_INDIRECT_PCI - -inline unsigned long tc_readl(volatile __u32 * addr) -{ - return readl(addr); -} - -inline void tc_writel(unsigned long data, volatile __u32 * addr) -{ - writel(data, addr); -} -#else - -unsigned long tc_readl(volatile __u32 * addr) -{ - unsigned long val; - - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr = - (unsigned long) CPHYSADDR(addr); - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe = - (PCI_IPCIBE_ICMD_MEMREAD << PCI_IPCIBE_ICMD_SHIFT) | - PCI_IPCIBE_IBE_LONG; - while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC)); - val = - le32_to_cpu(*(volatile u32 *) (unsigned long) & tx3927_pcicptr-> - ipcidata); - /* clear by setting */ - tx3927_pcicptr->istat |= PCI_ISTAT_IDICC; - return val; -} - -void tc_writel(unsigned long data, volatile __u32 * addr) -{ - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcidata = - cpu_to_le32(data); - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr = - (unsigned long) CPHYSADDR(addr); - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe = - (PCI_IPCIBE_ICMD_MEMWRITE << PCI_IPCIBE_ICMD_SHIFT) | - PCI_IPCIBE_IBE_LONG; - while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC)); - /* clear 
by setting */ - tx3927_pcicptr->istat |= PCI_ISTAT_IDICC; -} - -unsigned char tx_ioinb(unsigned char *addr) -{ - unsigned long val; - __u32 ioaddr; - int offset; - int byte; - - ioaddr = (unsigned long) addr; - offset = ioaddr & 0x3; - byte = 0xf & ~(8 >> offset); - - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr = - (unsigned long) ioaddr; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe = - (PCI_IPCIBE_ICMD_IOREAD << PCI_IPCIBE_ICMD_SHIFT) | byte; - while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC)); - val = - le32_to_cpu(*(volatile u32 *) (unsigned long) & tx3927_pcicptr-> - ipcidata); - val = val & 0xff; - /* clear by setting */ - tx3927_pcicptr->istat |= PCI_ISTAT_IDICC; - return val; -} - -void tx_iooutb(unsigned long data, unsigned char *addr) -{ - __u32 ioaddr; - int offset; - int byte; - - data = data | (data << 8) | (data << 16) | (data << 24); - ioaddr = (unsigned long) addr; - offset = ioaddr & 0x3; - byte = 0xf & ~(8 >> offset); - - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcidata = data; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr = - (unsigned long) ioaddr; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe = - (PCI_IPCIBE_ICMD_IOWRITE << PCI_IPCIBE_ICMD_SHIFT) | byte; - while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC)); - /* clear by setting */ - tx3927_pcicptr->istat |= PCI_ISTAT_IDICC; -} - -unsigned short tx_ioinw(unsigned short *addr) -{ - unsigned long val; - __u32 ioaddr; - int offset; - int byte; - - ioaddr = (unsigned long) addr; - offset = ioaddr & 0x2; - byte = 3 << offset; - - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr = - (unsigned long) ioaddr; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe = - (PCI_IPCIBE_ICMD_IOREAD << PCI_IPCIBE_ICMD_SHIFT) | byte; - while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC)); - val = - le32_to_cpu(*(volatile u32 *) (unsigned long) & tx3927_pcicptr-> - ipcidata); - val = val & 0xffff; - /* clear by setting */ - tx3927_pcicptr->istat |= PCI_ISTAT_IDICC; - return val; - -} - -void tx_iooutw(unsigned long data, unsigned short *addr) -{ - __u32 ioaddr; - int offset; - int byte; - - data = data | (data << 16); - ioaddr = (unsigned long) addr; - offset = ioaddr & 0x2; - byte = 3 << offset; - - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcidata = data; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr = - (unsigned long) ioaddr; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe = - (PCI_IPCIBE_ICMD_IOWRITE << PCI_IPCIBE_ICMD_SHIFT) | byte; - while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC)); - /* clear by setting */ - tx3927_pcicptr->istat |= PCI_ISTAT_IDICC; -} - -unsigned long tx_ioinl(unsigned int *addr) -{ - unsigned long val; - __u32 ioaddr; - - ioaddr = (unsigned long) addr; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr = - (unsigned long) ioaddr; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe = - (PCI_IPCIBE_ICMD_IOREAD << PCI_IPCIBE_ICMD_SHIFT) | - PCI_IPCIBE_IBE_LONG; - while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC)); - val = - le32_to_cpu(*(volatile u32 *) (unsigned long) & tx3927_pcicptr-> - ipcidata); - /* clear by setting */ - tx3927_pcicptr->istat |= PCI_ISTAT_IDICC; - return val; -} - -void tx_iooutl(unsigned long data, unsigned int *addr) -{ - __u32 ioaddr; - - ioaddr = (unsigned long) addr; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcidata = - cpu_to_le32(data); - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipciaddr = - (unsigned long) 
ioaddr; - *(volatile u32 *) (unsigned long) & tx3927_pcicptr->ipcibe = - (PCI_IPCIBE_ICMD_IOWRITE << PCI_IPCIBE_ICMD_SHIFT) | - PCI_IPCIBE_IBE_LONG; - while (!(tx3927_pcicptr->istat & PCI_ISTAT_IDICC)); - /* clear by setting */ - tx3927_pcicptr->istat |= PCI_ISTAT_IDICC; -} - -void tx_insbyte(unsigned char *addr, void *buffer, unsigned int count) -{ - unsigned char *ptr = (unsigned char *) buffer; - - while (count--) { - *ptr++ = tx_ioinb(addr); - } -} - -void tx_insword(unsigned short *addr, void *buffer, unsigned int count) -{ - unsigned short *ptr = (unsigned short *) buffer; - - while (count--) { - *ptr++ = tx_ioinw(addr); - } -} - -void tx_inslong(unsigned int *addr, void *buffer, unsigned int count) -{ - unsigned long *ptr = (unsigned long *) buffer; - - while (count--) { - *ptr++ = tx_ioinl(addr); - } -} - -void tx_outsbyte(unsigned char *addr, void *buffer, unsigned int count) -{ - unsigned char *ptr = (unsigned char *) buffer; - - while (count--) { - tx_iooutb(*ptr++, addr); - } -} - -void tx_outsword(unsigned short *addr, void *buffer, unsigned int count) -{ - unsigned short *ptr = (unsigned short *) buffer; - - while (count--) { - tx_iooutw(*ptr++, addr); - } -} - -void tx_outslong(unsigned int *addr, void *buffer, unsigned int count) -{ - unsigned long *ptr = (unsigned long *) buffer; - - while (count--) { - tx_iooutl(*ptr++, addr); - } -} -#endif diff --git a/include/asm-mips/jmr3927/irq.h b/include/asm-mips/jmr3927/irq.h deleted file mode 100644 index e3e7ed38da6c..000000000000 --- a/include/asm-mips/jmr3927/irq.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * linux/include/asm-mips/tx3927/irq.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 Toshiba Corporation - */ -#ifndef __ASM_TX3927_IRQ_H -#define __ASM_TX3927_IRQ_H - -#ifndef __ASSEMBLY__ - -#include - -struct tb_irq_space { - struct tb_irq_space* next; - int start_irqno; - int nr_irqs; - void (*mask_func)(int irq_nr, int space_id); - void (*unmask_func)(int irq_no, int space_id); - const char *name; - int space_id; - int can_share; -}; -extern struct tb_irq_space* tb_irq_spaces; - -static __inline__ void add_tb_irq_space(struct tb_irq_space* sp) -{ - sp->next = tb_irq_spaces; - tb_irq_spaces = sp; -} - - -struct pt_regs; -extern void -toshibaboards_spurious(struct pt_regs *regs, int irq); -extern void -toshibaboards_irqdispatch(struct pt_regs *regs, int irq); - -extern struct irqaction * -toshibaboards_get_irq_action(int irq); -extern int -toshibaboards_setup_irq(int irq, struct irqaction * new); - - -extern int (*toshibaboards_gen_iack)(void); - -#endif /* !__ASSEMBLY__ */ - -#define NR_ISA_IRQS 16 -#define TB_IRQ_IS_ISA(irq) \ - (0 <= (irq) && (irq) < NR_ISA_IRQS) -#define TB_IRQ_TO_ISA_IRQ(irq) (irq) - -#endif /* __ASM_TX3927_IRQ_H */ diff --git a/include/asm-mips/jmr3927/jmr3927.h b/include/asm-mips/jmr3927/jmr3927.h index c50e68ffa3af..958e29706e2d 100644 --- a/include/asm-mips/jmr3927/jmr3927.h +++ b/include/asm-mips/jmr3927/jmr3927.h @@ -1,5 +1,5 @@ /* - * Defines for the TJSYS JMR-TX3927/JMI-3927IO2/JMY-1394IF. + * Defines for the TJSYS JMR-TX3927 * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive @@ -12,10 +12,7 @@ #include #include -#include -#ifndef __ASSEMBLY__ #include -#endif /* CS */ #define JMR3927_ROMCE0 0x1fc00000 /* 4M */ @@ -35,28 +32,10 @@ #define JMR3927_SDRAM_SIZE 0x02000000 /* 32M */ #define JMR3927_PORT_BASE KSEG1 -/* select indirect initiator access per errata */ -#define JMR3927_INIT_INDIRECT_PCI -#define PCI_ISTAT_IDICC 0x1000 -#define PCI_IPCIBE_IBE_LONG 0 -#define PCI_IPCIBE_ICMD_IOREAD 2 -#define PCI_IPCIBE_ICMD_IOWRITE 3 -#define PCI_IPCIBE_ICMD_MEMREAD 6 -#define PCI_IPCIBE_ICMD_MEMWRITE 7 -#define PCI_IPCIBE_ICMD_SHIFT 4 - /* Address map (virtual address) */ #define JMR3927_ROM0_BASE (KSEG1 + JMR3927_ROMCE0) #define JMR3927_ROM1_BASE (KSEG1 + JMR3927_ROMCE1) #define JMR3927_IOC_BASE (KSEG1 + JMR3927_ROMCE2) -#define JMR3927_IOB_BASE (KSEG1 + JMR3927_ROMCE3) -#define JMR3927_ISAMEM_BASE (JMR3927_IOB_BASE) -#define JMR3927_ISAIO_BASE (JMR3927_IOB_BASE + 0x01000000) -#define JMR3927_ISAC_BASE (JMR3927_IOB_BASE + 0x02000000) -#define JMR3927_LCDVGA_REG_BASE (JMR3927_IOB_BASE + 0x03000000) -#define JMR3927_LCDVGA_MEM_BASE (JMR3927_IOB_BASE + 0x03800000) -#define JMR3927_JMY1394_BASE (KSEG1 + JMR3927_ROMCE5) -#define JMR3927_PREMIER3_BASE (JMR3927_JMY1394_BASE + 0x00100000) #define JMR3927_PCIMEM_BASE (KSEG1 + JMR3927_PCIMEM) #define JMR3927_PCIIO_BASE (KSEG1 + JMR3927_PCIIO) @@ -72,25 +51,14 @@ #define JMR3927_IOC_INTP_ADDR (JMR3927_IOC_BASE + 0x000b0000) #define JMR3927_IOC_RESET_ADDR (JMR3927_IOC_BASE + 0x000f0000) -#define JMR3927_ISAC_REV_ADDR (JMR3927_ISAC_BASE + 0x00000000) -#define JMR3927_ISAC_EINTS_ADDR (JMR3927_ISAC_BASE + 0x00200000) -#define JMR3927_ISAC_EINTM_ADDR (JMR3927_ISAC_BASE + 0x00300000) -#define JMR3927_ISAC_NMI_ADDR (JMR3927_ISAC_BASE + 0x00400000) -#define JMR3927_ISAC_LED_ADDR (JMR3927_ISAC_BASE + 0x00500000) -#define JMR3927_ISAC_INTP_ADDR (JMR3927_ISAC_BASE + 0x00800000) -#define JMR3927_ISAC_INTS1_ADDR (JMR3927_ISAC_BASE + 0x00900000) -#define JMR3927_ISAC_INTS2_ADDR (JMR3927_ISAC_BASE + 0x00a00000) -#define JMR3927_ISAC_INTM_ADDR (JMR3927_ISAC_BASE + 0x00b00000) - /* Flash ROM */ #define JMR3927_FLASH_BASE (JMR3927_ROM0_BASE) #define JMR3927_FLASH_SIZE 0x00400000 -/* bits for IOC_REV/IOC_BREV/ISAC_REV (high byte) */ +/* bits for IOC_REV/IOC_BREV (high byte) */ #define JMR3927_IDT_MASK 0xfc #define JMR3927_REV_MASK 0x03 #define JMR3927_IOC_IDT 0xe0 -#define JMR3927_ISAC_IDT 0x20 /* bits for IOC_INTS1/IOC_INTS2/IOC_INTM/IOC_INTP (high byte) */ #define JMR3927_IOC_INTB_PCIA 0 @@ -114,40 +82,6 @@ #define JMR3927_IOC_RESET_CPU 1 #define JMR3927_IOC_RESET_PCI 2 -/* bits for ISAC_EINTS/ISAC_EINTM (high byte) */ -#define JMR3927_ISAC_EINTB_IOCHK 2 -#define JMR3927_ISAC_EINTB_BWTH 4 -#define JMR3927_ISAC_EINTF_IOCHK (1 << JMR3927_ISAC_EINTB_IOCHK) -#define JMR3927_ISAC_EINTF_BWTH (1 << JMR3927_ISAC_EINTB_BWTH) - -/* bits for ISAC_LED (high byte) */ -#define JMR3927_ISAC_LED_ISALED 0x01 -#define JMR3927_ISAC_LED_USRLED 0x02 - -/* bits for ISAC_INTS/ISAC_INTM/ISAC_INTP (high byte) */ -#define JMR3927_ISAC_INTB_IRQ5 0 -#define JMR3927_ISAC_INTB_IRQKB 1 -#define JMR3927_ISAC_INTB_IRQMOUSE 2 -#define JMR3927_ISAC_INTB_IRQ4 3 -#define JMR3927_ISAC_INTB_IRQ12 4 -#define JMR3927_ISAC_INTB_IRQ3 5 -#define JMR3927_ISAC_INTB_IRQ10 6 -#define JMR3927_ISAC_INTB_ISAER 7 -#define JMR3927_ISAC_INTF_IRQ5 (1 << JMR3927_ISAC_INTB_IRQ5) -#define JMR3927_ISAC_INTF_IRQKB (1 << JMR3927_ISAC_INTB_IRQKB) -#define JMR3927_ISAC_INTF_IRQMOUSE (1 << JMR3927_ISAC_INTB_IRQMOUSE) -#define JMR3927_ISAC_INTF_IRQ4 
(1 << JMR3927_ISAC_INTB_IRQ4) -#define JMR3927_ISAC_INTF_IRQ12 (1 << JMR3927_ISAC_INTB_IRQ12) -#define JMR3927_ISAC_INTF_IRQ3 (1 << JMR3927_ISAC_INTB_IRQ3) -#define JMR3927_ISAC_INTF_IRQ10 (1 << JMR3927_ISAC_INTB_IRQ10) -#define JMR3927_ISAC_INTF_ISAER (1 << JMR3927_ISAC_INTB_ISAER) - -#ifndef __ASSEMBLY__ - -#if 0 -#define jmr3927_ioc_reg_out(d, a) ((*(volatile unsigned short *)(a)) = (d) << 8) -#define jmr3927_ioc_reg_in(a) (((*(volatile unsigned short *)(a)) >> 8) & 0xff) -#else #if defined(__BIG_ENDIAN) #define jmr3927_ioc_reg_out(d, a) ((*(volatile unsigned char *)(a)) = (d)) #define jmr3927_ioc_reg_in(a) (*(volatile unsigned char *)(a)) @@ -157,31 +91,9 @@ #else #error "No Endian" #endif -#endif -#define jmr3927_isac_reg_out(d, a) ((*(volatile unsigned char *)(a)) = (d)) -#define jmr3927_isac_reg_in(a) (*(volatile unsigned char *)(a)) - -static inline int jmr3927_have_isac(void) -{ - unsigned char idt; - unsigned long flags; - unsigned long romcr3; - - local_irq_save(flags); - romcr3 = tx3927_romcptr->cr[3]; - tx3927_romcptr->cr[3] &= 0xffffefff; /* do not wait infinitely */ - idt = jmr3927_isac_reg_in(JMR3927_ISAC_REV_ADDR) & JMR3927_IDT_MASK; - tx3927_romcptr->cr[3] = romcr3; - local_irq_restore(flags); - - return idt == JMR3927_ISAC_IDT; -} -#define jmr3927_have_nvram() \ - ((jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR) & JMR3927_IDT_MASK) == JMR3927_IOC_IDT) /* LED macro */ #define jmr3927_led_set(n/*0-16*/) jmr3927_ioc_reg_out(~(n), JMR3927_IOC_LED_ADDR) -#define jmr3927_io_led_set(n/*0-3*/) jmr3927_isac_reg_out((n), JMR3927_ISAC_LED_ADDR) #define jmr3927_led_and_set(n/*0-16*/) jmr3927_ioc_reg_out((~(n)) & jmr3927_ioc_reg_in(JMR3927_IOC_LED_ADDR), JMR3927_IOC_LED_ADDR) @@ -190,10 +102,6 @@ static inline int jmr3927_have_isac(void) #define jmr3927_dipsw2() ((tx3927_pioptr->din & (1 << 10)) == 0) #define jmr3927_dipsw3() ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 2) == 0) #define jmr3927_dipsw4() ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 1) == 0) -#define jmr3927_io_dipsw() (jmr3927_isac_reg_in(JMR3927_ISAC_LED_ADDR) >> 4) - - -#endif /* !__ASSEMBLY__ */ /* * IRQ mappings @@ -206,16 +114,10 @@ static inline int jmr3927_have_isac(void) */ #define JMR3927_NR_IRQ_IRC 16 /* On-Chip IRC */ #define JMR3927_NR_IRQ_IOC 8 /* PCI/MODEM/INT[6:7] */ -#define JMR3927_NR_IRQ_ISAC 8 /* ISA */ - -#define JMR3927_IRQ_IRC NR_ISA_IRQS +#define JMR3927_IRQ_IRC 16 #define JMR3927_IRQ_IOC (JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC) -#define JMR3927_IRQ_ISAC (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC) -#define JMR3927_IRQ_END (JMR3927_IRQ_ISAC + JMR3927_NR_IRQ_ISAC) -#define JMR3927_IRQ_IS_IRC(irq) (JMR3927_IRQ_IRC <= (irq) && (irq) < JMR3927_IRQ_IOC) -#define JMR3927_IRQ_IS_IOC(irq) (JMR3927_IRQ_IOC <= (irq) && (irq) < JMR3927_IRQ_ISAC) -#define JMR3927_IRQ_IS_ISAC(irq) (JMR3927_IRQ_ISAC <= (irq) && (irq) < JMR3927_IRQ_END) +#define JMR3927_IRQ_END (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC) #define JMR3927_IRQ_IRC_INT0 (JMR3927_IRQ_IRC + TX3927_IR_INT0) #define JMR3927_IRQ_IRC_INT1 (JMR3927_IRQ_IRC + TX3927_IR_INT1) @@ -240,37 +142,13 @@ static inline int jmr3927_have_isac(void) #define JMR3927_IRQ_IOC_INT6 (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_INT6) #define JMR3927_IRQ_IOC_INT7 (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_INT7) #define JMR3927_IRQ_IOC_SOFT (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_SOFT) -#define JMR3927_IRQ_ISAC_IRQ5 (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ5) -#define JMR3927_IRQ_ISAC_IRQKB (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQKB) -#define JMR3927_IRQ_ISAC_IRQMOUSE (JMR3927_IRQ_ISAC + 
JMR3927_ISAC_INTB_IRQMOUSE) -#define JMR3927_IRQ_ISAC_IRQ4 (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ4) -#define JMR3927_IRQ_ISAC_IRQ12 (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ12) -#define JMR3927_IRQ_ISAC_IRQ3 (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ3) -#define JMR3927_IRQ_ISAC_IRQ10 (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_IRQ10) -#define JMR3927_IRQ_ISAC_ISAER (JMR3927_IRQ_ISAC + JMR3927_ISAC_INTB_ISAER) -#if 0 /* auto detect */ -/* RTL8019AS 10M Ether (JMI-3927IO2:JPW2:1-2 Short) */ -#define JMR3927_IRQ_ETHER1 JMR3927_IRQ_IRC_INT0 -#endif /* IOC (PCI, MODEM) */ #define JMR3927_IRQ_IOCINT JMR3927_IRQ_IRC_INT1 -/* ISAC (ISA, PCMCIA, KEYBOARD, MOUSE) */ -#define JMR3927_IRQ_ISACINT JMR3927_IRQ_IRC_INT2 /* TC35815 100M Ether (JMR-TX3912:JPW4:2-3 Short) */ #define JMR3927_IRQ_ETHER0 JMR3927_IRQ_IRC_INT3 /* Clock Tick (10ms) */ #define JMR3927_IRQ_TICK JMR3927_IRQ_IRC_TMR0 -#define JMR3927_IRQ_IDE JMR3927_IRQ_ISAC_IRQ12 - -/* IEEE1394 (Note that this may conflicts with RTL8019AS 10M Ether...) */ -#define JMR3927_IRQ_PREMIER3 JMR3927_IRQ_IRC_INT0 - -/* I/O Ports */ -/* RTL8019AS 10M Ether */ -#define JMR3927_ETHER1_PORT (JMR3927_ISAIO_BASE - JMR3927_PORT_BASE + 0x280) -#define JMR3927_KBD_PORT (JMR3927_ISAIO_BASE - JMR3927_PORT_BASE + 0x00800060) -#define JMR3927_IDE_PORT (JMR3927_ISAIO_BASE - JMR3927_PORT_BASE + 0x001001f0) /* Clocks */ #define JMR3927_CORECLK 132710400 /* 132.7MHz */ diff --git a/include/asm-mips/jmr3927/tx3927.h b/include/asm-mips/jmr3927/tx3927.h index b3d67c75d9ac..0b9073bfb759 100644 --- a/include/asm-mips/jmr3927/tx3927.h +++ b/include/asm-mips/jmr3927/tx3927.h @@ -22,8 +22,6 @@ #define TX3927_SIO_REG(ch) (0xfffef300 + (ch) * 0x100) #define TX3927_PIO_REG 0xfffef500 -#ifndef __ASSEMBLY__ - struct tx3927_sdramc_reg { volatile unsigned long cr[8]; volatile unsigned long tr[3]; @@ -164,8 +162,6 @@ struct tx3927_ccfg_reg { volatile unsigned long pdcr; }; -#endif /* !__ASSEMBLY__ */ - /* * SDRAMC */ @@ -348,8 +344,6 @@ struct tx3927_ccfg_reg { #define TX3927_PCFG_SELDMA_ALL 0x0000000f #define TX3927_PCFG_SELDMA(ch) (0x00000001<<(ch)) -#ifndef __ASSEMBLY__ - #define tx3927_sdramcptr ((struct tx3927_sdramc_reg *)TX3927_SDRAMC_REG) #define tx3927_romcptr ((struct tx3927_romc_reg *)TX3927_ROMC_REG) #define tx3927_dmaptr ((struct tx3927_dma_reg *)TX3927_DMA_REG) @@ -360,6 +354,4 @@ struct tx3927_ccfg_reg { #define tx3927_sioptr(ch) ((struct txx927_sio_reg *)TX3927_SIO_REG(ch)) #define tx3927_pioptr ((struct txx927_pio_reg *)TX3927_PIO_REG) -#endif /* !__ASSEMBLY__ */ - #endif /* __ASM_TX3927_H */ diff --git a/include/asm-mips/jmr3927/txx927.h b/include/asm-mips/jmr3927/txx927.h index 9d5792eab452..58a8ff6be815 100644 --- a/include/asm-mips/jmr3927/txx927.h +++ b/include/asm-mips/jmr3927/txx927.h @@ -10,8 +10,6 @@ #ifndef __ASM_TXX927_H #define __ASM_TXX927_H -#ifndef __ASSEMBLY__ - struct txx927_tmr_reg { volatile unsigned long tcr; volatile unsigned long tisr; @@ -52,9 +50,6 @@ struct txx927_pio_reg { volatile unsigned long maskext; }; -#endif /* !__ASSEMBLY__ */ - - /* * TMR */ -- cgit v1.2.3-59-g8ed1b From 78709b9df35346965b214e0e548412748d147776 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Thu, 26 Apr 2007 15:46:24 +0100 Subject: [MIPS] IP22: Get rid of volatile in IP22 core code. 
Signed-off-by: Ralf Baechle --- arch/mips/sgi-ip22/ip22-nvram.c | 24 ++++++++++++------------ arch/mips/sgi-ip22/ip22-time.c | 14 +++++++------- include/asm-mips/sgi/hpc3.h | 2 +- include/asm-mips/sgi/ip22.h | 2 +- include/asm-mips/sgi/mc.h | 2 +- 5 files changed, 22 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/arch/mips/sgi-ip22/ip22-nvram.c b/arch/mips/sgi-ip22/ip22-nvram.c index fd29fd407ae8..e19d60d5fcc1 100644 --- a/arch/mips/sgi-ip22/ip22-nvram.c +++ b/arch/mips/sgi-ip22/ip22-nvram.c @@ -52,8 +52,7 @@ * national semiconductor nv ram chip the op code is 3 bits and * the address is 6/8 bits. */ -static inline void eeprom_cmd(volatile unsigned int *ctrl, unsigned cmd, - unsigned reg) +static inline void eeprom_cmd(unsigned int *ctrl, unsigned cmd, unsigned reg) { unsigned short ser_cmd; int i; @@ -61,33 +60,34 @@ static inline void eeprom_cmd(volatile unsigned int *ctrl, unsigned cmd, ser_cmd = cmd | (reg << (16 - BITS_IN_COMMAND)); for (i = 0; i < BITS_IN_COMMAND; i++) { if (ser_cmd & (1<<15)) /* if high order bit set */ - *ctrl |= EEPROM_DATO; + writel(readl(ctrl) | EEPROM_DATO, ctrl); else - *ctrl &= ~EEPROM_DATO; - *ctrl &= ~EEPROM_ECLK; - *ctrl |= EEPROM_ECLK; + writel(readl(ctrl) & ~EEPROM_DATO, ctrl); + writel(readl(ctrl) & ~EEPROM_ECLK, ctrl); + writel(readl(ctrl) | EEPROM_ECLK, ctrl); ser_cmd <<= 1; } - *ctrl &= ~EEPROM_DATO; /* see data sheet timing diagram */ + /* see data sheet timing diagram */ + writel(readl(ctrl) & ~EEPROM_DATO, ctrl); } -unsigned short ip22_eeprom_read(volatile unsigned int *ctrl, int reg) +unsigned short ip22_eeprom_read(unsigned int *ctrl, int reg) { unsigned short res = 0; int i; - *ctrl &= ~EEPROM_EPROT; + writel(readl(ctrl) & ~EEPROM_EPROT, ctrl); eeprom_cs_on(ctrl); eeprom_cmd(ctrl, EEPROM_READ, reg); /* clock the data ouf of serial mem */ for (i = 0; i < 16; i++) { - *ctrl &= ~EEPROM_ECLK; + writel(readl(ctrl) & ~EEPROM_ECLK, ctrl); delay(); - *ctrl |= EEPROM_ECLK; + writel(readl(ctrl) | EEPROM_ECLK, ctrl); delay(); res <<= 1; - if (*ctrl & EEPROM_DATI) + if (readl(ctrl) & EEPROM_DATI) res |= 1; } diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c index 205554734099..8e88a442b22a 100644 --- a/arch/mips/sgi-ip22/ip22-time.c +++ b/arch/mips/sgi-ip22/ip22-time.c @@ -94,7 +94,7 @@ static int indy_rtc_set_time(unsigned long tim) static unsigned long dosample(void) { u32 ct0, ct1; - volatile u8 msb, lsb; + u8 msb, lsb; /* Start the counter. */ sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | @@ -107,21 +107,21 @@ static unsigned long dosample(void) /* Latch and spin until top byte of counter2 is zero */ do { - sgint->tcword = SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT; - lsb = sgint->tcnt2; - msb = sgint->tcnt2; + writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); + lsb = readb(&sgint->tcnt2); + msb = readb(&sgint->tcnt2); ct1 = read_c0_count(); } while (msb); /* Stop the counter. */ - sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | - SGINT_TCWORD_MSWST); + writeb(sgint->tcword, (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | + SGINT_TCWORD_MSWST)); /* * Return the difference, this is how far the r4k counter increments * for every 1/HZ seconds. We round off the nearest 1 MHz of master * clock (= 1000000 / HZ / 2). 
*/ - /*return (ct1 - ct0 + (500000/HZ/2)) / (500000/HZ) * (500000/HZ);*/ + return (ct1 - ct0) / (500000/HZ) * (500000/HZ); } diff --git a/include/asm-mips/sgi/hpc3.h b/include/asm-mips/sgi/hpc3.h index fcec52bafb25..c4729f531919 100644 --- a/include/asm-mips/sgi/hpc3.h +++ b/include/asm-mips/sgi/hpc3.h @@ -206,7 +206,7 @@ struct hpc3_regs { #define HPC3_GIOMISC_ERTIME 0x1 /* Enable external timer real time. */ #define HPC3_GIOMISC_DENDIAN 0x2 /* dma descriptor endian, 1=lit 0=big */ - volatile u32 eeprom; /* EEPROM data reg. */ + u32 eeprom; /* EEPROM data reg. */ #define HPC3_EEPROM_EPROT 0x01 /* Protect register enable */ #define HPC3_EEPROM_CSEL 0x02 /* Chip select */ #define HPC3_EEPROM_ECLK 0x04 /* EEPROM clock */ diff --git a/include/asm-mips/sgi/ip22.h b/include/asm-mips/sgi/ip22.h index 6592f3bd1999..f4981c4f16bb 100644 --- a/include/asm-mips/sgi/ip22.h +++ b/include/asm-mips/sgi/ip22.h @@ -72,7 +72,7 @@ #define ip22_is_fullhouse() (sgioc->sysid & SGIOC_SYSID_FULLHOUSE) -extern unsigned short ip22_eeprom_read(volatile unsigned int *ctrl, int reg); +extern unsigned short ip22_eeprom_read(unsigned int *ctrl, int reg); extern unsigned short ip22_nvram_read(int reg); #endif diff --git a/include/asm-mips/sgi/mc.h b/include/asm-mips/sgi/mc.h index c52f7834c7c8..1576c2394de8 100644 --- a/include/asm-mips/sgi/mc.h +++ b/include/asm-mips/sgi/mc.h @@ -57,7 +57,7 @@ struct sgimc_regs { volatile u32 divider; /* Divider reg for RPSS */ u32 _unused5; - volatile u32 eeprom; /* EEPROM byte reg for r4k */ + u32 eeprom; /* EEPROM byte reg for r4k */ #define SGIMC_EEPROM_PRE 0x00000001 /* eeprom chip PRE pin assertion */ #define SGIMC_EEPROM_CSEL 0x00000002 /* Active high, eeprom chip select */ #define SGIMC_EEPROM_SECLOCK 0x00000004 /* EEPROM serial clock */ -- cgit v1.2.3-59-g8ed1b From eacb9d61919db56482dcea7ec943c9508175dc16 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Thu, 26 Apr 2007 15:46:25 +0100 Subject: [MIPS] Remove unused argument from kunmap_coherent(). 
Signed-off-by: Ralf Baechle --- arch/mips/mm/cache.c | 2 +- arch/mips/mm/init.c | 8 ++++---- include/asm-mips/cacheflush.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 4e8f1b683376..abf99b1eba13 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -96,7 +96,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr) kaddr = kmap_coherent(page, vmaddr); flush_data_cache_page((unsigned long)kaddr); - kunmap_coherent(kaddr); + kunmap_coherent(); } } diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 8800c88cbbf6..2d1c2c024822 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -177,7 +177,7 @@ void *kmap_coherent(struct page *page, unsigned long addr) #define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) -void kunmap_coherent(struct page *page) +void kunmap_coherent(void) { #ifndef CONFIG_MIPS_MT_SMTC unsigned int wired; @@ -210,7 +210,7 @@ void copy_user_highpage(struct page *to, struct page *from, if (cpu_has_dc_aliases) { vfrom = kmap_coherent(from, vaddr); copy_page(vto, vfrom); - kunmap_coherent(from); + kunmap_coherent(); } else { vfrom = kmap_atomic(from, KM_USER0); copy_page(vto, vfrom); @@ -233,7 +233,7 @@ void copy_to_user_page(struct vm_area_struct *vma, if (cpu_has_dc_aliases) { void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(vto, src, len); - kunmap_coherent(page); + kunmap_coherent(); } else memcpy(dst, src, len); if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) @@ -250,7 +250,7 @@ void copy_from_user_page(struct vm_area_struct *vma, void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); memcpy(dst, vfrom, len); - kunmap_coherent(page); + kunmap_coherent(); } else memcpy(dst, src, len); } diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h index 28d907d4347a..4933b4947ed0 100644 --- a/include/asm-mips/cacheflush.h +++ b/include/asm-mips/cacheflush.h @@ -96,6 +96,6 @@ extern void (*flush_data_cache_page)(unsigned long addr); unsigned long __init run_uncached(void *func); extern void *kmap_coherent(struct page *page, unsigned long addr); -extern void kunmap_coherent(struct page *page); +extern void kunmap_coherent(void); #endif /* _ASM_CACHEFLUSH_H */ -- cgit v1.2.3-59-g8ed1b From 8deab1144b553548fb2f1b51affdd36dcd652aaa Mon Sep 17 00:00:00 2001 From: Mark Mason Date: Wed, 28 Mar 2007 14:40:25 -0700 Subject: [MIPS] Updated Sibyte headers This is an update to the earlier patch for the sibyte headers, and supersedes the previous patch. Changes were necessary to get the tbprof driver working on the bcm1480. Patch to update Sibyte header files to match master versions maintained at Broadcom. This patch also corrects some whitespace problems, and (hopefully) shouldn't introduce any new ones.
Signed-off-by: Mark Mason Signed-off-by: Ralf Baechle --- include/asm-mips/sibyte/bcm1480_int.h | 2 + include/asm-mips/sibyte/bcm1480_mc.h | 32 ++++++++++-- include/asm-mips/sibyte/bcm1480_regs.h | 20 ++++++++ include/asm-mips/sibyte/bcm1480_scd.h | 94 ++++++++++++---------------------- include/asm-mips/sibyte/board.h | 14 ++++- include/asm-mips/sibyte/carmel.h | 1 - include/asm-mips/sibyte/sb1250_int.h | 5 +- include/asm-mips/sibyte/sb1250_mac.h | 24 ++++----- include/asm-mips/sibyte/sb1250_mc.h | 2 +- include/asm-mips/sibyte/sb1250_regs.h | 46 +++++++++++++---- include/asm-mips/sibyte/sb1250_scd.h | 30 +++++++---- include/asm-mips/sibyte/swarm.h | 12 +++++ 12 files changed, 175 insertions(+), 107 deletions(-) (limited to 'include') diff --git a/include/asm-mips/sibyte/bcm1480_int.h b/include/asm-mips/sibyte/bcm1480_int.h index 42d4cf00efd3..c0d5206020fd 100644 --- a/include/asm-mips/sibyte/bcm1480_int.h +++ b/include/asm-mips/sibyte/bcm1480_int.h @@ -157,6 +157,7 @@ * Mask values for each interrupt */ +#define _BCM1480_INT_MASK(w,n) _SB_MAKEMASK(w,((n) & 0x3F)) #define _BCM1480_INT_MASK1(n) _SB_MAKEMASK1(((n) & 0x3F)) #define _BCM1480_INT_OFFSET(n) (((n) & 0x40) << 6) @@ -195,6 +196,7 @@ #define M_BCM1480_INT_PMI_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_HIGH) #define M_BCM1480_INT_PMO_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_LOW) #define M_BCM1480_INT_PMO_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_HIGH) +#define M_BCM1480_INT_MBOX_ALL _BCM1480_INT_MASK(8,K_BCM1480_INT_MBOX_0_0) #define M_BCM1480_INT_MBOX_0_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_0) #define M_BCM1480_INT_MBOX_0_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_1) #define M_BCM1480_INT_MBOX_0_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_2) diff --git a/include/asm-mips/sibyte/bcm1480_mc.h b/include/asm-mips/sibyte/bcm1480_mc.h index 6bdc941afc91..a6a437451da4 100644 --- a/include/asm-mips/sibyte/bcm1480_mc.h +++ b/include/asm-mips/sibyte/bcm1480_mc.h @@ -382,6 +382,10 @@ #define M_BCM1480_MC_CS6 _SB_MAKEMASK1(10) #define M_BCM1480_MC_CS7 _SB_MAKEMASK1(11) +#define M_BCM1480_MC_CS _SB_MAKEMASK(8,S_BCM1480_MC_CS0) +#define V_BCM1480_MC_CS(x) _SB_MAKEVALUE(x,S_BCM1480_MC_CS0) +#define G_BCM1480_MC_CS(x) _SB_GETVALUE(x,S_BCM1480_MC_CS0,M_BCM1480_MC_CS0) + #define M_BCM1480_MC_CMD_ACTIVE _SB_MAKEMASK1(16) /* @@ -412,6 +416,8 @@ #define K_BCM1480_MC_DRAM_TYPE_DDR2 2 #endif +#define K_BCM1480_MC_DRAM_TYPE_DDR2_PASS1 0 + #define V_BCM1480_MC_DRAM_TYPE_JEDEC V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_JEDEC) #define V_BCM1480_MC_DRAM_TYPE_FCRAM V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_FCRAM) @@ -511,6 +517,22 @@ #define M_BCM1480_MC_WR_ODT6_CS6 _SB_MAKEMASK1(31) #define M_BCM1480_MC_CS_ODD_ODT_EN _SB_MAKEMASK1(32) + +#define S_BCM1480_MC_ODT0 0 +#define M_BCM1480_MC_ODT0 _SB_MAKEMASK(8,S_BCM1480_MC_ODT0) +#define V_BCM1480_MC_ODT0(x) _SB_MAKEVALUE(x,S_BCM1480_MC_ODT0) + +#define S_BCM1480_MC_ODT2 8 +#define M_BCM1480_MC_ODT2 _SB_MAKEMASK(8,S_BCM1480_MC_ODT2) +#define V_BCM1480_MC_ODT2(x) _SB_MAKEVALUE(x,S_BCM1480_MC_ODT2) + +#define S_BCM1480_MC_ODT4 16 +#define M_BCM1480_MC_ODT4 _SB_MAKEMASK(8,S_BCM1480_MC_ODT4) +#define V_BCM1480_MC_ODT4(x) _SB_MAKEVALUE(x,S_BCM1480_MC_ODT4) + +#define S_BCM1480_MC_ODT6 24 +#define M_BCM1480_MC_ODT6 _SB_MAKEMASK(8,S_BCM1480_MC_ODT6) +#define V_BCM1480_MC_ODT6(x) _SB_MAKEVALUE(x,S_BCM1480_MC_ODT6) #endif /* @@ -588,11 +610,11 @@ #define M_BCM1480_MC_DQO_SHIFT _SB_MAKEMASK1(47) #endif -#define S_BCM1480_MC_DLL_DEFAULT 48 -#define M_BCM1480_MC_DLL_DEFAULT _SB_MAKEMASK(6,S_BCM1480_MC_DLL_DEFAULT) 
-#define V_BCM1480_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x,S_BCM1480_MC_DLL_DEFAULT) -#define G_BCM1480_MC_DLL_DEFAULT(x) _SB_GETVALUE(x,S_BCM1480_MC_DLL_DEFAULT,M_BCM1480_MC_DLL_DEFAULT) -#define V_BCM1480_MC_DLL_DEFAULT_DEFAULT V_BCM1480_MC_DLL_DEFAULT(0x10) +#define S_BCM1480_MC_DLL_DEFAULT 48 +#define M_BCM1480_MC_DLL_DEFAULT _SB_MAKEMASK(6,S_BCM1480_MC_DLL_DEFAULT) +#define V_BCM1480_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x,S_BCM1480_MC_DLL_DEFAULT) +#define G_BCM1480_MC_DLL_DEFAULT(x) _SB_GETVALUE(x,S_BCM1480_MC_DLL_DEFAULT,M_BCM1480_MC_DLL_DEFAULT) +#define V_BCM1480_MC_DLL_DEFAULT_DEFAULT V_BCM1480_MC_DLL_DEFAULT(0x10) #if SIBYTE_HDR_FEATURE(1480, PASS2) #define S_BCM1480_MC_DLL_REGCTRL 54 diff --git a/include/asm-mips/sibyte/bcm1480_regs.h b/include/asm-mips/sibyte/bcm1480_regs.h index c2dd2fe3047c..bda391d3af85 100644 --- a/include/asm-mips/sibyte/bcm1480_regs.h +++ b/include/asm-mips/sibyte/bcm1480_regs.h @@ -230,6 +230,7 @@ #define A_BCM1480_DUART_IMRREG(chan) (A_BCM1480_DUART(chan) + R_BCM1480_DUART_IMRREG(chan)) #define A_BCM1480_DUART_ISRREG(chan) (A_BCM1480_DUART(chan) + R_BCM1480_DUART_ISRREG(chan)) +#define A_BCM1480_DUART_IN_PORT(chan) (A_BCM1480_DUART(chan) + R_DUART_INP_ORT) /* * These constants are the absolute addresses. @@ -404,6 +405,21 @@ #define R_BCM1480_IMR_ALIAS_MAILBOX_0 0x0000 /* 0x0x0 */ #define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET 0x0008 /* 0x0x8 */ +/* + * these macros work together to build the address of a mailbox + * register, e.g., A_BCM1480_MAILBOX_REGISTER(0,R_BCM1480_IMR_MAILBOX_SET,2) + * for mbox_0_set_cpu2 returns 0x00100240C8 + */ +#define R_BCM1480_IMR_MAILBOX_CPU 0x00 +#define R_BCM1480_IMR_MAILBOX_SET 0x08 +#define R_BCM1480_IMR_MAILBOX_CLR 0x10 +#define R_BCM1480_IMR_MAILBOX_NUM_SPACING 0x20 +#define A_BCM1480_MAILBOX_REGISTER(num,reg,cpu) \ + (A_BCM1480_IMR_CPU0_BASE + \ + (num * R_BCM1480_IMR_MAILBOX_NUM_SPACING) + \ + (cpu * BCM1480_IMR_REGISTER_SPACING) + \ + (R_BCM1480_IMR_MAILBOX_0_CPU + reg)) + /* ********************************************************************* * System Performance Counter Registers (Section 4.7) ********************************************************************* */ @@ -428,6 +444,10 @@ #define A_BCM1480_SCD_PERF_CNT_6 0x0010020500 #define A_BCM1480_SCD_PERF_CNT_7 0x0010020508 +#define BCM1480_SCD_NUM_PERF_CNT 8 +#define BCM1480_SCD_PERF_CNT_SPACING 8 +#define A_BCM1480_SCD_PERF_CNT(n) (A_SCD_PERF_CNT_0+(n*BCM1480_SCD_PERF_CNT_SPACING)) + /* ********************************************************************* * System Bus Watcher Registers (Section 4.8) ********************************************************************* */ diff --git a/include/asm-mips/sibyte/bcm1480_scd.h b/include/asm-mips/sibyte/bcm1480_scd.h index 648bed96780f..6111d6dcf117 100644 --- a/include/asm-mips/sibyte/bcm1480_scd.h +++ b/include/asm-mips/sibyte/bcm1480_scd.h @@ -10,7 +10,7 @@ * ********************************************************************* * - * Copyright 2000,2001,2002,2003 + * Copyright 2000,2001,2002,2003,2004,2005 * Broadcom Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -78,6 +78,7 @@ #define K_SYS_PART_BCM1280 0x1206 #define K_SYS_PART_BCM1455 0x1407 #define K_SYS_PART_BCM1255 0x1257 +#define K_SYS_PART_BCM1158 0x1156 /* * Manufacturing Information Register (Table 14) @@ -237,58 +238,42 @@ * System Performance Counter Configuration Register (Table 31) * Register: PERF_CNT_CFG_0 * - * Since the clear/enable bits are moved compared to the - * 1250 and there are more fields, this register will be BCM1480 specific. + * SPC_CFG_SRC[0-3] is the same as the 1250. + * SPC_CFG_SRC[4-7] only exist on the 1480 + * The clear/enable bits are in different locations on the 1250 and 1480. */ -#define S_BCM1480_SPC_CFG_SRC0 0 -#define M_BCM1480_SPC_CFG_SRC0 _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC0) -#define V_BCM1480_SPC_CFG_SRC0(x) _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC0) -#define G_BCM1480_SPC_CFG_SRC0(x) _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC0,M_BCM1480_SPC_CFG_SRC0) - -#define S_BCM1480_SPC_CFG_SRC1 8 -#define M_BCM1480_SPC_CFG_SRC1 _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC1) -#define V_BCM1480_SPC_CFG_SRC1(x) _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC1) -#define G_BCM1480_SPC_CFG_SRC1(x) _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC1,M_BCM1480_SPC_CFG_SRC1) - -#define S_BCM1480_SPC_CFG_SRC2 16 -#define M_BCM1480_SPC_CFG_SRC2 _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC2) -#define V_BCM1480_SPC_CFG_SRC2(x) _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC2) -#define G_BCM1480_SPC_CFG_SRC2(x) _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC2,M_BCM1480_SPC_CFG_SRC2) - -#define S_BCM1480_SPC_CFG_SRC3 24 -#define M_BCM1480_SPC_CFG_SRC3 _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC3) -#define V_BCM1480_SPC_CFG_SRC3(x) _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC3) -#define G_BCM1480_SPC_CFG_SRC3(x) _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC3,M_BCM1480_SPC_CFG_SRC3) - -#define S_BCM1480_SPC_CFG_SRC4 32 -#define M_BCM1480_SPC_CFG_SRC4 _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC4) -#define V_BCM1480_SPC_CFG_SRC4(x) _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC4) -#define G_BCM1480_SPC_CFG_SRC4(x) _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC4,M_BCM1480_SPC_CFG_SRC4) - -#define S_BCM1480_SPC_CFG_SRC5 40 -#define M_BCM1480_SPC_CFG_SRC5 _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC5) -#define V_BCM1480_SPC_CFG_SRC5(x) _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC5) -#define G_BCM1480_SPC_CFG_SRC5(x) _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC5,M_BCM1480_SPC_CFG_SRC5) - -#define S_BCM1480_SPC_CFG_SRC6 48 -#define M_BCM1480_SPC_CFG_SRC6 _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC6) -#define V_BCM1480_SPC_CFG_SRC6(x) _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC6) -#define G_BCM1480_SPC_CFG_SRC6(x) _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC6,M_BCM1480_SPC_CFG_SRC6) - -#define S_BCM1480_SPC_CFG_SRC7 56 -#define M_BCM1480_SPC_CFG_SRC7 _SB_MAKEMASK(8,S_BCM1480_SPC_CFG_SRC7) -#define V_BCM1480_SPC_CFG_SRC7(x) _SB_MAKEVALUE(x,S_BCM1480_SPC_CFG_SRC7) -#define G_BCM1480_SPC_CFG_SRC7(x) _SB_GETVALUE(x,S_BCM1480_SPC_CFG_SRC7,M_BCM1480_SPC_CFG_SRC7) +#define S_SPC_CFG_SRC4 32 +#define M_SPC_CFG_SRC4 _SB_MAKEMASK(8,S_SPC_CFG_SRC4) +#define V_SPC_CFG_SRC4(x) _SB_MAKEVALUE(x,S_SPC_CFG_SRC4) +#define G_SPC_CFG_SRC4(x) _SB_GETVALUE(x,S_SPC_CFG_SRC4,M_SPC_CFG_SRC4) + +#define S_SPC_CFG_SRC5 40 +#define M_SPC_CFG_SRC5 _SB_MAKEMASK(8,S_SPC_CFG_SRC5) +#define V_SPC_CFG_SRC5(x) _SB_MAKEVALUE(x,S_SPC_CFG_SRC5) +#define G_SPC_CFG_SRC5(x) _SB_GETVALUE(x,S_SPC_CFG_SRC5,M_SPC_CFG_SRC5) + +#define S_SPC_CFG_SRC6 48 +#define M_SPC_CFG_SRC6 _SB_MAKEMASK(8,S_SPC_CFG_SRC6) +#define V_SPC_CFG_SRC6(x) _SB_MAKEVALUE(x,S_SPC_CFG_SRC6) +#define G_SPC_CFG_SRC6(x) 
_SB_GETVALUE(x,S_SPC_CFG_SRC6,M_SPC_CFG_SRC6) + +#define S_SPC_CFG_SRC7 56 +#define M_SPC_CFG_SRC7 _SB_MAKEMASK(8,S_SPC_CFG_SRC7) +#define V_SPC_CFG_SRC7(x) _SB_MAKEVALUE(x,S_SPC_CFG_SRC7) +#define G_SPC_CFG_SRC7(x) _SB_GETVALUE(x,S_SPC_CFG_SRC7,M_SPC_CFG_SRC7) /* * System Performance Counter Control Register (Table 32) * Register: PERF_CNT_CFG_1 * BCM1480 specific */ - -#define M_BCM1480_SPC_CFG_CLEAR _SB_MAKEMASK1(0) -#define M_BCM1480_SPC_CFG_ENABLE _SB_MAKEMASK1(1) +#define M_BCM1480_SPC_CFG_CLEAR _SB_MAKEMASK1(0) +#define M_BCM1480_SPC_CFG_ENABLE _SB_MAKEMASK1(1) +#if SIBYTE_HDR_FEATURE_CHIP(1480) +#define M_SPC_CFG_CLEAR M_BCM1480_SPC_CFG_CLEAR +#define M_SPC_CFG_ENABLE M_BCM1480_SPC_CFG_ENABLE +#endif /* * System Performance Counters (Table 33) @@ -405,20 +390,10 @@ * Trace Control Register (Table 49) * Register: TRACE_CFG * - * Bits 0..8 are the same as the BCM1250, rest are different. - * Entire register is redefined below. + * BCM1480 changes to this register (other than location of the CUR_ADDR field) + * are defined below. */ -#define M_BCM1480_SCD_TRACE_CFG_RESET _SB_MAKEMASK1(0) -#define M_BCM1480_SCD_TRACE_CFG_START_READ _SB_MAKEMASK1(1) -#define M_BCM1480_SCD_TRACE_CFG_START _SB_MAKEMASK1(2) -#define M_BCM1480_SCD_TRACE_CFG_STOP _SB_MAKEMASK1(3) -#define M_BCM1480_SCD_TRACE_CFG_FREEZE _SB_MAKEMASK1(4) -#define M_BCM1480_SCD_TRACE_CFG_FREEZE_FULL _SB_MAKEMASK1(5) -#define M_BCM1480_SCD_TRACE_CFG_DEBUG_FULL _SB_MAKEMASK1(6) -#define M_BCM1480_SCD_TRACE_CFG_FULL _SB_MAKEMASK1(7) -#define M_BCM1480_SCD_TRACE_CFG_FORCE_CNT _SB_MAKEMASK1(8) - #define S_BCM1480_SCD_TRACE_CFG_MODE 16 #define M_BCM1480_SCD_TRACE_CFG_MODE _SB_MAKEMASK(2,S_BCM1480_SCD_TRACE_CFG_MODE) #define V_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_MAKEVALUE(x,S_BCM1480_SCD_TRACE_CFG_MODE) @@ -428,9 +403,4 @@ #define K_BCM1480_SCD_TRACE_CFG_MODE_BYTEEN_INT 1 #define K_BCM1480_SCD_TRACE_CFG_MODE_FLOW_ID 2 -#define S_BCM1480_SCD_TRACE_CFG_CUR_ADDR 24 -#define M_BCM1480_SCD_TRACE_CFG_CUR_ADDR _SB_MAKEMASK(8,S_BCM1480_SCD_TRACE_CFG_CUR_ADDR) -#define V_BCM1480_SCD_TRACE_CFG_CUR_ADDR(x) _SB_MAKEVALUE(x,S_BCM1480_SCD_TRACE_CFG_CUR_ADDR) -#define G_BCM1480_SCD_TRACE_CFG_CUR_ADDR(x) _SB_GETVALUE(x,S_BCM1480_SCD_TRACE_CFG_CUR_ADDR,M_BCM1480_SCD_TRACE_CFG_CUR_ADDR) - #endif /* _BCM1480_SCD_H */ diff --git a/include/asm-mips/sibyte/board.h b/include/asm-mips/sibyte/board.h index 3dfe29ed42a8..73bce901a378 100644 --- a/include/asm-mips/sibyte/board.h +++ b/include/asm-mips/sibyte/board.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation + * Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -19,8 +19,8 @@ #ifndef _SIBYTE_BOARD_H #define _SIBYTE_BOARD_H - #if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_PTSWARM) || \ + defined(CONFIG_SIBYTE_PT1120) || defined(CONFIG_SIBYTE_PT1125) || \ defined(CONFIG_SIBYTE_CRHONE) || defined(CONFIG_SIBYTE_CRHINE) || \ defined(CONFIG_SIBYTE_LITTLESUR) #include @@ -55,6 +55,16 @@ #define setleds(t0,t1,c0,c1,c2,c3) #endif /* LEDS_PHYS */ +#else + +void swarm_setup(void); + +#ifdef LEDS_PHYS +extern void setleds(char *str); +#else +#define setleds(s) do { } while (0) +#endif /* LEDS_PHYS */ + #endif /* __ASSEMBLY__ */ #endif /* _SIBYTE_BOARD_H */ diff --git a/include/asm-mips/sibyte/carmel.h b/include/asm-mips/sibyte/carmel.h index 57c53e62a37a..11cad71323e8 100644 --- a/include/asm-mips/sibyte/carmel.h +++ 
b/include/asm-mips/sibyte/carmel.h @@ -18,7 +18,6 @@ #ifndef __ASM_SIBYTE_CARMEL_H #define __ASM_SIBYTE_CARMEL_H - #include #include diff --git a/include/asm-mips/sibyte/sb1250_int.h b/include/asm-mips/sibyte/sb1250_int.h index 05c7b39f1b02..94e8299b0a2a 100644 --- a/include/asm-mips/sibyte/sb1250_int.h +++ b/include/asm-mips/sibyte/sb1250_int.h @@ -45,8 +45,6 @@ * First, the interrupt numbers. */ -#if SIBYTE_HDR_FEATURE_1250_112x - #define K_INT_SOURCES 64 #define K_INT_WATCHDOG_TIMER_0 0 @@ -152,6 +150,7 @@ #define M_INT_MBOX_1 _SB_MAKEMASK1(K_INT_MBOX_1) #define M_INT_MBOX_2 _SB_MAKEMASK1(K_INT_MBOX_2) #define M_INT_MBOX_3 _SB_MAKEMASK1(K_INT_MBOX_3) +#define M_INT_MBOX_ALL _SB_MAKEMASK(4,K_INT_MBOX_0) #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) #define M_INT_CYCLE_CP0_INT _SB_MAKEMASK1(K_INT_CYCLE_CP0_INT) #define M_INT_CYCLE_CP1_INT _SB_MAKEMASK1(K_INT_CYCLE_CP1_INT) @@ -247,5 +246,3 @@ #endif /* 1250/112x */ - -#endif diff --git a/include/asm-mips/sibyte/sb1250_mac.h b/include/asm-mips/sibyte/sb1250_mac.h index adfc688fa559..833c8b59d687 100644 --- a/include/asm-mips/sibyte/sb1250_mac.h +++ b/include/asm-mips/sibyte/sb1250_mac.h @@ -129,9 +129,9 @@ #define M_MAC_BYPASS_16 _SB_MAKEMASK1(42) #define M_MAC_BYPASS_FCS_CHK _SB_MAKEMASK1(43) -#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define M_MAC_RX_CH_SEL_MSB _SB_MAKEMASK1(44) -#endif /* 1250 PASS2 || 112x PASS1 */ +#endif /* 1250 PASS2 || 112x PASS1 || 1480*/ #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define M_MAC_SPLIT_CH_SEL _SB_MAKEMASK1(45) @@ -223,9 +223,9 @@ /* XXX: Can't enable, as it has the same name as a pass2+ define below. */ /* #define M_MAC_TX_WR_THRSH _SB_MAKEMASK(6,S_MAC_TX_WR_THRSH) */ #endif /* up to 1250 PASS1 */ -#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define M_MAC_TX_WR_THRSH _SB_MAKEMASK(7,S_MAC_TX_WR_THRSH) -#endif /* 1250 PASS2 || 112x PASS1 */ +#endif /* 1250 PASS2 || 112x PASS1 || 1480 */ #define V_MAC_TX_WR_THRSH(x) _SB_MAKEVALUE(x,S_MAC_TX_WR_THRSH) #define G_MAC_TX_WR_THRSH(x) _SB_GETVALUE(x,S_MAC_TX_WR_THRSH,M_MAC_TX_WR_THRSH) @@ -234,9 +234,9 @@ /* XXX: Can't enable, as it has the same name as a pass2+ define below. 
*/ /* #define M_MAC_TX_RD_THRSH _SB_MAKEMASK(6,S_MAC_TX_RD_THRSH) */ #endif /* up to 1250 PASS1 */ -#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define M_MAC_TX_RD_THRSH _SB_MAKEMASK(7,S_MAC_TX_RD_THRSH) -#endif /* 1250 PASS2 || 112x PASS1 */ +#endif /* 1250 PASS2 || 112x PASS1 || 1480 */ #define V_MAC_TX_RD_THRSH(x) _SB_MAKEVALUE(x,S_MAC_TX_RD_THRSH) #define G_MAC_TX_RD_THRSH(x) _SB_GETVALUE(x,S_MAC_TX_RD_THRSH,M_MAC_TX_RD_THRSH) @@ -260,12 +260,12 @@ #define V_MAC_RX_RL_THRSH(x) _SB_MAKEVALUE(x,S_MAC_RX_RL_THRSH) #define G_MAC_RX_RL_THRSH(x) _SB_GETVALUE(x,S_MAC_RX_RL_THRSH,M_MAC_RX_RL_THRSH) -#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define S_MAC_ENC_FC_THRSH _SB_MAKE64(56) #define M_MAC_ENC_FC_THRSH _SB_MAKEMASK(6,S_MAC_ENC_FC_THRSH) #define V_MAC_ENC_FC_THRSH(x) _SB_MAKEVALUE(x,S_MAC_ENC_FC_THRSH) #define G_MAC_ENC_FC_THRSH(x) _SB_GETVALUE(x,S_MAC_ENC_FC_THRSH,M_MAC_ENC_FC_THRSH) -#endif /* 1250 PASS2 || 112x PASS1 */ +#endif /* 1250 PASS2 || 112x PASS1 || 1480 */ /* * MAC Frame Configuration Registers (Table 9-15) @@ -462,9 +462,9 @@ #define M_MAC_LTCOL_ERR _SB_MAKEMASK1(44) #define M_MAC_EXCOL_ERR _SB_MAKEMASK1(45) #define M_MAC_CNTR_OVRFL_ERR _SB_MAKEMASK1(46) -#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define M_MAC_SPLIT_EN _SB_MAKEMASK1(47) /* interrupt mask only */ -#endif /* 1250 PASS2 || 112x PASS1 */ +#endif /* 1250 PASS2 || 112x PASS1 || 1480 */ #define S_MAC_COUNTER_ADDR _SB_MAKE64(47) #define M_MAC_COUNTER_ADDR _SB_MAKEMASK(5,S_MAC_COUNTER_ADDR) @@ -598,9 +598,9 @@ #define M_MAC_MCAST_INV _SB_MAKEMASK1(4) #define M_MAC_BCAST_EN _SB_MAKEMASK1(5) #define M_MAC_DIRECT_INV _SB_MAKEMASK1(6) -#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define M_MAC_ALLMCAST_EN _SB_MAKEMASK1(7) -#endif /* 1250 PASS2 || 112x PASS1 */ +#endif /* 1250 PASS2 || 112x PASS1 || 1480 */ #define S_MAC_IPHDR_OFFSET _SB_MAKE64(8) #define M_MAC_IPHDR_OFFSET _SB_MAKEMASK(8,S_MAC_IPHDR_OFFSET) diff --git a/include/asm-mips/sibyte/sb1250_mc.h b/include/asm-mips/sibyte/sb1250_mc.h index 26e421498c97..4fe848ffbc31 100644 --- a/include/asm-mips/sibyte/sb1250_mc.h +++ b/include/asm-mips/sibyte/sb1250_mc.h @@ -295,7 +295,7 @@ #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) #define M_MC_PRE_ON_A8 _SB_MAKEMASK1(36) -#define M_MC_RAM_WITH_A13 _SB_MAKEMASK1(38) +#define M_MC_RAM_WITH_A13 _SB_MAKEMASK1(37) #endif /* 1250 PASS3 || 112x PASS1 */ diff --git a/include/asm-mips/sibyte/sb1250_regs.h b/include/asm-mips/sibyte/sb1250_regs.h index bab3a4580a36..da7c188993c9 100644 --- a/include/asm-mips/sibyte/sb1250_regs.h +++ b/include/asm-mips/sibyte/sb1250_regs.h @@ -131,6 +131,7 @@ #endif + /* ********************************************************************* * PCI Interface Registers ********************************************************************* */ @@ -239,14 +240,14 @@ #define R_MAC_VLANTAG 0x00000110 #define R_MAC_FRAMECFG 0x00000118 #define R_MAC_EOPCNT 0x00000120 -#define R_MAC_FIFO_PTRS 0x00000130 +#define R_MAC_FIFO_PTRS 0x00000128 #define R_MAC_ADFILTER_CFG 
0x00000200 #define R_MAC_ETHERNET_ADDR 0x00000208 #define R_MAC_PKT_TYPE 0x00000210 -#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define R_MAC_ADMASK0 0x00000218 #define R_MAC_ADMASK1 0x00000220 -#endif /* 1250 PASS3 || 112x PASS1 */ +#endif /* 1250 PASS3 || 112x PASS1 || 1480 */ #define R_MAC_HASH_BASE 0x00000240 #define R_MAC_ADDR_BASE 0x00000280 #define R_MAC_CHLO0_BASE 0x00000300 @@ -256,9 +257,9 @@ #define R_MAC_INT_MASK 0x00000410 #define R_MAC_TXD_CTL 0x00000420 #define R_MAC_MDIO 0x00000428 -#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define R_MAC_STATUS1 0x00000430 -#endif /* 1250 PASS2 || 112x PASS1 */ +#endif /* 1250 PASS2 || 112x PASS1 || 1480 */ #define R_MAC_DEBUG_STATUS 0x00000448 #define MAC_HASH_COUNT 8 @@ -289,11 +290,11 @@ #define R_DUART_RX_HOLD 0x160 #define R_DUART_TX_HOLD 0x170 -#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define R_DUART_FULL_CTL 0x140 #define R_DUART_OPCR_X 0x180 #define R_DUART_AUXCTL_X 0x190 -#endif /* 1250 PASS2 || 112x PASS1 */ +#endif /* 1250 PASS2 || 112x PASS1 || 1480*/ /* @@ -308,6 +309,7 @@ #define R_DUART_IMR_B 0x350 #define R_DUART_OUT_PORT 0x360 #define R_DUART_OPCR 0x370 +#define R_DUART_IN_PORT 0x380 #define R_DUART_SET_OPR 0x3B0 #define R_DUART_CLEAR_OPR 0x3C0 @@ -685,12 +687,17 @@ #define A_ADDR_TRAP_REG_DEBUG 0x0010020460 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */ +#define ADDR_TRAP_SPACING 8 +#define NUM_ADDR_TRAP 4 +#define A_ADDR_TRAP_UP(n) (A_ADDR_TRAP_UP_0 + ((n) * ADDR_TRAP_SPACING)) +#define A_ADDR_TRAP_DOWN(n) (A_ADDR_TRAP_DOWN_0 + ((n) * ADDR_TRAP_SPACING)) +#define A_ADDR_TRAP_CFG(n) (A_ADDR_TRAP_CFG_0 + ((n) * ADDR_TRAP_SPACING)) + /* ********************************************************************* * System Interrupt Mapper Registers ********************************************************************* */ -#if SIBYTE_HDR_FEATURE_1250_112x #define A_IMR_CPU0_BASE 0x0010020000 #define A_IMR_CPU1_BASE 0x0010022000 #define IMR_REGISTER_SPACING 0x2000 @@ -700,6 +707,7 @@ #define A_IMR_REGISTER(cpu,reg) (A_IMR_MAPPER(cpu)+(reg)) #define R_IMR_INTERRUPT_DIAG 0x0010 +#define R_IMR_INTERRUPT_LDT 0x0018 #define R_IMR_INTERRUPT_MASK 0x0028 #define R_IMR_INTERRUPT_TRACE 0x0038 #define R_IMR_INTERRUPT_SOURCE_STATUS 0x0040 @@ -715,7 +723,14 @@ #define R_IMR_INTERRUPT_STATUS_COUNT 7 #define R_IMR_INTERRUPT_MAP_BASE 0x0200 #define R_IMR_INTERRUPT_MAP_COUNT 64 -#endif /* 1250/112x */ + +/* + * these macros work together to build the address of a mailbox + * register, e.g., A_MAILBOX_REGISTER(R_IMR_MAILBOX_SET_CPU,1) + * for mbox_0_set_cpu2 returns 0x00100240C8 + */ +#define A_MAILBOX_REGISTER(reg,cpu) \ + (A_IMR_CPU0_BASE + (cpu * IMR_REGISTER_SPACING) + reg) /* ********************************************************************* * System Performance Counter Registers @@ -727,6 +742,10 @@ #define A_SCD_PERF_CNT_2 0x00100204E0 #define A_SCD_PERF_CNT_3 0x00100204E8 +#define SCD_NUM_PERF_CNT 4 +#define SCD_PERF_CNT_SPACING 8 +#define A_SCD_PERF_CNT(n) (A_SCD_PERF_CNT_0+(n*SCD_PERF_CNT_SPACING)) + /* ********************************************************************* * System Bus Watcher Registers 
********************************************************************* */ @@ -772,6 +791,15 @@ #define A_SCD_TRACE_SEQUENCE_6 0x0010020A90 #define A_SCD_TRACE_SEQUENCE_7 0x0010020A98 +#define TRACE_REGISTER_SPACING 8 +#define TRACE_NUM_REGISTERS 8 +#define A_SCD_TRACE_EVENT(n) (((n) & 4) ? \ + (A_SCD_TRACE_EVENT_4 + (((n) & 3) * TRACE_REGISTER_SPACING)) : \ + (A_SCD_TRACE_EVENT_0 + ((n) * TRACE_REGISTER_SPACING))) +#define A_SCD_TRACE_SEQUENCE(n) (((n) & 4) ? \ + (A_SCD_TRACE_SEQUENCE_4 + (((n) & 3) * TRACE_REGISTER_SPACING)) : \ + (A_SCD_TRACE_SEQUENCE_0 + ((n) * TRACE_REGISTER_SPACING))) + /* ********************************************************************* * System Generic DMA Registers ********************************************************************* */ diff --git a/include/asm-mips/sibyte/sb1250_scd.h b/include/asm-mips/sibyte/sb1250_scd.h index b6a7d8f6ced5..9ea3da367ab6 100644 --- a/include/asm-mips/sibyte/sb1250_scd.h +++ b/include/asm-mips/sibyte/sb1250_scd.h @@ -10,7 +10,7 @@ * ********************************************************************* * - * Copyright 2000,2001,2002,2003 + * Copyright 2000,2001,2002,2003,2004,2005 * Broadcom Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or @@ -150,7 +150,7 @@ * (For the assembler version, sysrev and dest may be the same register. * Also, it clobbers AT.) */ -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define SYS_SOC_TYPE(dest, sysrev) \ .set push ; \ .set reorder ; \ @@ -214,6 +214,7 @@ #define G_SYS_YPOS(x) _SB_GETVALUE(x,S_SYS_YPOS,M_SYS_YPOS) #endif + /* * System Config Register (Table 4-2) * Register: SCD_SYSTEM_CFG @@ -360,13 +361,13 @@ */ #define V_SCD_TIMER_FREQ 1000000 -#define V_SCD_TIMER_WIDTH 23 #define S_SCD_TIMER_INIT 0 -#define M_SCD_TIMER_INIT _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_INIT) +#define M_SCD_TIMER_INIT _SB_MAKEMASK(23,S_SCD_TIMER_INIT) #define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_INIT) #define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x,S_SCD_TIMER_INIT,M_SCD_TIMER_INIT) +#define V_SCD_TIMER_WIDTH 23 #define S_SCD_TIMER_CNT 0 #define M_SCD_TIMER_CNT _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_CNT) #define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_CNT) @@ -380,7 +381,6 @@ * System Performance Counters */ -#if SIBYTE_HDR_FEATURE_1250_112x #define S_SPC_CFG_SRC0 0 #define M_SPC_CFG_SRC0 _SB_MAKEMASK(8,S_SPC_CFG_SRC0) #define V_SPC_CFG_SRC0(x) _SB_MAKEVALUE(x,S_SPC_CFG_SRC0) @@ -401,6 +401,7 @@ #define V_SPC_CFG_SRC3(x) _SB_MAKEVALUE(x,S_SPC_CFG_SRC3) #define G_SPC_CFG_SRC3(x) _SB_GETVALUE(x,S_SPC_CFG_SRC3,M_SPC_CFG_SRC3) +#if SIBYTE_HDR_FEATURE_1250_112x #define M_SPC_CFG_CLEAR _SB_MAKEMASK1(32) #define M_SPC_CFG_ENABLE _SB_MAKEMASK1(33) #endif @@ -516,8 +517,6 @@ * Trace Buffer Config register */ -#if SIBYTE_HDR_FEATURE_1250_112x - #define M_SCD_TRACE_CFG_RESET _SB_MAKEMASK1(0) #define M_SCD_TRACE_CFG_START_READ _SB_MAKEMASK1(1) #define M_SCD_TRACE_CFG_START _SB_MAKEMASK1(2) @@ -526,17 +525,26 @@ #define M_SCD_TRACE_CFG_FREEZE_FULL _SB_MAKEMASK1(5) #define M_SCD_TRACE_CFG_DEBUG_FULL _SB_MAKEMASK1(6) #define M_SCD_TRACE_CFG_FULL _SB_MAKEMASK1(7) -#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) +#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) #define M_SCD_TRACE_CFG_FORCECNT _SB_MAKEMASK1(8) -#endif /* 1250 PASS2 || 112x PASS1 */ +#endif /* 1250 PASS2 || 112x PASS1 || 1480 */ +/* + * This field is the same on the 1250/112x and 1480, just located in + * a slightly 
different place in the register. + */ +#if SIBYTE_HDR_FEATURE_1250_112x #define S_SCD_TRACE_CFG_CUR_ADDR 10 +#else +#if SIBYTE_HDR_FEATURE_CHIP(1480) +#define S_SCD_TRACE_CFG_CUR_ADDR 24 +#endif /* 1480 */ +#endif /* 1250/112x */ + #define M_SCD_TRACE_CFG_CUR_ADDR _SB_MAKEMASK(8,S_SCD_TRACE_CFG_CUR_ADDR) #define V_SCD_TRACE_CFG_CUR_ADDR(x) _SB_MAKEVALUE(x,S_SCD_TRACE_CFG_CUR_ADDR) #define G_SCD_TRACE_CFG_CUR_ADDR(x) _SB_GETVALUE(x,S_SCD_TRACE_CFG_CUR_ADDR,M_SCD_TRACE_CFG_CUR_ADDR) -#endif /* 1250/112x */ - /* * Trace Event registers */ diff --git a/include/asm-mips/sibyte/swarm.h b/include/asm-mips/sibyte/swarm.h index 86db37e5ad85..540865fa7ec3 100644 --- a/include/asm-mips/sibyte/swarm.h +++ b/include/asm-mips/sibyte/swarm.h @@ -32,6 +32,18 @@ #define SIBYTE_HAVE_IDE 1 #define SIBYTE_DEFAULT_CONSOLE "ttyS0,115200" #endif +#ifdef CONFIG_SIBYTE_PT1120 +#define SIBYTE_BOARD_NAME "PT1120" +#define SIBYTE_HAVE_PCMCIA 1 +#define SIBYTE_HAVE_IDE 1 +#define SIBYTE_DEFAULT_CONSOLE "ttyS0,115200" +#endif +#ifdef CONFIG_SIBYTE_PT1125 +#define SIBYTE_BOARD_NAME "PT1125" +#define SIBYTE_HAVE_PCMCIA 1 +#define SIBYTE_HAVE_IDE 1 +#define SIBYTE_DEFAULT_CONSOLE "ttyS0,115200" +#endif #ifdef CONFIG_SIBYTE_LITTLESUR #define SIBYTE_BOARD_NAME "BCM91250C2 (LittleSur)" #define SIBYTE_HAVE_PCMCIA 0 -- cgit v1.2.3-59-g8ed1b From a4c9bb7d22aa61ec585984f46df185669e557e3d Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Tue, 10 Apr 2007 06:23:27 -0400 Subject: [MIPS] Replace old fashioned "__typeof" with "__typeof__". [Robert's original log message said this was a bug but it isn't, it's just very old fashioned syntax that is not (no longer?) documented in the gcc documentation. So for the sake of uniformity I'm applying his patch but with a modified log message. -- Ralf] Signed-off-by: Robert P. J. 
Day Signed-off-by: Ralf Baechle --- include/asm-mips/paccess.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-mips/paccess.h b/include/asm-mips/paccess.h index 147844ef103b..8c08fa904b2c 100644 --- a/include/asm-mips/paccess.h +++ b/include/asm-mips/paccess.h @@ -34,7 +34,7 @@ struct __large_pstruct { unsigned long buf[100]; }; #define __get_dbe(x,ptr,size) \ ({ \ long __gu_err; \ - __typeof(*(ptr)) __gu_val; \ + __typeof__(*(ptr)) __gu_val; \ unsigned long __gu_addr; \ __asm__("":"=r" (__gu_val)); \ __gu_addr = (unsigned long) (ptr); \ -- cgit v1.2.3-59-g8ed1b From 20f09390b2da2432309afe8aaa0bd64ec64c4584 Mon Sep 17 00:00:00 2001 From: Daniel Walker Date: Thu, 26 Apr 2007 09:46:05 -0700 Subject: seqlocks: trivial remove weird whitespace Signed-off-by: Daniel Walker Signed-off-by: Linus Torvalds --- include/linux/seqlock.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 52c9eb9b6df2..26e4925bc35b 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -61,10 +61,10 @@ static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); ++sl->sequence; - smp_wmb(); -} + smp_wmb(); +} -static inline void write_sequnlock(seqlock_t *sl) +static inline void write_sequnlock(seqlock_t *sl) { smp_wmb(); sl->sequence++; @@ -77,7 +77,7 @@ static inline int write_tryseqlock(seqlock_t *sl) if (ret) { ++sl->sequence; - smp_wmb(); + smp_wmb(); } return ret; } -- cgit v1.2.3-59-g8ed1b From 39bc89fd4019b164002adaacef92c4140e37955a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 25 Apr 2007 20:50:03 -0700 Subject: make SysRq-T show all tasks again show_state() (SysRq-T) developed the buggy habit of not showing TASK_RUNNING tasks. This was due to the mistaken belief that state_filter == -1 would be a pass-through filter - while in reality it did not let TASK_RUNNING == 0 p->state values through. Fix this by restoring the original '!state_filter means all tasks' special-case I had in the original version. Test-built and test-booted on i686, SysRq-T now works as intended. Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 4 ++-- kernel/sched.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 49fe2997a016..a1707583de49 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -196,13 +196,13 @@ extern void init_idle(struct task_struct *idle, int cpu); extern cpumask_t nohz_cpu_mask; /* - * Only dump TASK_* tasks. (-1 for all tasks) + * Only dump TASK_* tasks.
(0 for all tasks) */ extern void show_state_filter(unsigned long state_filter); static inline void show_state(void) { - show_state_filter(-1); + show_state_filter(0); } extern void show_regs(struct pt_regs *); diff --git a/kernel/sched.c b/kernel/sched.c index b9a683730148..960d7c5fca39 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4746,7 +4746,7 @@ void show_state_filter(unsigned long state_filter) * console might take alot of time: */ touch_nmi_watchdog(); - if (p->state & state_filter) + if (!state_filter || (p->state & state_filter)) show_task(p); } while_each_thread(g, p); -- cgit v1.2.3-59-g8ed1b From 864062457a2e444227bd368ca5f2a2b740de604f Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Wed, 14 Mar 2007 03:25:56 +0100 Subject: driver core: fix namespace issue with devices assigned to classes - uses a kset in "struct class" to keep track of all directories belonging to this class - merges with the /sys/devices/virtual logic. - removes the namespace-dir if the last member of that class leaves the directory. There may be locking or refcounting fixes left, I stopped when it seemed to work with network and sound modules. :) From: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- drivers/base/class.c | 2 +- drivers/base/core.c | 82 +++++++++++++++++++++++++++++++++++++++---------- include/linux/device.h | 3 +- include/linux/kobject.h | 2 ++ lib/kobject.c | 12 ++++++-- lib/kobject_uevent.c | 16 ++++++---- 6 files changed, 89 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/base/class.c b/drivers/base/class.c index d5968128be2b..80bbb2074636 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -145,6 +145,7 @@ int class_register(struct class * cls) INIT_LIST_HEAD(&cls->children); INIT_LIST_HEAD(&cls->devices); INIT_LIST_HEAD(&cls->interfaces); + kset_init(&cls->class_dirs); init_MUTEX(&cls->sem); error = kobject_set_name(&cls->subsys.kset.kobj, "%s", cls->name); if (error) @@ -163,7 +164,6 @@ int class_register(struct class * cls) void class_unregister(struct class * cls) { pr_debug("device class '%s': unregistering\n", cls->name); - kobject_unregister(cls->virtual_dir); remove_class_attrs(cls); subsystem_unregister(&cls->subsys); } diff --git a/drivers/base/core.c b/drivers/base/core.c index db3a151be4a1..658eae5dacda 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -477,34 +477,58 @@ static struct kobject * get_device_parent(struct device *dev, return NULL; } #else -static struct kobject * virtual_device_parent(struct device *dev) +static struct kobject *virtual_device_parent(struct device *dev) { - if (!dev->class) - return ERR_PTR(-ENODEV); - - if (!dev->class->virtual_dir) { - static struct kobject *virtual_dir = NULL; + static struct kobject *virtual_dir = NULL; - if (!virtual_dir) - virtual_dir = kobject_add_dir(&devices_subsys.kset.kobj, "virtual"); - dev->class->virtual_dir = kobject_add_dir(virtual_dir, dev->class->name); - } + if (!virtual_dir) + virtual_dir = kobject_add_dir(&devices_subsys.kset.kobj, "virtual"); - return dev->class->virtual_dir; + return virtual_dir; } static struct kobject * get_device_parent(struct device *dev, struct device *parent) { - /* if this is a class device, and has no parent, create one */ - if ((dev->class) && (parent == NULL)) { - return virtual_device_parent(dev); - } else if (parent) + if (dev->class) { + struct kobject *kobj = NULL; + struct kobject *parent_kobj; + struct kobject *k; + + /* + * If we have no parent, we live in "virtual". 
+ * Class-devices with a bus-device as parent, live + * in a class-directory to prevent namespace collisions. + */ + if (parent == NULL) + parent_kobj = virtual_device_parent(dev); + else if (parent->class) + return &parent->kobj; + else + parent_kobj = &parent->kobj; + + /* find our class-directory at the parent and reference it */ + spin_lock(&dev->class->class_dirs.list_lock); + list_for_each_entry(k, &dev->class->class_dirs.list, entry) + if (k->parent == parent_kobj) { + kobj = kobject_get(k); + break; + } + spin_unlock(&dev->class->class_dirs.list_lock); + if (kobj) + return kobj; + + /* or create a new class-directory at the parent device */ + return kobject_kset_add_dir(&dev->class->class_dirs, + parent_kobj, dev->class->name); + } + + if (parent) return &parent->kobj; return NULL; } - #endif + static int setup_parent(struct device *dev, struct device *parent) { struct kobject *kobj; @@ -541,7 +565,6 @@ int device_add(struct device *dev) pr_debug("DEV: registering device: ID = '%s'\n", dev->bus_id); parent = get_device(dev->parent); - error = setup_parent(dev, parent); if (error) goto Error; @@ -787,6 +810,31 @@ void device_del(struct device * dev) /* remove the device from the class list */ list_del_init(&dev->node); up(&dev->class->sem); + + /* If we live in a parent class-directory, unreference it */ + if (dev->kobj.parent->kset == &dev->class->class_dirs) { + struct device *d; + int other = 0; + + /* + * if we are the last child of our class, delete + * our class-directory at this parent + */ + down(&dev->class->sem); + list_for_each_entry(d, &dev->class->devices, node) { + if (d == dev) + continue; + if (d->kobj.parent == dev->kobj.parent) { + other = 1; + break; + } + } + if (!other) + kobject_del(dev->kobj.parent); + + kobject_put(dev->kobj.parent); + up(&dev->class->sem); + } } device_remove_file(dev, &dev->uevent_attr); device_remove_groups(dev); diff --git a/include/linux/device.h b/include/linux/device.h index 5cf30e95c8b6..de0e73eae6bc 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -181,10 +181,9 @@ struct class { struct list_head children; struct list_head devices; struct list_head interfaces; + struct kset class_dirs; struct semaphore sem; /* locks both the children and interfaces lists */ - struct kobject *virtual_dir; - struct class_attribute * class_attrs; struct class_device_attribute * class_dev_attrs; struct device_attribute * dev_attrs; diff --git a/include/linux/kobject.h b/include/linux/kobject.h index b850e0310538..d37cd7f10e3d 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -89,6 +89,8 @@ extern void kobject_unregister(struct kobject *); extern struct kobject * kobject_get(struct kobject *); extern void kobject_put(struct kobject *); +extern struct kobject *kobject_kset_add_dir(struct kset *kset, + struct kobject *, const char *); extern struct kobject *kobject_add_dir(struct kobject *, const char *); extern char * kobject_get_path(struct kobject *, gfp_t); diff --git a/lib/kobject.c b/lib/kobject.c index 057921c5945a..f66455155606 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -488,13 +488,15 @@ static struct kobj_type dir_ktype = { }; /** - * kobject_add_dir - add sub directory of object. + * kobject__kset_add_dir - add sub directory of object. + * @kset: kset the directory is belongs to. * @parent: object in which a directory is created. * @name: directory name. * * Add a plain directory object as child of given object. 
*/ -struct kobject *kobject_add_dir(struct kobject *parent, const char *name) +struct kobject *kobject_kset_add_dir(struct kset *kset, + struct kobject *parent, const char *name) { struct kobject *k; int ret; @@ -506,6 +508,7 @@ struct kobject *kobject_add_dir(struct kobject *parent, const char *name) if (!k) return NULL; + k->kset = kset; k->parent = parent; k->ktype = &dir_ktype; kobject_set_name(k, name); @@ -520,6 +523,11 @@ struct kobject *kobject_add_dir(struct kobject *parent, const char *name) return k; } +struct kobject *kobject_add_dir(struct kobject *parent, const char *name) +{ + return kobject_kset_add_dir(NULL, parent, name); +} + /** * kset_init - initialize a kset for use * @k: kset diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 82fc1794b691..4122f38330d4 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -115,6 +115,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, return 0; } + /* originating subsystem */ + if (uevent_ops && uevent_ops->name) + subsystem = uevent_ops->name(kset, kobj); + else + subsystem = kobject_name(&kset->kobj); + if (!subsystem) { + pr_debug("unset subsytem caused the event to drop!\n"); + return 0; + } + /* environment index */ envp = kzalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL); if (!envp) @@ -134,12 +144,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, goto exit; } - /* originating subsystem */ - if (uevent_ops && uevent_ops->name) - subsystem = uevent_ops->name(kset, kobj); - else - subsystem = kobject_name(&kset->kobj); - /* event environemnt for helper process only */ envp[i++] = "HOME=/"; envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; -- cgit v1.2.3-59-g8ed1b From b8c5cec23d5c33b767a1cddebd4f8813a9563e3c Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Fri, 16 Feb 2007 17:33:36 +0100 Subject: Driver core: udev triggered device-<>driver binding We get two per-bus sysfs files: ls-l /sys/subsystem/usb drwxr-xr-x 2 root root 0 2007-02-16 16:42 devices drwxr-xr-x 7 root root 0 2007-02-16 14:55 drivers -rw-r--r-- 1 root root 4096 2007-02-16 16:42 drivers_autoprobe --w------- 1 root root 4096 2007-02-16 16:42 drivers_probe The flag "drivers_autoprobe" controls the behavior of the bus to bind devices by default, or just initialize the device and leave it alone. The command "drivers_probe" accepts a bus_id and the bus tries to bind a driver to this device. Systems who want to control the driver binding with udev, switch off the bus initiated probing: echo 0 > /sys/subsystem/usb/drivers_autoprobe echo 0 > /sys/subsystem/pcmcia/drivers_autoprobe ... and initiate the probing with udev rules like: ACTION=="add", SUBSYSTEM=="usb", ATTR{subsystem/drivers_probe}="$kernel" ACTION=="add", SUBSYSTEM=="pcmcia", ATTR{subsystem/drivers_probe}="$kernel" ... Custom driver binding can happen in earlier rules by something like: ACTION=="add", SUBSYSTEM=="usb", \ ATTRS{idVendor}=="1234", ATTRS{idProduct}=="5678" \ ATTR{subsystem/drivers//bind}="$kernel" This is intended to solve the modprobe.conf mess with "install-rules", custom bind/unbind-scripts and all the weird things people invented over the years. It should also provide the functionality "libusual" was supposed to do. With udev, one can just write a udev rule to drive all USB-disks at the third port of USB-hub by the "ub" driver, and everything else by usb-storage. One can also instruct udev to bind different wireless drivers to identical cards - just selected by the pcmcia slot-number, and whatever ... 
To use the mentioned rules, it needs udev version 106, to be able to write ATTR{}="$kernel" to sysfs files. Signed-off-by: Greg Kroah-Hartman --- drivers/base/bus.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++---- include/linux/device.h | 34 +++++++++++--------- 2 files changed, 97 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 253868e03c70..9df2e6dff519 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -27,6 +27,9 @@ #define to_driver(obj) container_of(obj, struct device_driver, kobj) +static int __must_check bus_rescan_devices_helper(struct device *dev, + void *data); + static ssize_t drv_attr_show(struct kobject * kobj, struct attribute * attr, char * buf) { @@ -133,7 +136,6 @@ static decl_subsys(bus, &ktype_bus, NULL); #ifdef CONFIG_HOTPLUG - /* Manually detach a device from its associated driver. */ static int driver_helper(struct device *dev, void *data) { @@ -199,6 +201,33 @@ static ssize_t driver_bind(struct device_driver *drv, } static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind); +static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) +{ + return sprintf(buf, "%d\n", bus->drivers_autoprobe); +} + +static ssize_t store_drivers_autoprobe(struct bus_type *bus, + const char *buf, size_t count) +{ + if (buf[0] == '0') + bus->drivers_autoprobe = 0; + else + bus->drivers_autoprobe = 1; + return count; +} + +static ssize_t store_drivers_probe(struct bus_type *bus, + const char *buf, size_t count) +{ + struct device *dev; + + dev = bus_find_device(bus, NULL, (void *)buf, driver_helper); + if (!dev) + return -ENODEV; + if (bus_rescan_devices_helper(dev, NULL) != 0) + return -EINVAL; + return count; +} #endif static struct device * next_device(struct klist_iter * i) @@ -425,7 +454,8 @@ int bus_attach_device(struct device * dev) if (bus) { dev->is_registered = 1; - ret = device_attach(dev); + if (bus->drivers_autoprobe) + ret = device_attach(dev); if (ret >= 0) { klist_add_tail(&dev->knode_bus, &bus->klist_devices); ret = 0; @@ -515,9 +545,41 @@ static void remove_bind_files(struct device_driver *drv) driver_remove_file(drv, &driver_attr_bind); driver_remove_file(drv, &driver_attr_unbind); } + +static int add_probe_files(struct bus_type *bus) +{ + int retval; + + bus->drivers_probe_attr.attr.name = "drivers_probe"; + bus->drivers_probe_attr.attr.mode = S_IWUSR; + bus->drivers_probe_attr.attr.owner = bus->owner; + bus->drivers_probe_attr.store = store_drivers_probe; + retval = bus_create_file(bus, &bus->drivers_probe_attr); + if (retval) + goto out; + + bus->drivers_autoprobe_attr.attr.name = "drivers_autoprobe"; + bus->drivers_autoprobe_attr.attr.mode = S_IWUSR | S_IRUGO; + bus->drivers_autoprobe_attr.attr.owner = bus->owner; + bus->drivers_autoprobe_attr.show = show_drivers_autoprobe; + bus->drivers_autoprobe_attr.store = store_drivers_autoprobe; + retval = bus_create_file(bus, &bus->drivers_autoprobe_attr); + if (retval) + bus_remove_file(bus, &bus->drivers_probe_attr); +out: + return retval; +} + +static void remove_probe_files(struct bus_type *bus) +{ + bus_remove_file(bus, &bus->drivers_autoprobe_attr); + bus_remove_file(bus, &bus->drivers_probe_attr); +} #else static inline int add_bind_files(struct device_driver *drv) { return 0; } static inline void remove_bind_files(struct device_driver *drv) {} +static inline int add_probe_files(struct bus_type *bus) { return 0; } +static inline void remove_probe_files(struct bus_type *bus) {} #endif /** @@ -541,9 +603,11 @@ int bus_add_driver(struct 
device_driver *drv) if ((error = kobject_register(&drv->kobj))) goto out_put_bus; - error = driver_attach(drv); - if (error) - goto out_unregister; + if (drv->bus->drivers_autoprobe) { + error = driver_attach(drv); + if (error) + goto out_unregister; + } klist_add_tail(&drv->knode_bus, &bus->klist_drivers); module_add_driver(drv->owner, drv); @@ -762,6 +826,12 @@ int bus_register(struct bus_type * bus) klist_init(&bus->klist_devices, klist_devices_get, klist_devices_put); klist_init(&bus->klist_drivers, NULL, NULL); + + bus->drivers_autoprobe = 1; + retval = add_probe_files(bus); + if (retval) + goto bus_probe_files_fail; + retval = bus_add_attrs(bus); if (retval) goto bus_attrs_fail; @@ -770,6 +840,8 @@ int bus_register(struct bus_type * bus) return 0; bus_attrs_fail: + remove_probe_files(bus); +bus_probe_files_fail: kset_unregister(&bus->drivers); bus_drivers_fail: kset_unregister(&bus->devices); @@ -779,7 +851,6 @@ out: return retval; } - /** * bus_unregister - remove a bus from the system * @bus: bus. @@ -791,6 +862,7 @@ void bus_unregister(struct bus_type * bus) { pr_debug("bus %s: unregistering\n", bus->name); bus_remove_attrs(bus); + remove_probe_files(bus); kset_unregister(&bus->drivers); kset_unregister(&bus->devices); subsystem_unregister(&bus->subsys); diff --git a/include/linux/device.h b/include/linux/device.h index de0e73eae6bc..9d54fe13eb2e 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -34,9 +34,24 @@ struct device; struct device_driver; struct class; struct class_device; +struct bus_type; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(struct bus_type *, char * buf); + ssize_t (*store)(struct bus_type *, const char * buf, size_t count); +}; + +#define BUS_ATTR(_name,_mode,_show,_store) \ +struct bus_attribute bus_attr_##_name = __ATTR(_name,_mode,_show,_store) + +extern int __must_check bus_create_file(struct bus_type *, + struct bus_attribute *); +extern void bus_remove_file(struct bus_type *, struct bus_attribute *); struct bus_type { const char * name; + struct module * owner; struct subsystem subsys; struct kset drivers; @@ -49,6 +64,8 @@ struct bus_type { struct bus_attribute * bus_attrs; struct device_attribute * dev_attrs; struct driver_attribute * drv_attrs; + struct bus_attribute drivers_autoprobe_attr; + struct bus_attribute drivers_probe_attr; int (*match)(struct device * dev, struct device_driver * drv); int (*uevent)(struct device *dev, char **envp, @@ -61,6 +78,8 @@ struct bus_type { int (*suspend_late)(struct device * dev, pm_message_t state); int (*resume_early)(struct device * dev); int (*resume)(struct device * dev); + + unsigned int drivers_autoprobe:1; }; extern int __must_check bus_register(struct bus_type * bus); @@ -102,21 +121,6 @@ extern int bus_unregister_notifier(struct bus_type *bus, #define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be unbound */ -/* sysfs interface for exporting bus attributes */ - -struct bus_attribute { - struct attribute attr; - ssize_t (*show)(struct bus_type *, char * buf); - ssize_t (*store)(struct bus_type *, const char * buf, size_t count); -}; - -#define BUS_ATTR(_name,_mode,_show,_store) \ -struct bus_attribute bus_attr_##_name = __ATTR(_name,_mode,_show,_store) - -extern int __must_check bus_create_file(struct bus_type *, - struct bus_attribute *); -extern void bus_remove_file(struct bus_type *, struct bus_attribute *); - struct device_driver { const char * name; struct bus_type * bus; -- cgit v1.2.3-59-g8ed1b From 621a1672f7377e08a942f205d6742d8af1292aab Mon Sep 
17 00:00:00 2001 From: Dmitry Torokhov Date: Sat, 10 Mar 2007 01:37:34 -0500 Subject: driver core: Use attribute groups in struct device_type Driver core: use attribute groups in struct device_type Attribute groups are more flexible than attribute lists (an attribute list can be represented by anonymous group) so switch struct device_type to use them. Also rework attribute creation for devices so that they all cleaned up properly in case of errors. Signed-off-by: Dmitry Torokhov Cc: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 115 +++++++++++++++++++++++++++++-------------------- include/linux/device.h | 2 +- 2 files changed, 70 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/drivers/base/core.c b/drivers/base/core.c index 9ea12d9b48a6..bb2cc37a4d43 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -246,64 +246,95 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr, return count; } -static int device_add_groups(struct device *dev) +static int device_add_attributes(struct device *dev, + struct device_attribute *attrs) +{ + int error = 0; + int i; + + if (attrs) { + for (i = 0; attr_name(attrs[i]); i++) { + error = device_create_file(dev, &attrs[i]); + if (error) + break; + } + if (error) + while (--i >= 0) + device_remove_file(dev, &attrs[i]); + } + return error; +} + +static void device_remove_attributes(struct device *dev, + struct device_attribute *attrs) { int i; + + if (attrs) + for (i = 0; attr_name(attrs[i]); i++) + device_remove_file(dev, &attrs[i]); +} + +static int device_add_groups(struct device *dev, + struct attribute_group **groups) +{ int error = 0; + int i; - if (dev->groups) { - for (i = 0; dev->groups[i]; i++) { - error = sysfs_create_group(&dev->kobj, dev->groups[i]); + if (groups) { + for (i = 0; groups[i]; i++) { + error = sysfs_create_group(&dev->kobj, groups[i]); if (error) { while (--i >= 0) - sysfs_remove_group(&dev->kobj, dev->groups[i]); - goto out; + sysfs_remove_group(&dev->kobj, groups[i]); + break; } } } -out: return error; } -static void device_remove_groups(struct device *dev) +static void device_remove_groups(struct device *dev, + struct attribute_group **groups) { int i; - if (dev->groups) { - for (i = 0; dev->groups[i]; i++) { - sysfs_remove_group(&dev->kobj, dev->groups[i]); - } - } + + if (groups) + for (i = 0; groups[i]; i++) + sysfs_remove_group(&dev->kobj, groups[i]); } static int device_add_attrs(struct device *dev) { struct class *class = dev->class; struct device_type *type = dev->type; - int error = 0; - int i; + int error; - if (class && class->dev_attrs) { - for (i = 0; attr_name(class->dev_attrs[i]); i++) { - error = device_create_file(dev, &class->dev_attrs[i]); - if (error) - break; - } + if (class) { + error = device_add_attributes(dev, class->dev_attrs); if (error) - while (--i >= 0) - device_remove_file(dev, &class->dev_attrs[i]); + return error; } - if (type && type->attrs) { - for (i = 0; attr_name(type->attrs[i]); i++) { - error = device_create_file(dev, &type->attrs[i]); - if (error) - break; - } + if (type) { + error = device_add_groups(dev, type->groups); if (error) - while (--i >= 0) - device_remove_file(dev, &type->attrs[i]); + goto err_remove_class_attrs; } + error = device_add_groups(dev, dev->groups); + if (error) + goto err_remove_type_groups; + + return 0; + + err_remove_type_groups: + if (type) + device_remove_groups(dev, type->groups); + err_remove_class_attrs: + if (class) + device_remove_attributes(dev, class->dev_attrs); + return 
error; } @@ -311,17 +342,14 @@ static void device_remove_attrs(struct device *dev) { struct class *class = dev->class; struct device_type *type = dev->type; - int i; - if (class && class->dev_attrs) { - for (i = 0; attr_name(class->dev_attrs[i]); i++) - device_remove_file(dev, &class->dev_attrs[i]); - } + device_remove_groups(dev, dev->groups); - if (type && type->attrs) { - for (i = 0; attr_name(type->attrs[i]); i++) - device_remove_file(dev, &type->attrs[i]); - } + if (type) + device_remove_groups(dev, type->groups); + + if (class) + device_remove_attributes(dev, class->dev_attrs); } @@ -638,8 +666,6 @@ int device_add(struct device *dev) if ((error = device_add_attrs(dev))) goto AttrsError; - if ((error = device_add_groups(dev))) - goto GroupError; if ((error = device_pm_add(dev))) goto PMError; if ((error = bus_add_device(dev))) @@ -663,7 +689,7 @@ int device_add(struct device *dev) up(&dev->class->sem); } Done: - kfree(class_name); + kfree(class_name); put_device(dev); return error; AttachError: @@ -674,8 +700,6 @@ int device_add(struct device *dev) if (dev->bus) blocking_notifier_call_chain(&dev->bus->bus_notifier, BUS_NOTIFY_DEL_DEVICE, dev); - device_remove_groups(dev); - GroupError: device_remove_attrs(dev); AttrsError: if (dev->devt_attr) { @@ -838,7 +862,6 @@ void device_del(struct device * dev) } } device_remove_file(dev, &dev->uevent_attr); - device_remove_groups(dev); device_remove_attrs(dev); bus_remove_device(dev); diff --git a/include/linux/device.h b/include/linux/device.h index 9d54fe13eb2e..3b64fdecd041 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -332,7 +332,7 @@ extern struct class_device *class_device_create(struct class *cls, extern void class_device_destroy(struct class *cls, dev_t devt); struct device_type { - struct device_attribute *attrs; + struct attribute_group **groups; int (*uevent)(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size); void (*release)(struct device *dev); -- cgit v1.2.3-59-g8ed1b From 414264f959cf46f49f974b3510400e12ac3624a6 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Mon, 12 Mar 2007 21:08:57 +0100 Subject: Driver core: add name to device_type If "name" of a device_type is specified, the uevent will contain the device_type name in the DEVTYPE variable. This helps userspace to distingiush between different types of devices, belonging to the same subsystem. Signed-off-by: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 5 +++++ include/linux/device.h | 10 ++++++++++ 2 files changed, 15 insertions(+) (limited to 'include') diff --git a/drivers/base/core.c b/drivers/base/core.c index bb2cc37a4d43..bffb69e4bde2 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -157,6 +157,11 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, char **envp, "MINOR=%u", MINOR(dev->devt)); } + if (dev->type && dev->type->name) + add_uevent_var(envp, num_envp, &i, + buffer, buffer_size, &length, + "DEVTYPE=%s", dev->type->name); + if (dev->driver) add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, diff --git a/include/linux/device.h b/include/linux/device.h index 3b64fdecd041..7f63d4de5c4d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -331,7 +331,17 @@ extern struct class_device *class_device_create(struct class *cls, __attribute__((format(printf,5,6))); extern void class_device_destroy(struct class *cls, dev_t devt); +/* + * The type of device, "struct device" is embedded in. 
A class + * or bus can contain devices of different types + * like "partitions" and "disks", "mouse" and "event". + * This identifies the device type and carries type-specific + * information, equivalent to the kobj_type of a kobject. + * If "name" is specified, the uevent will contain it in + * the DEVTYPE variable. + */ struct device_type { + const char *name; struct attribute_group **groups; int (*uevent)(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size); -- cgit v1.2.3-59-g8ed1b From 21c7f30b1d3f8a3de3128478daca3ce203fc8733 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Mon, 5 Feb 2007 16:15:25 -0800 Subject: driver core: per-subsystem multithreaded probing Make multithreaded probing work per subsystem instead of per driver. It doesn't make much sense to probe the same device for multiple drivers in parallel (after all, only one driver can bind to the device). Instead, create a probing thread for each device that probes the drivers one after another. Also make the decision to use multi-threaded probe per bus instead of per device and adapt the pci code. Signed-off-by: Cornelia Huck Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- drivers/base/dd.c | 62 ++++++++++++++++++++++++------------------------ drivers/pci/pci-driver.c | 6 +---- include/linux/device.h | 3 +-- include/linux/pci.h | 2 -- 4 files changed, 33 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 6a48824e43ff..616b4bbacf1b 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -94,19 +94,11 @@ int device_bind_driver(struct device *dev) return ret; } -struct stupid_thread_structure { - struct device_driver *drv; - struct device *dev; -}; - static atomic_t probe_count = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); -static int really_probe(void *void_data) +static int really_probe(struct device *dev, struct device_driver *drv) { - struct stupid_thread_structure *data = void_data; - struct device_driver *drv = data->drv; - struct device *dev = data->dev; int ret = 0; atomic_inc(&probe_count); @@ -154,7 +146,6 @@ probe_failed: */ ret = 0; done: - kfree(data); atomic_dec(&probe_count); wake_up(&probe_waitqueue); return ret; @@ -186,16 +177,14 @@ int driver_probe_done(void) * format of the ID structures, nor what is to be considered a match and * what is not. * - * This function returns 1 if a match is found, an error if one occurs - * (that is not -ENODEV or -ENXIO), and 0 otherwise. + * This function returns 1 if a match is found, -ENODEV if the device is + * not registered, and 0 otherwise. * * This function must be called with @dev->sem held. When called for a * USB interface, @dev->parent->sem must be held as well. 
*/ int driver_probe_device(struct device_driver * drv, struct device * dev) { - struct stupid_thread_structure *data; - struct task_struct *probe_task; int ret = 0; if (!device_is_registered(dev)) @@ -206,19 +195,7 @@ int driver_probe_device(struct device_driver * drv, struct device * dev) pr_debug("%s: Matched Device %s with Driver %s\n", drv->bus->name, dev->bus_id, drv->name); - data = kmalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - data->drv = drv; - data->dev = dev; - - if (drv->multithread_probe) { - probe_task = kthread_run(really_probe, data, - "probe-%s", dev->bus_id); - if (IS_ERR(probe_task)) - ret = really_probe(data); - } else - ret = really_probe(data); + ret = really_probe(dev, drv); done: return ret; @@ -230,30 +207,53 @@ static int __device_attach(struct device_driver * drv, void * data) return driver_probe_device(drv, dev); } +static int device_probe_drivers(void *data) +{ + struct device *dev = data; + int ret = 0; + + if (dev->bus) { + down(&dev->sem); + ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); + up(&dev->sem); + } + return ret; +} + /** * device_attach - try to attach device to a driver. * @dev: device. * * Walk the list of drivers that the bus has and call * driver_probe_device() for each pair. If a compatible - * pair is found, break out and return. + * pair is found, break out and return. If the bus specifies + * multithreaded probing, walking the list of drivers is done + * on a probing thread. * * Returns 1 if the device was bound to a driver; - * 0 if no matching device was found; error code otherwise. + * 0 if no matching device was found or multithreaded probing is done; + * error code otherwise. * * When called for a USB interface, @dev->parent->sem must be held. */ int device_attach(struct device * dev) { int ret = 0; + struct task_struct *probe_task = ERR_PTR(-ENOMEM); down(&dev->sem); if (dev->driver) { ret = device_bind_driver(dev); if (ret == 0) ret = 1; - } else - ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); + } else { + if (dev->bus->multithread_probe) + probe_task = kthread_run(device_probe_drivers, dev, + "probe-%s", dev->bus_id); + if(IS_ERR(probe_task)) + ret = bus_for_each_drv(dev->bus, NULL, dev, + __device_attach); + } up(&dev->sem); return ret; } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index a3c1755b2f28..39e80fcef4b3 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -434,11 +434,6 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner, drv->driver.mod_name = mod_name; drv->driver.kobj.ktype = &pci_driver_kobj_type; - if (pci_multithread_probe) - drv->driver.multithread_probe = pci_multithread_probe; - else - drv->driver.multithread_probe = drv->multithread_probe; - spin_lock_init(&drv->dynids.lock); INIT_LIST_HEAD(&drv->dynids.list); @@ -574,6 +569,7 @@ struct bus_type pci_bus_type = { static int __init pci_driver_init(void) { + pci_bus_type.multithread_probe = pci_multithread_probe; return bus_register(&pci_bus_type); } diff --git a/include/linux/device.h b/include/linux/device.h index 7f63d4de5c4d..eb1fff0b1d2a 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -80,6 +80,7 @@ struct bus_type { int (*resume)(struct device * dev); unsigned int drivers_autoprobe:1; + unsigned int multithread_probe:1; }; extern int __must_check bus_register(struct bus_type * bus); @@ -139,8 +140,6 @@ struct device_driver { void (*shutdown) (struct device * dev); int (*suspend) (struct device * dev, pm_message_t state); int 
(*resume) (struct device * dev); - - unsigned int multithread_probe:1; }; diff --git a/include/linux/pci.h b/include/linux/pci.h index 481ea0663f19..a3ad76221c6f 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -361,8 +361,6 @@ struct pci_driver { struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; - - int multithread_probe; }; #define to_pci_driver(drv) container_of(drv,struct pci_driver, driver) -- cgit v1.2.3-59-g8ed1b From 74e9f5fa1570f956c96dd5d3f1053daedbbf01a0 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 9 Apr 2002 12:14:34 -0700 Subject: Driver core: remove unneeded completion from driver release path The completion in the driver release path is due to ancient history in the _very_ early 2.5 days when we were not tracking the module reference count of attributes. It is not needed at all and can be removed. Note, we now have an empty release function for the driver structure. This is due to the fact that drivers are statically allocated in the system at this point in time, something which I want to change in the future. But remember, drivers are really code, which is reference counted by the module, unlike devices, which are data and _must_ be reference counted properly in order to work correctly. Cc: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- drivers/base/bus.c | 15 +++++++++++++-- drivers/base/driver.c | 20 -------------------- include/linux/device.h | 1 - 3 files changed, 13 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 20b6dc8706fa..1a5a350eca15 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -63,8 +63,19 @@ static struct sysfs_ops driver_sysfs_ops = { static void driver_release(struct kobject * kobj) { - struct device_driver * drv = to_driver(kobj); - complete(&drv->unloaded); + /* + * Yes this is an empty release function, it is this way because struct + * device is always a static object, not a dynamic one. Yes, this is + * not nice and bad, but remember, drivers are code, reference counted + * by the module count, not a device, which is really data. And yes, + * in the future I do want to have all drivers be created dynamically, + * and am working toward that goal, but it will take a bit longer... + * + * But do not let this example give _anyone_ the idea that they can + * create a release function without any code in it at all, to do that + * is almost always wrong. If you have any questions about this, + * please send an email to + */ } static struct kobj_type ktype_driver = { diff --git a/drivers/base/driver.c b/drivers/base/driver.c index 082bfded3854..eb11475293ed 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -149,10 +149,6 @@ void put_driver(struct device_driver * drv) * We pass off most of the work to the bus_add_driver() call, * since most of the things we have to do deal with the bus * structures. - * - * The one interesting aspect is that we setup @drv->unloaded - * as a completion that gets complete when the driver reference - * count reaches 0. */ int driver_register(struct device_driver * drv) { @@ -162,35 +158,19 @@ int driver_register(struct device_driver * drv) printk(KERN_WARNING "Driver '%s' needs updating - please use bus_type methods\n", drv->name); } klist_init(&drv->klist_devices, NULL, NULL); - init_completion(&drv->unloaded); return bus_add_driver(drv); } - /** * driver_unregister - remove driver from system. * @drv: driver. 
* * Again, we pass off most of the work to the bus-level call. - * - * Though, once that is done, we wait until @drv->unloaded is completed. - * This will block until the driver refcount reaches 0, and it is - * released. Only modular drivers will call this function, and we - * have to guarantee that it won't complete, letting the driver - * unload until all references are gone. */ void driver_unregister(struct device_driver * drv) { bus_remove_driver(drv); - /* - * If the driver is a module, we are probably in - * the module unload path, and we want to wait - * for everything to unload before we can actually - * finish the unload. - */ - if (drv->owner) - wait_for_completion(&drv->unloaded); } /** diff --git a/include/linux/device.h b/include/linux/device.h index eb1fff0b1d2a..c9dc458e8e50 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -126,7 +126,6 @@ struct device_driver { const char * name; struct bus_type * bus; - struct completion unloaded; struct kobject kobj; struct klist klist_devices; struct klist_node knode_bus; -- cgit v1.2.3-59-g8ed1b From f89cbc399ecd924c4bd879344e662aace2274b4f Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Tue, 3 Apr 2007 01:08:40 -0400 Subject: Driver core: add suspend() and resume() to struct device_type Driver core: add suspend() and resume() to struct device_type In cases when there are devices of different types in the same class we can't use class's implementation of suspend and resume methods and we need to add them to struct device_type instead. Also fix error handling in resume code (we should not try to call class's resume method iof bus's resume method for the device failed. Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/base/power/resume.c | 13 ++++++++++++- drivers/base/power/suspend.c | 12 ++++++++++++ include/linux/device.h | 2 ++ 3 files changed, 26 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c index 020be36705a6..a2c64188d713 100644 --- a/drivers/base/power/resume.c +++ b/drivers/base/power/resume.c @@ -26,7 +26,9 @@ int resume_device(struct device * dev) TRACE_DEVICE(dev); TRACE_RESUME(0); + down(&dev->sem); + if (dev->power.pm_parent && dev->power.pm_parent->power.power_state.event) { dev_err(dev, "PM: resume from %d, parent %s still %d\n", @@ -34,15 +36,24 @@ int resume_device(struct device * dev) dev->power.pm_parent->bus_id, dev->power.pm_parent->power.power_state.event); } + if (dev->bus && dev->bus->resume) { dev_dbg(dev,"resuming\n"); error = dev->bus->resume(dev); } - if (dev->class && dev->class->resume) { + + if (!error && dev->type && dev->type->resume) { + dev_dbg(dev,"resuming\n"); + error = dev->type->resume(dev); + } + + if (!error && dev->class && dev->class->resume) { dev_dbg(dev,"class resume\n"); error = dev->class->resume(dev); } + up(&dev->sem); + TRACE_RESUME(error); return error; } diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c index ece136bf97e3..42d2b86ba765 100644 --- a/drivers/base/power/suspend.c +++ b/drivers/base/power/suspend.c @@ -78,6 +78,18 @@ int suspend_device(struct device * dev, pm_message_t state) suspend_report_result(dev->class->suspend, error); } + if (!error && dev->type && dev->type->suspend && !dev->power.power_state.event) { + dev_dbg(dev, "%s%s\n", + suspend_verb(state.event), + ((state.event == PM_EVENT_SUSPEND) + && device_may_wakeup(dev)) + ? 
", may wakeup" + : "" + ); + error = dev->type->suspend(dev, state); + suspend_report_result(dev->type->suspend, error); + } + if (!error && dev->bus && dev->bus->suspend && !dev->power.power_state.event) { dev_dbg(dev, "%s%s\n", suspend_verb(state.event), diff --git a/include/linux/device.h b/include/linux/device.h index c9dc458e8e50..af603a137690 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -344,6 +344,8 @@ struct device_type { int (*uevent)(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size); void (*release)(struct device *dev); + int (*suspend)(struct device * dev, pm_message_t state); + int (*resume)(struct device * dev); }; /* interface for exporting device attributes */ -- cgit v1.2.3-59-g8ed1b From 4628803062d93dadc6ba8e801fd075526904a38c Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 9 Apr 2007 11:52:31 -0400 Subject: kobject core: remove rwsem from struct subsystem It isn't used at all by the driver core anymore, and the few usages of it within the kernel have now all been fixed as most of them were using it incorrectly. So remove it. Now the whole struct subsys can be removed from the system, but that's for a later patch... Cc: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- include/linux/kobject.h | 2 -- lib/kobject.c | 4 +--- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/kobject.h b/include/linux/kobject.h index d37cd7f10e3d..a659a97eccf1 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -177,7 +176,6 @@ extern struct kobject * kset_find_obj(struct kset *, const char *); struct subsystem { struct kset kset; - struct rw_semaphore rwsem; }; #define decl_subsys(_name,_type,_uevent_ops) \ diff --git a/lib/kobject.c b/lib/kobject.c index eb251aae78dd..2882aff6f3d1 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -652,7 +652,6 @@ struct kobject * kset_find_obj(struct kset * kset, const char * name) void subsystem_init(struct subsystem * s) { - init_rwsem(&s->rwsem); kset_init(&s->kset); } @@ -661,8 +660,7 @@ void subsystem_init(struct subsystem * s) * @s: the subsystem we're registering. * * Once we register the subsystem, we want to make sure that - * the kset points back to this subsystem for correct usage of - * the rwsem. + * the kset points back to this subsystem. */ int subsystem_register(struct subsystem * s) -- cgit v1.2.3-59-g8ed1b From 3106d46f51a1a72fdbf071ebc0800a9bcfcbc544 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Fri, 6 Apr 2007 12:21:45 +0200 Subject: the overdue removal of the mount/umount uevents This patch contains the overdue removal of the mount/umount uevents. Signed-off-by: Adrian Bunk Signed-off-by: Greg Kroah-Hartman --- Documentation/feature-removal-schedule.txt | 9 --------- fs/super.c | 12 ------------ include/linux/kobject.h | 8 +++----- lib/kobject_uevent.c | 4 ---- 4 files changed, 3 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 6da663607f7b..ec0b4843b1cb 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -134,15 +134,6 @@ Who: Arjan van de Ven --------------------------- -What: mount/umount uevents -When: February 2007 -Why: These events are not correct, and do not properly let userspace know - when a file system has been mounted or unmounted. 
Userspace should - poll the /proc/mounts file instead to detect this properly. -Who: Greg Kroah-Hartman - ---------------------------- - What: USB driver API moves to EXPORT_SYMBOL_GPL When: February 2008 Files: include/linux/usb.h, drivers/usb/core/driver.c diff --git a/fs/super.c b/fs/super.c index 60b1e50cbf53..8341e4e1d738 100644 --- a/fs/super.c +++ b/fs/super.c @@ -725,16 +725,6 @@ static int test_bdev_super(struct super_block *s, void *data) return (void *)s->s_bdev == data; } -static void bdev_uevent(struct block_device *bdev, enum kobject_action action) -{ - if (bdev->bd_disk) { - if (bdev->bd_part) - kobject_uevent(&bdev->bd_part->kobj, action); - else - kobject_uevent(&bdev->bd_disk->kobj, action); - } -} - int get_sb_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int), @@ -782,7 +772,6 @@ int get_sb_bdev(struct file_system_type *fs_type, } s->s_flags |= MS_ACTIVE; - bdev_uevent(bdev, KOBJ_MOUNT); } return simple_set_mnt(mnt, s); @@ -801,7 +790,6 @@ void kill_block_super(struct super_block *sb) { struct block_device *bdev = sb->s_bdev; - bdev_uevent(bdev, KOBJ_UMOUNT); generic_shutdown_super(sb); sync_blockdev(bdev); close_bdev_excl(bdev); diff --git a/include/linux/kobject.h b/include/linux/kobject.h index a659a97eccf1..eb0e63ef297f 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -42,11 +42,9 @@ enum kobject_action { KOBJ_ADD = (__force kobject_action_t) 0x01, /* exclusive to core */ KOBJ_REMOVE = (__force kobject_action_t) 0x02, /* exclusive to core */ KOBJ_CHANGE = (__force kobject_action_t) 0x03, /* device state change */ - KOBJ_MOUNT = (__force kobject_action_t) 0x04, /* mount event for block devices (broken) */ - KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */ - KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */ - KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */ - KOBJ_MOVE = (__force kobject_action_t) 0x08, /* device move */ + KOBJ_OFFLINE = (__force kobject_action_t) 0x04, /* device offline */ + KOBJ_ONLINE = (__force kobject_action_t) 0x05, /* device online */ + KOBJ_MOVE = (__force kobject_action_t) 0x06, /* device move */ }; struct kobject { diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index d9a3510ed2e2..12e311dc664c 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -42,10 +42,6 @@ static char *action_to_string(enum kobject_action action) return "remove"; case KOBJ_CHANGE: return "change"; - case KOBJ_MOUNT: - return "mount"; - case KOBJ_UMOUNT: - return "umount"; case KOBJ_OFFLINE: return "offline"; case KOBJ_ONLINE: -- cgit v1.2.3-59-g8ed1b From 8447891fe845851738439788c74b3c811578e3f9 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 17 Apr 2007 15:59:36 +1000 Subject: debugfs: Add debugfs_create_u64() I went to use this the other day, only to find it didn't exist. It's a straight copy of the debugfs u32 code, then s/u32/u64/. A quick test shows it seems to be working. 
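A minimal usage sketch of the new call might look like the following (hypothetical "foo" driver code with simplified error handling, not taken from the patch):

#include <linux/debugfs.h>
#include <linux/init.h>

static u64 foo_byte_count;
static struct dentry *foo_dir;

static int __init foo_debugfs_init(void)
{
        /* create a directory and expose a 64-bit counter in it;
         * the IS_ERR() case for a debugfs-less kernel is ignored here */
        foo_dir = debugfs_create_dir("foo", NULL);
        if (!foo_dir)
                return -ENODEV;
        if (!debugfs_create_u64("byte_count", 0444, foo_dir,
                                &foo_byte_count)) {
                debugfs_remove(foo_dir);
                return -ENODEV;
        }
        return 0;
}

As with debugfs_create_u32(), the returned dentry is handed back to debugfs_remove() when the file should go away; there is no automatic cleanup on module unload.
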
Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- fs/debugfs/file.c | 42 ++++++++++++++++++++++++++++++++++++++++++ include/linux/debugfs.h | 9 +++++++++ 2 files changed, 51 insertions(+) (limited to 'include') diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 682f928b7f4d..2e124e0075c5 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -179,6 +179,48 @@ struct dentry *debugfs_create_u32(const char *name, mode_t mode, } EXPORT_SYMBOL_GPL(debugfs_create_u32); +static void debugfs_u64_set(void *data, u64 val) +{ + *(u64 *)data = val; +} + +static u64 debugfs_u64_get(void *data) +{ + return *(u64 *)data; +} +DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n"); + +/** + * debugfs_create_u64 - create a debugfs file that is used to read and write an unsigned 64-bit value + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is %NULL, then the + * file will be created in the root of the debugfs filesystem. + * @value: a pointer to the variable that the file should read to and write + * from. + * + * This function creates a file in debugfs with the given name that + * contains the value of the variable @value. If the @mode variable is so + * set, it can be read from, and written to. + * + * This function will return a pointer to a dentry if it succeeds. This + * pointer must be passed to the debugfs_remove() function when the file is + * to be removed (no automatic cleanup happens if your module is unloaded, + * you are responsible here.) If an error occurs, %NULL will be returned. + * + * If debugfs is not enabled in the kernel, the value -%ENODEV will be + * returned. It is not wise to check for this value, but rather, check for + * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling + * code. 
+ */ +struct dentry *debugfs_create_u64(const char *name, mode_t mode, + struct dentry *parent, u64 *value) +{ + return debugfs_create_file(name, mode, parent, value, &fops_u64); +} +EXPORT_SYMBOL_GPL(debugfs_create_u64); + static ssize_t read_file_bool(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 9fa0983d1aa8..5a9c49534d08 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -44,6 +44,8 @@ struct dentry *debugfs_create_u16(const char *name, mode_t mode, struct dentry *parent, u16 *value); struct dentry *debugfs_create_u32(const char *name, mode_t mode, struct dentry *parent, u32 *value); +struct dentry *debugfs_create_u64(const char *name, mode_t mode, + struct dentry *parent, u64 *value); struct dentry *debugfs_create_bool(const char *name, mode_t mode, struct dentry *parent, u32 *value); @@ -104,6 +106,13 @@ static inline struct dentry *debugfs_create_u32(const char *name, mode_t mode, return ERR_PTR(-ENODEV); } +static inline struct dentry *debugfs_create_u64(const char *name, mode_t mode, + struct dentry *parent, + u64 *value) +{ + return ERR_PTR(-ENODEV); +} + static inline struct dentry *debugfs_create_bool(const char *name, mode_t mode, struct dentry *parent, u32 *value) -- cgit v1.2.3-59-g8ed1b From 523ded71de0c5e66973335bf99a80edfda9f401b Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Thu, 26 Apr 2007 00:12:04 -0700 Subject: device_schedule_callback() needs a module reference This patch (as896b) fixes an oversight in the design of device_schedule_callback(). It is necessary to acquire a reference to the module owning the callback routine, to prevent the module from being unloaded before the callback can run. Signed-off-by: Alan Stern Cc: Satyam Sharma Cc: Neil Brown Cc: Cornelia Huck Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 16 ++++++++++------ fs/sysfs/file.c | 14 +++++++++++--- include/linux/device.h | 8 ++++++-- include/linux/sysfs.h | 4 ++-- 4 files changed, 29 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/base/core.c b/drivers/base/core.c index f69305c7269d..8aa090da1cd7 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -480,9 +480,10 @@ void device_remove_bin_file(struct device *dev, struct bin_attribute *attr) EXPORT_SYMBOL_GPL(device_remove_bin_file); /** - * device_schedule_callback - helper to schedule a callback for a device + * device_schedule_callback_owner - helper to schedule a callback for a device * @dev: device. * @func: callback function to invoke later. + * @owner: module owning the callback routine * * Attribute methods must not unregister themselves or their parent device * (which would amount to the same thing). Attempts to do so will deadlock, @@ -493,20 +494,23 @@ EXPORT_SYMBOL_GPL(device_remove_bin_file); * argument in the workqueue's process context. @dev will be pinned until * @func returns. * + * This routine is usually called via the inline device_schedule_callback(), + * which automatically sets @owner to THIS_MODULE. + * * Returns 0 if the request was submitted, -ENOMEM if storage could not - * be allocated. + * be allocated, -ENODEV if a reference to @owner isn't available. * * NOTE: This routine won't work if CONFIG_SYSFS isn't set! It uses an * underlying sysfs routine (since it is intended for use by attribute * methods), and if sysfs isn't available you'll get nothing but -ENOSYS. 
*/ -int device_schedule_callback(struct device *dev, - void (*func)(struct device *)) +int device_schedule_callback_owner(struct device *dev, + void (*func)(struct device *), struct module *owner) { return sysfs_schedule_callback(&dev->kobj, - (void (*)(void *)) func, dev); + (void (*)(void *)) func, dev, owner); } -EXPORT_SYMBOL_GPL(device_schedule_callback); +EXPORT_SYMBOL_GPL(device_schedule_callback_owner); static void klist_children_get(struct klist_node *n) { diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index fc4633378dc0..db0413a411d6 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -633,6 +633,7 @@ struct sysfs_schedule_callback_struct { struct kobject *kobj; void (*func)(void *); void *data; + struct module *owner; struct work_struct work; }; @@ -643,6 +644,7 @@ static void sysfs_schedule_callback_work(struct work_struct *work) (ss->func)(ss->data); kobject_put(ss->kobj); + module_put(ss->owner); kfree(ss); } @@ -651,6 +653,7 @@ static void sysfs_schedule_callback_work(struct work_struct *work) * @kobj: object we're acting for. * @func: callback function to invoke later. * @data: argument to pass to @func. + * @owner: module owning the callback code * * sysfs attribute methods must not unregister themselves or their parent * kobject (which would amount to the same thing). Attempts to do so will @@ -663,20 +666,25 @@ static void sysfs_schedule_callback_work(struct work_struct *work) * until @func returns. * * Returns 0 if the request was submitted, -ENOMEM if storage could not - * be allocated. + * be allocated, -ENODEV if a reference to @owner isn't available. */ int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), - void *data) + void *data, struct module *owner) { struct sysfs_schedule_callback_struct *ss; + if (!try_module_get(owner)) + return -ENODEV; ss = kmalloc(sizeof(*ss), GFP_KERNEL); - if (!ss) + if (!ss) { + module_put(owner); return -ENOMEM; + } kobject_get(kobj); ss->kobj = kobj; ss->func = func; ss->data = data; + ss->owner = owner; INIT_WORK(&ss->work, sysfs_schedule_callback_work); schedule_work(&ss->work); return 0; diff --git a/include/linux/device.h b/include/linux/device.h index af603a137690..8511d14071b3 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -367,8 +367,12 @@ extern int __must_check device_create_bin_file(struct device *dev, struct bin_attribute *attr); extern void device_remove_bin_file(struct device *dev, struct bin_attribute *attr); -extern int device_schedule_callback(struct device *dev, - void (*func)(struct device *)); +extern int device_schedule_callback_owner(struct device *dev, + void (*func)(struct device *), struct module *owner); + +/* This is a macro to avoid include problems with THIS_MODULE */ +#define device_schedule_callback(dev, func) \ + device_schedule_callback_owner(dev, func, THIS_MODULE) /* device resource management */ typedef void (*dr_release_t)(struct device *dev, void *res); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index fea9a6b3fb7b..7d5d1ec95c2e 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -80,7 +80,7 @@ struct sysfs_ops { #ifdef CONFIG_SYSFS extern int sysfs_schedule_callback(struct kobject *kobj, - void (*func)(void *), void *data); + void (*func)(void *), void *data, struct module *owner); extern int __must_check sysfs_create_dir(struct kobject *, struct dentry *); @@ -137,7 +137,7 @@ extern int __must_check sysfs_init(void); #else /* CONFIG_SYSFS */ static inline int sysfs_schedule_callback(struct kobject *kobj, - void (*func)(void 
*), void *data) + void (*func)(void *), void *data, struct module *owner) { return -ENOSYS; } -- cgit v1.2.3-59-g8ed1b From 057f6c019fff9ee290641d50647359bb8898918e Mon Sep 17 00:00:00 2001 From: James Morris Date: Thu, 26 Apr 2007 00:12:05 -0700 Subject: security: prevent permission checking of file removal via sysfs_remove_group() Prevent permission checking from being performed when the kernel wants to unconditionally remove a sysfs group, by introducing an kernel-only variant of lookup_one_len(), lookup_one_len_kern(). Additionally, as sysfs_remove_group() does not check the return value of the lookup before using it, a BUG_ON has been added to pinpoint the cause of any problems potentially caused by this (and as a form of annotation). Signed-off-by: James Morris Cc: Nagendra Singh Tomar Cc: Tejun Heo Cc: Stephen Smalley Cc: Eric Paris Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/namei.c | 72 +++++++++++++++++++++++++++++++++++++-------------- fs/sysfs/group.c | 6 +++-- include/linux/namei.h | 1 + 3 files changed, 57 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/fs/namei.c b/fs/namei.c index ee60cc4d3453..880052cadbcd 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1243,22 +1243,13 @@ int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags, return err; } -/* - * Restricted form of lookup. Doesn't follow links, single-component only, - * needs parent already locked. Doesn't follow mounts. - * SMP-safe. - */ -static struct dentry * __lookup_hash(struct qstr *name, struct dentry * base, struct nameidata *nd) +static inline struct dentry *__lookup_hash_kern(struct qstr *name, struct dentry *base, struct nameidata *nd) { - struct dentry * dentry; + struct dentry *dentry; struct inode *inode; int err; inode = base->d_inode; - err = permission(inode, MAY_EXEC, nd); - dentry = ERR_PTR(err); - if (err) - goto out; /* * See if the low-level filesystem might want @@ -1287,35 +1278,76 @@ out: return dentry; } +/* + * Restricted form of lookup. Doesn't follow links, single-component only, + * needs parent already locked. Doesn't follow mounts. + * SMP-safe. 
+ */ +static inline struct dentry * __lookup_hash(struct qstr *name, struct dentry *base, struct nameidata *nd) +{ + struct dentry *dentry; + struct inode *inode; + int err; + + inode = base->d_inode; + + err = permission(inode, MAY_EXEC, nd); + dentry = ERR_PTR(err); + if (err) + goto out; + + dentry = __lookup_hash_kern(name, base, nd); +out: + return dentry; +} + static struct dentry *lookup_hash(struct nameidata *nd) { return __lookup_hash(&nd->last, nd->dentry, nd); } /* SMP-safe */ -struct dentry * lookup_one_len(const char * name, struct dentry * base, int len) +static inline int __lookup_one_len(const char *name, struct qstr *this, struct dentry *base, int len) { unsigned long hash; - struct qstr this; unsigned int c; - this.name = name; - this.len = len; + this->name = name; + this->len = len; if (!len) - goto access; + return -EACCES; hash = init_name_hash(); while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') - goto access; + return -EACCES; hash = partial_name_hash(c, hash); } - this.hash = end_name_hash(hash); + this->hash = end_name_hash(hash); + return 0; +} +struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) +{ + int err; + struct qstr this; + + err = __lookup_one_len(name, &this, base, len); + if (err) + return ERR_PTR(err); return __lookup_hash(&this, base, NULL); -access: - return ERR_PTR(-EACCES); +} + +struct dentry *lookup_one_len_kern(const char *name, struct dentry *base, int len) +{ + int err; + struct qstr this; + + err = __lookup_one_len(name, &this, base, len); + if (err) + return ERR_PTR(err); + return __lookup_hash_kern(&this, base, NULL); } /* diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index b20951c93761..52eed2a7a5ef 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@ -70,9 +70,11 @@ void sysfs_remove_group(struct kobject * kobj, { struct dentry * dir; - if (grp->name) - dir = lookup_one_len(grp->name, kobj->dentry, + if (grp->name) { + dir = lookup_one_len_kern(grp->name, kobj->dentry, strlen(grp->name)); + BUG_ON(IS_ERR(dir)); + } else dir = dget(kobj->dentry); diff --git a/include/linux/namei.h b/include/linux/namei.h index d39a5a67e979..b7dd24917f0d 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -82,6 +82,7 @@ extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); extern void release_open_intent(struct nameidata *); extern struct dentry * lookup_one_len(const char *, struct dentry *, int); +extern struct dentry *lookup_one_len_kern(const char *, struct dentry *, int); extern int follow_down(struct vfsmount **, struct dentry **); extern int follow_up(struct vfsmount **, struct dentry **); -- cgit v1.2.3-59-g8ed1b From 075c1771526c85849ed22298d048bc07e400aee5 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Thu, 26 Apr 2007 00:12:06 -0700 Subject: define platform wakeup hook, use in pci_enable_wake() This defines a platform hook to enable/disable a device as a wakeup event source. It's initially for use with ACPI, but more generally it could be used whenever enable_irq_wake()/disable_irq_wake() don't suffice. The hook is called -- if available -- inside pci_enable_wake(); and the semantics of that call are enhanced so that support for PCI PME# is no longer needed. It can now work for devices with "legacy PCI PM", when platform support allows it. (That support would use some board-specific signal for for the same purpose as PME#.) 
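To make the shape of the new hook concrete, board or firmware code might install it roughly as follows (a hedged sketch only: board_set_wake_signal() and the initcall placement are invented here, and on ACPI systems the ACPI core is expected to provide the real implementation):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/init.h>

/* Hypothetical board support code; board_set_wake_signal() is a
 * made-up helper that routes the board-specific wake line for @dev. */
static int board_enable_wakeup(struct device *dev, int is_on)
{
        return board_set_wake_signal(dev, is_on);
}

static int __init board_pm_init(void)
{
        /* pci_enable_wake() will now call this hook automatically */
        platform_enable_wakeup = board_enable_wakeup;
        return 0;
}
arch_initcall(board_pm_init);

Once installed, pci_enable_wake() invokes the hook via call_platform_enable_wakeup() before touching the standard PM capability bits, which is what lets legacy-PM devices without PME# still be armed as wakeup sources.
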
[akpm@linux-foundation.org: Make it compile with CONFIG_PM=n] Signed-off-by: David Brownell Signed-off-by: Zhang Rui Cc: Len Brown Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- drivers/base/power/main.c | 3 +++ drivers/pci/pci.c | 58 +++++++++++++++++++++++++++++++++-------------- include/linux/pm.h | 19 ++++++++++++++++ 3 files changed, 63 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index bbbb973a9d3c..05dc8764e765 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -29,6 +29,9 @@ LIST_HEAD(dpm_off_irq); DECLARE_MUTEX(dpm_sem); DECLARE_MUTEX(dpm_list_sem); +int (*platform_enable_wakeup)(struct device *dev, int is_on); + + /** * device_pm_set_parent - Specify power dependency. * @dev: Device who needs power. diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index d3eab057b2d3..2a458279327a 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -891,31 +892,48 @@ pci_disable_device(struct pci_dev *dev) } /** - * pci_enable_wake - enable device to generate PME# when suspended - * @dev: - PCI device to operate on - * @state: - Current state of device. - * @enable: - Flag to enable or disable generation - * - * Set the bits in the device's PM Capabilities to generate PME# when - * the system is suspended. + * pci_enable_wake - enable PCI device as wakeup event source + * @dev: PCI device affected + * @state: PCI state from which device will issue wakeup events + * @enable: True to enable event generation; false to disable * - * -EIO is returned if device doesn't have PM Capabilities. - * -EINVAL is returned if device supports it, but can't generate wake events. - * 0 if operation is successful. - * + * This enables the device as a wakeup event source, or disables it. + * When such events involves platform-specific hooks, those hooks are + * called automatically by this routine. + * + * Devices with legacy power management (no standard PCI PM capabilities) + * always require such platform hooks. Depending on the platform, devices + * supporting the standard PCI PME# signal may require such platform hooks; + * they always update bits in config space to allow PME# generation. + * + * -EIO is returned if the device can't ever be a wakeup event source. + * -EINVAL is returned if the device can't generate wakeup events from + * the specified PCI state. Returns zero if the operation is successful. */ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { int pm; + int status; u16 value; + /* Note that drivers should verify device_may_wakeup(&dev->dev) + * before calling this function. Platform code should report + * errors when drivers try to enable wakeup on devices that + * can't issue wakeups, or on which wakeups were disabled by + * userspace updating the /sys/devices.../power/wakeup file. + */ + + status = call_platform_enable_wakeup(&dev->dev, enable); + /* find PCI PM capability in list */ pm = pci_find_capability(dev, PCI_CAP_ID_PM); - /* If device doesn't support PM Capabilities, but request is to disable - * wake events, it's a nop; otherwise fail */ - if (!pm) - return enable ? -EIO : 0; + /* If device doesn't support PM Capabilities, but caller wants to + * disable wake events, it's a NOP. Otherwise fail unless the + * platform hooks handled this legacy device already. + */ + if (!pm) + return enable ? 
status : 0; /* Check device's ability to generate PME# */ pci_read_config_word(dev,pm+PCI_PM_PMC,&value); @@ -924,8 +942,14 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ /* Check if it can generate PME# from requested state. */ - if (!value || !(value & (1 << state))) + if (!value || !(value & (1 << state))) { + /* if it can't, revert what the platform hook changed, + * always reporting the base "EINVAL, can't PME#" error + */ + if (enable) + call_platform_enable_wakeup(&dev->dev, 0); return enable ? -EINVAL : 0; + } pci_read_config_word(dev, pm + PCI_PM_CTRL, &value); @@ -936,7 +960,7 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) value &= ~PCI_PM_CTRL_PME_ENABLE; pci_write_config_word(dev, pm + PCI_PM_CTRL, value); - + return 0; } diff --git a/include/linux/pm.h b/include/linux/pm.h index 21db05ac7c0b..b0ab623adbf5 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -273,6 +273,20 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); __suspend_report_result(__FUNCTION__, fn, ret); \ } while (0) +/* + * Platform hook to activate device wakeup capability, if that's not already + * handled by enable_irq_wake() etc. + * Returns zero on success, else negative errno + */ +extern int (*platform_enable_wakeup)(struct device *dev, int is_on); + +static inline int call_platform_enable_wakeup(struct device *dev, int is_on) +{ + if (platform_enable_wakeup) + return (*platform_enable_wakeup)(dev, is_on); + return 0; +} + #else /* !CONFIG_PM */ static inline int device_suspend(pm_message_t state) @@ -294,6 +308,11 @@ static inline void dpm_runtime_resume(struct device * dev) #define suspend_report_result(fn, ret) do { } while (0) +static inline int call_platform_enable_wakeup(struct device *dev, int is_on) +{ + return -EIO; +} + #endif /* changes to device_may_wakeup take effect on the next pm state change. -- cgit v1.2.3-59-g8ed1b From a53c46dc8253cc613ad66a2ca7aad6de8b7e61b9 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 26 Apr 2007 11:43:58 +0200 Subject: s2ram: add arch irq disable/enable hooks After some more discussion this patch replaces it: From: Johannes Berg Subject: suspend: add arch irq disable/enable hooks For powermac, we need to do some things between suspending devices and device_power_off, for example setting the decrementer. This patch allows architectures to define arch_s2ram_{en,dis}able_irqs in their asm/suspend.h to have control over this step. Signed-off-by: Johannes Berg Acked-by: Pavel Machek Cc: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- include/linux/pm.h | 18 ++++++++++++++++++ kernel/power/main.c | 18 +++++++++++++++--- 2 files changed, 33 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/pm.h b/include/linux/pm.h index b0ab623adbf5..9bd86db4d395 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -166,6 +166,24 @@ extern struct pm_ops *pm_ops; extern int pm_suspend(suspend_state_t state); +/** + * arch_suspend_disable_irqs - disable IRQs for suspend + * + * Disables IRQs (in the default case). This is a weak symbol in the common + * code and thus allows architectures to override it if more needs to be + * done. Not called for suspend to disk. + */ +extern void arch_suspend_disable_irqs(void); + +/** + * arch_suspend_enable_irqs - enable IRQs after suspend + * + * Enables IRQs (in the default case). 
This is a weak symbol in the common + * code and thus allows architectures to override it if more needs to be + * done. Not called for suspend to disk. + */ +extern void arch_suspend_enable_irqs(void); + /* * Device power management */ diff --git a/kernel/power/main.c b/kernel/power/main.c index a064dfd8877a..3062e940d1fa 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -111,13 +111,24 @@ static int suspend_prepare(suspend_state_t state) return error; } +/* default implementation */ +void __attribute__ ((weak)) arch_suspend_disable_irqs(void) +{ + local_irq_disable(); +} + +/* default implementation */ +void __attribute__ ((weak)) arch_suspend_enable_irqs(void) +{ + local_irq_enable(); +} int suspend_enter(suspend_state_t state) { int error = 0; - unsigned long flags; - local_irq_save(flags); + arch_suspend_disable_irqs(); + BUG_ON(!irqs_disabled()); if ((error = device_power_down(PMSG_SUSPEND))) { printk(KERN_ERR "Some devices failed to power down\n"); @@ -126,7 +137,8 @@ int suspend_enter(suspend_state_t state) error = pm_ops->enter(state); device_power_up(); Done: - local_irq_restore(flags); + arch_suspend_enable_irqs(); + BUG_ON(irqs_disabled()); return error; } -- cgit v1.2.3-59-g8ed1b From 404d5b185b4eb56d6fa2f7bd27833f8df1c38ce4 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 26 Apr 2007 00:12:10 -0700 Subject: dev_dbg: check dev_dbg() arguments Duplicate what Zach Brown did for pr_debug in commit 8b2a1fd1b394c60eaa2587716102dd5e9b4e5990 [akpm@linux-foundation.org: fix a couple of things which broke] Signed-off-by: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hub.c | 4 +--- drivers/usb/host/ohci-hcd.c | 6 ------ include/linux/device.h | 6 +++++- 3 files changed, 6 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index b89a98e61323..7a6028599d62 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -119,8 +119,7 @@ MODULE_PARM_DESC(use_both_schemes, "first one fails"); -#ifdef DEBUG -static inline char *portspeed (int portstatus) +static inline char *portspeed(int portstatus) { if (portstatus & (1 << USB_PORT_FEAT_HIGHSPEED)) return "480 Mb/s"; @@ -129,7 +128,6 @@ static inline char *portspeed (int portstatus) else return "12 Mb/s"; } -#endif /* Note that hdev or one of its children must be locked! */ static inline struct usb_hub *hdev_to_hub(struct usb_device *hdev) diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index f0d29eda3c6d..e8bbe8bc2598 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -486,9 +486,6 @@ static int ohci_run (struct ohci_hcd *ohci) * or if bus glue did the same (e.g. for PCI add-in cards with * PCI PM support). 
*/ - ohci_dbg (ohci, "resetting from state '%s', control = 0x%x\n", - hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS), - ohci_readl (ohci, &ohci->regs->control)); if ((ohci->hc_control & OHCI_CTRL_RWC) != 0 && !device_may_wakeup(hcd->self.controller)) device_init_wakeup(hcd->self.controller, 1); @@ -744,9 +741,6 @@ static void ohci_stop (struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); - ohci_dbg (ohci, "stop %s controller (state 0x%02x)\n", - hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS), - hcd->state); ohci_dump (ohci, 1); flush_scheduled_work(); diff --git a/include/linux/device.h b/include/linux/device.h index 8511d14071b3..a0cd2ced31a9 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -571,7 +571,11 @@ extern const char *dev_driver_string(struct device *dev); #define dev_dbg(dev, format, arg...) \ dev_printk(KERN_DEBUG , dev , format , ## arg) #else -#define dev_dbg(dev, format, arg...) do { (void)(dev); } while (0) +static inline int __attribute__ ((format (printf, 2, 3))) +dev_dbg(struct device * dev, const char * fmt, ...) +{ + return 0; +} #endif #define dev_err(dev, format, arg...) \ -- cgit v1.2.3-59-g8ed1b From 5eee72e88416ef11f55791626440ac3c9018c4c0 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:00 -0300 Subject: V4L/DVB (5268): Add support for three new MPEG controls. Added V4L2_CID_MPEG_AUDIO_MUTE, V4L2_CID_MPEG_VIDEO_MUTE and V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS controls together with their implementation in the cx2341x module. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/cx2341x.c | 72 ++++++++++++++++++++++++++++++++++++--- drivers/media/video/v4l2-common.c | 10 ++++++ include/linux/videodev2.h | 4 +++ include/media/cx2341x.h | 4 +++ 4 files changed, 85 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c index d60cd5ecf821..88dbdddeec42 100644 --- a/drivers/media/video/cx2341x.c +++ b/drivers/media/video/cx2341x.c @@ -51,6 +51,7 @@ const u32 cx2341x_mpeg_ctrls[] = { V4L2_CID_MPEG_AUDIO_MODE_EXTENSION, V4L2_CID_MPEG_AUDIO_EMPHASIS, V4L2_CID_MPEG_AUDIO_CRC, + V4L2_CID_MPEG_AUDIO_MUTE, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_CID_MPEG_VIDEO_ASPECT, V4L2_CID_MPEG_VIDEO_B_FRAMES, @@ -60,6 +61,8 @@ const u32 cx2341x_mpeg_ctrls[] = { V4L2_CID_MPEG_VIDEO_BITRATE, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, + V4L2_CID_MPEG_VIDEO_MUTE, + V4L2_CID_MPEG_VIDEO_MUTE_YUV, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE, @@ -71,6 +74,7 @@ const u32 cx2341x_mpeg_ctrls[] = { V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP, + V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS, 0 }; @@ -102,6 +106,9 @@ static int cx2341x_get_ctrl(struct cx2341x_mpeg_params *params, case V4L2_CID_MPEG_AUDIO_CRC: ctrl->value = params->audio_crc; break; + case V4L2_CID_MPEG_AUDIO_MUTE: + ctrl->value = params->audio_mute; + break; case V4L2_CID_MPEG_VIDEO_ENCODING: ctrl->value = params->video_encoding; break; @@ -129,6 +136,12 @@ static int cx2341x_get_ctrl(struct cx2341x_mpeg_params *params, case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: ctrl->value = params->video_temporal_decimation; break; + case V4L2_CID_MPEG_VIDEO_MUTE: + ctrl->value = params->video_mute; + break; + case 
V4L2_CID_MPEG_VIDEO_MUTE_YUV: + ctrl->value = params->video_mute_yuv; + break; case V4L2_CID_MPEG_STREAM_TYPE: ctrl->value = params->stream_type; break; @@ -168,6 +181,9 @@ static int cx2341x_get_ctrl(struct cx2341x_mpeg_params *params, case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: ctrl->value = params->video_chroma_median_filter_bottom; break; + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + ctrl->value = params->stream_insert_nav_packets; + break; default: return -EINVAL; } @@ -201,6 +217,9 @@ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params, case V4L2_CID_MPEG_AUDIO_CRC: params->audio_crc = ctrl->value; break; + case V4L2_CID_MPEG_AUDIO_MUTE: + params->audio_mute = ctrl->value; + break; case V4L2_CID_MPEG_VIDEO_ASPECT: params->video_aspect = ctrl->value; break; @@ -243,6 +262,12 @@ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params, case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: params->video_temporal_decimation = ctrl->value; break; + case V4L2_CID_MPEG_VIDEO_MUTE: + params->video_mute = (ctrl->value != 0); + break; + case V4L2_CID_MPEG_VIDEO_MUTE_YUV: + params->video_mute_yuv = ctrl->value; + break; case V4L2_CID_MPEG_STREAM_TYPE: params->stream_type = ctrl->value; params->video_encoding = @@ -290,6 +315,9 @@ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params, case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: params->video_chroma_median_filter_bottom = ctrl->value; break; + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + params->stream_insert_nav_packets = ctrl->value; + break; default: return -EINVAL; } @@ -336,6 +364,9 @@ static int cx2341x_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 ma case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: name = "Median Chroma Filter Minimum"; break; + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + name = "Insert Navigation Packets"; + break; default: return v4l2_ctrl_query_fill(qctrl, min, max, step, def); @@ -350,6 +381,12 @@ static int cx2341x_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 ma min = 0; step = 1; break; + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + qctrl->type = V4L2_CTRL_TYPE_BOOLEAN; + min = 0; + max = 1; + step = 1; + break; default: qctrl->type = V4L2_CTRL_TYPE_INTEGER; break; @@ -505,6 +542,9 @@ int cx2341x_ctrl_query(struct cx2341x_mpeg_params *params, struct v4l2_queryctrl qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + return cx2341x_ctrl_query_fill(qctrl, 0, 1, 1, 0); + default: return v4l2_ctrl_query_fill_std(qctrl); @@ -656,6 +696,7 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p) /* stream */ .stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS, .stream_vbi_fmt = V4L2_MPEG_STREAM_VBI_FMT_NONE, + .stream_insert_nav_packets = 0, /* audio */ .audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000, @@ -665,6 +706,7 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p) .audio_mode_extension = V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4, .audio_emphasis = V4L2_MPEG_AUDIO_EMPHASIS_NONE, .audio_crc = V4L2_MPEG_AUDIO_CRC_NONE, + .audio_mute = 0, /* video */ .video_encoding = V4L2_MPEG_VIDEO_ENCODING_MPEG_2, @@ -676,6 +718,8 @@ void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p) .video_bitrate = 6000000, .video_bitrate_peak = 8000000, .video_temporal_decimation = 0, + .video_mute = 0, + .video_mute_yuv = 0x008080, /* YCbCr value for black */ /* encoding filters */ .video_spatial_filter_mode = 
V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, @@ -779,6 +823,10 @@ int cx2341x_update(void *priv, cx2341x_mbox_func func, err = cx2341x_api(priv, func, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, new->audio_properties); if (err) return err; } + if (old == NULL || old->audio_mute != new->audio_mute) { + err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_AUDIO, 1, new->audio_mute); + if (err) return err; + } if (old == NULL || old->video_bitrate_mode != new->video_bitrate_mode || old->video_bitrate != new->video_bitrate || old->video_bitrate_peak != new->video_bitrate_peak) { @@ -826,6 +874,15 @@ int cx2341x_update(void *priv, cx2341x_mbox_func func, new->video_temporal_decimation); if (err) return err; } + if (old == NULL || old->video_mute != new->video_mute || + (new->video_mute && old->video_mute_yuv != new->video_mute_yuv)) { + err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_VIDEO, 1, new->video_mute | (new->video_mute_yuv << 8)); + if (err) return err; + } + if (old == NULL || old->stream_insert_nav_packets != new->stream_insert_nav_packets) { + err = cx2341x_api(priv, func, CX2341X_ENC_MISC, 2, 7, new->stream_insert_nav_packets); + if (err) return err; + } return 0; } @@ -854,18 +911,22 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix) int temporal = p->video_temporal_filter; /* Stream */ - printk(KERN_INFO "%s: Stream: %s\n", + printk(KERN_INFO "%s: Stream: %s", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE)); + if (p->stream_insert_nav_packets) + printk(" (with navigation packets)"); + printk("\n"); printk(KERN_INFO "%s: VBI Format: %s\n", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT)); /* Video */ - printk(KERN_INFO "%s: Video: %dx%d, %d fps\n", + printk(KERN_INFO "%s: Video: %dx%d, %d fps%s\n", prefix, p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1), - p->is_50hz ? 25 : 30); + p->is_50hz ? 25 : 30, + (p->video_mute) ? " (muted)" : ""); printk(KERN_INFO "%s: Video: %s, %s, %s, %d", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING), @@ -886,12 +947,13 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix) } /* Audio */ - printk(KERN_INFO "%s: Audio: %s, %s, %s, %s", + printk(KERN_INFO "%s: Audio: %s, %s, %s, %s%s", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ), cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING), cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_L2_BITRATE), - cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE)); + cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE), + p->audio_mute ? 
" (muted)" : ""); if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO) { printk(", %s", cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE_EXTENSION)); diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index 54747606eae1..43a8467f0209 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c @@ -680,6 +680,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: name = "Audio Stereo Mode Extension"; break; case V4L2_CID_MPEG_AUDIO_EMPHASIS: name = "Audio Emphasis"; break; case V4L2_CID_MPEG_AUDIO_CRC: name = "Audio CRC"; break; + case V4L2_CID_MPEG_AUDIO_MUTE: name = "Audio Mute"; break; case V4L2_CID_MPEG_VIDEO_ENCODING: name = "Video Encoding"; break; case V4L2_CID_MPEG_VIDEO_ASPECT: name = "Video Aspect"; break; case V4L2_CID_MPEG_VIDEO_B_FRAMES: name = "Video B Frames"; break; @@ -690,6 +691,8 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste case V4L2_CID_MPEG_VIDEO_BITRATE: name = "Video Bitrate"; break; case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: name = "Video Peak Bitrate"; break; case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: name = "Video Temporal Decimation"; break; + case V4L2_CID_MPEG_VIDEO_MUTE: name = "Video Mute"; break; + case V4L2_CID_MPEG_VIDEO_MUTE_YUV: name = "Video Mute YUV"; break; case V4L2_CID_MPEG_STREAM_TYPE: name = "Stream Type"; break; case V4L2_CID_MPEG_STREAM_PID_PMT: name = "Stream PMT Program ID"; break; case V4L2_CID_MPEG_STREAM_PID_AUDIO: name = "Stream Audio Program ID"; break; @@ -705,6 +708,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste switch (qctrl->id) { case V4L2_CID_AUDIO_MUTE: case V4L2_CID_AUDIO_LOUDNESS: + case V4L2_CID_MPEG_AUDIO_MUTE: case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: case V4L2_CID_MPEG_VIDEO_PULLDOWN: qctrl->type = V4L2_CTRL_TYPE_BOOLEAN; @@ -838,6 +842,8 @@ int v4l2_ctrl_query_fill_std(struct v4l2_queryctrl *qctrl) V4L2_MPEG_AUDIO_CRC_NONE, V4L2_MPEG_AUDIO_CRC_CRC16, 1, V4L2_MPEG_AUDIO_CRC_NONE); + case V4L2_CID_MPEG_AUDIO_MUTE: + return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0); case V4L2_CID_MPEG_VIDEO_ENCODING: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_VIDEO_ENCODING_MPEG_1, @@ -867,6 +873,10 @@ int v4l2_ctrl_query_fill_std(struct v4l2_queryctrl *qctrl) return v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 8000000); case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0); + case V4L2_CID_MPEG_VIDEO_MUTE: + return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0); + case V4L2_CID_MPEG_VIDEO_MUTE_YUV: /* Init YUV (really YCbCr) to black */ + return v4l2_ctrl_query_fill(qctrl, 0, 0xffffff, 1, 0x008080); case V4L2_CID_MPEG_STREAM_TYPE: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_STREAM_TYPE_MPEG2_PS, diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 441b877bf150..7b83d17c4cdd 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -1037,6 +1037,7 @@ enum v4l2_mpeg_audio_crc { V4L2_MPEG_AUDIO_CRC_NONE = 0, V4L2_MPEG_AUDIO_CRC_CRC16 = 1, }; +#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109) /* MPEG video */ #define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200) @@ -1063,6 +1064,8 @@ enum v4l2_mpeg_video_bitrate_mode { #define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207) #define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208) #define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209) +#define V4L2_CID_MPEG_VIDEO_MUTE 
(V4L2_CID_MPEG_BASE+210) +#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) /* MPEG-class control IDs specific to the CX2584x driver as defined by V4L2 */ #define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) @@ -1103,6 +1106,7 @@ enum v4l2_mpeg_cx2341x_video_median_filter_type { #define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8) #define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+9) #define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10) +#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11) /* * T U N I N G diff --git a/include/media/cx2341x.h b/include/media/cx2341x.h index d758a52cf556..583b0621ff43 100644 --- a/include/media/cx2341x.h +++ b/include/media/cx2341x.h @@ -40,6 +40,7 @@ struct cx2341x_mpeg_params { /* stream */ enum v4l2_mpeg_stream_type stream_type; enum v4l2_mpeg_stream_vbi_fmt stream_vbi_fmt; + u16 stream_insert_nav_packets; /* audio */ enum v4l2_mpeg_audio_sampling_freq audio_sampling_freq; @@ -50,6 +51,7 @@ struct cx2341x_mpeg_params { enum v4l2_mpeg_audio_emphasis audio_emphasis; enum v4l2_mpeg_audio_crc audio_crc; u16 audio_properties; + u16 audio_mute; /* video */ enum v4l2_mpeg_video_encoding video_encoding; @@ -61,6 +63,8 @@ struct cx2341x_mpeg_params { u32 video_bitrate; u32 video_bitrate_peak; u16 video_temporal_decimation; + u16 video_mute; + u32 video_mute_yuv; /* encoding filters */ enum v4l2_mpeg_cx2341x_video_spatial_filter_mode video_spatial_filter_mode; -- cgit v1.2.3-59-g8ed1b From 206ebaf32795cf1582b1e2ff2ec6a560c9e986b8 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:01 -0300 Subject: V4L/DVB (5272): Add V4L2_CAP_VIDEO_OUTPUT_POS capability Add V4L2_CAP_VIDEO_OUTPUT_POS capability and x, y position coordinates to struct v4l2_pix_format. This is needed to support positioning the MPEG/YUV output of the cx23415. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/linux/videodev2.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 7b83d17c4cdd..e4ed5667a55d 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -227,6 +227,7 @@ struct v4l2_capability #define V4L2_CAP_SLICED_VBI_CAPTURE 0x00000040 /* Is a sliced VBI capture device */ #define V4L2_CAP_SLICED_VBI_OUTPUT 0x00000080 /* Is a sliced VBI output device */ #define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */ +#define V4L2_CAP_VIDEO_OUTPUT_POS 0x00000200 /* Video output can have x,y coords */ #define V4L2_CAP_TUNER 0x00010000 /* has a tuner */ #define V4L2_CAP_AUDIO 0x00020000 /* has audio support */ @@ -249,6 +250,8 @@ struct v4l2_pix_format __u32 sizeimage; enum v4l2_colorspace colorspace; __u32 priv; /* private data, depends on pixelformat */ + __u32 left; /* only valid if V4L2_CAP_VIDEO_OUTPUT_POS is set */ + __u32 top; /* only valid if V4L2_CAP_VIDEO_OUTPUT_POS is set */ }; /* Pixel format FOURCC depth Description */ -- cgit v1.2.3-59-g8ed1b From b2787845fb91da18ebb079dc9297f92d990e9fe1 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:02 -0300 Subject: V4L/DVB (5289): Add support for video output overlays. Add V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY support. Also add support for local and global alpha overlays. Add new field enums V4L2_FIELD_INTERLACED_TB and V4L2_FIELD_INTERLACED_BT. 
These changes are needed to support the ivtv On Screen Display features. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/v4l2-common.c | 5 ++- drivers/media/video/videodev.c | 23 ++++++++++++++ include/linux/videodev2.h | 66 ++++++++++++++++++++++++++------------- include/media/v4l2-dev.h | 8 +++++ 4 files changed, 79 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index 43a8467f0209..740ea5a9202f 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c @@ -260,6 +260,8 @@ char *v4l2_field_names[] = { [V4L2_FIELD_SEQ_TB] = "seq-tb", [V4L2_FIELD_SEQ_BT] = "seq-bt", [V4L2_FIELD_ALTERNATE] = "alternate", + [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb", + [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt", }; char *v4l2_type_names[] = { @@ -269,7 +271,8 @@ char *v4l2_type_names[] = { [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap", [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap", - [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "slicec-vbi-out", + [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out", + [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "video-out-over", }; diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c index 011938fb7e0e..5c9f2116d7bf 100644 --- a/drivers/media/video/videodev.c +++ b/drivers/media/video/videodev.c @@ -318,6 +318,7 @@ static char *v4l2_type_names_FIXME[] = { [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out", [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out", [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-capture", + [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "video-out-over", [V4L2_BUF_TYPE_PRIVATE] = "private", }; @@ -330,6 +331,8 @@ static char *v4l2_field_names_FIXME[] = { [V4L2_FIELD_SEQ_TB] = "seq-tb", [V4L2_FIELD_SEQ_BT] = "seq-bt", [V4L2_FIELD_ALTERNATE] = "alternate", + [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb", + [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt", }; #define prt_names(a,arr) (((a)>=0)&&((a)vidioc_try_fmt_vbi_output) return (0); break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (vfd->vidioc_try_fmt_output_overlay) + return (0); + break; case V4L2_BUF_TYPE_PRIVATE: if (vfd->vidioc_try_fmt_type_private) return (0); @@ -525,6 +532,10 @@ static int __video_do_ioctl(struct inode *inode, struct file *file, ret=vfd->vidioc_enum_fmt_vbi_output(file, fh, f); break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (vfd->vidioc_enum_fmt_output_overlay) + ret=vfd->vidioc_enum_fmt_output_overlay(file, fh, f); + break; case V4L2_BUF_TYPE_PRIVATE: if (vfd->vidioc_enum_fmt_type_private) ret=vfd->vidioc_enum_fmt_type_private(file, @@ -582,6 +593,10 @@ static int __video_do_ioctl(struct inode *inode, struct file *file, ret=vfd->vidioc_g_fmt_video_output(file, fh, f); break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (vfd->vidioc_g_fmt_output_overlay) + ret=vfd->vidioc_g_fmt_output_overlay(file, fh, f); + break; case V4L2_BUF_TYPE_VBI_OUTPUT: if (vfd->vidioc_g_fmt_vbi_output) ret=vfd->vidioc_g_fmt_vbi_output(file, fh, f); @@ -630,6 +645,10 @@ static int __video_do_ioctl(struct inode *inode, struct file *file, ret=vfd->vidioc_s_fmt_video_output(file, fh, f); break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (vfd->vidioc_s_fmt_output_overlay) + ret=vfd->vidioc_s_fmt_output_overlay(file, fh, f); + break; case V4L2_BUF_TYPE_VBI_OUTPUT: if (vfd->vidioc_s_fmt_vbi_output) ret=vfd->vidioc_s_fmt_vbi_output(file, @@ -680,6 +699,10 @@ static int __video_do_ioctl(struct 
inode *inode, struct file *file, ret=vfd->vidioc_try_fmt_video_output(file, fh, f); break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (vfd->vidioc_try_fmt_output_overlay) + ret=vfd->vidioc_try_fmt_output_overlay(file, fh, f); + break; case V4L2_BUF_TYPE_VBI_OUTPUT: if (vfd->vidioc_try_fmt_vbi_output) ret=vfd->vidioc_try_fmt_vbi_output(file, diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index e4ed5667a55d..a08ef2c16300 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -96,44 +96,60 @@ * E N U M S */ enum v4l2_field { - V4L2_FIELD_ANY = 0, /* driver can choose from none, - top, bottom, interlaced - depending on whatever it thinks - is approximate ... */ - V4L2_FIELD_NONE = 1, /* this device has no fields ... */ - V4L2_FIELD_TOP = 2, /* top field only */ - V4L2_FIELD_BOTTOM = 3, /* bottom field only */ - V4L2_FIELD_INTERLACED = 4, /* both fields interlaced */ - V4L2_FIELD_SEQ_TB = 5, /* both fields sequential into one - buffer, top-bottom order */ - V4L2_FIELD_SEQ_BT = 6, /* same as above + bottom-top order */ - V4L2_FIELD_ALTERNATE = 7, /* both fields alternating into - separate buffers */ + V4L2_FIELD_ANY = 0, /* driver can choose from none, + top, bottom, interlaced + depending on whatever it thinks + is approximate ... */ + V4L2_FIELD_NONE = 1, /* this device has no fields ... */ + V4L2_FIELD_TOP = 2, /* top field only */ + V4L2_FIELD_BOTTOM = 3, /* bottom field only */ + V4L2_FIELD_INTERLACED = 4, /* both fields interlaced */ + V4L2_FIELD_SEQ_TB = 5, /* both fields sequential into one + buffer, top-bottom order */ + V4L2_FIELD_SEQ_BT = 6, /* same as above + bottom-top order */ + V4L2_FIELD_ALTERNATE = 7, /* both fields alternating into + separate buffers */ + V4L2_FIELD_INTERLACED_TB = 8, /* both fields interlaced, top field + first and the top field is + transmitted first */ + V4L2_FIELD_INTERLACED_BT = 9, /* both fields interlaced, top field + first and the bottom field is + transmitted first */ }; #define V4L2_FIELD_HAS_TOP(field) \ ((field) == V4L2_FIELD_TOP ||\ (field) == V4L2_FIELD_INTERLACED ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT ||\ (field) == V4L2_FIELD_SEQ_TB ||\ (field) == V4L2_FIELD_SEQ_BT) #define V4L2_FIELD_HAS_BOTTOM(field) \ ((field) == V4L2_FIELD_BOTTOM ||\ (field) == V4L2_FIELD_INTERLACED ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT ||\ (field) == V4L2_FIELD_SEQ_TB ||\ (field) == V4L2_FIELD_SEQ_BT) #define V4L2_FIELD_HAS_BOTH(field) \ ((field) == V4L2_FIELD_INTERLACED ||\ - (field) == V4L2_FIELD_SEQ_TB ||\ + (field) == V4L2_FIELD_INTERLACED_TB ||\ + (field) == V4L2_FIELD_INTERLACED_BT ||\ + (field) == V4L2_FIELD_SEQ_TB ||\ (field) == V4L2_FIELD_SEQ_BT) enum v4l2_buf_type { - V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, - V4L2_BUF_TYPE_VIDEO_OUTPUT = 2, - V4L2_BUF_TYPE_VIDEO_OVERLAY = 3, - V4L2_BUF_TYPE_VBI_CAPTURE = 4, - V4L2_BUF_TYPE_VBI_OUTPUT = 5, - V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6, - V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7, - V4L2_BUF_TYPE_PRIVATE = 0x80, + V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, + V4L2_BUF_TYPE_VIDEO_OUTPUT = 2, + V4L2_BUF_TYPE_VIDEO_OVERLAY = 3, + V4L2_BUF_TYPE_VBI_CAPTURE = 4, + V4L2_BUF_TYPE_VBI_OUTPUT = 5, + V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6, + V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7, +#if 1 + /* Experimental */ + V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8, +#endif + V4L2_BUF_TYPE_PRIVATE = 0x80, }; enum v4l2_ctrl_type { @@ -228,6 +244,7 @@ struct v4l2_capability #define V4L2_CAP_SLICED_VBI_OUTPUT 0x00000080 /* Is a sliced VBI output 
device */ #define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */ #define V4L2_CAP_VIDEO_OUTPUT_POS 0x00000200 /* Video output can have x,y coords */ +#define V4L2_CAP_VIDEO_OUTPUT_OVERLAY 0x00000400 /* Can do video output overlay */ #define V4L2_CAP_TUNER 0x00010000 /* has a tuner */ #define V4L2_CAP_AUDIO 0x00020000 /* has audio support */ @@ -599,10 +616,14 @@ struct v4l2_framebuffer #define V4L2_FBUF_CAP_CHROMAKEY 0x0002 #define V4L2_FBUF_CAP_LIST_CLIPPING 0x0004 #define V4L2_FBUF_CAP_BITMAP_CLIPPING 0x0008 +#define V4L2_FBUF_CAP_LOCAL_ALPHA 0x0010 +#define V4L2_FBUF_CAP_GLOBAL_ALPHA 0x0020 /* Flags for the 'flags' field. */ #define V4L2_FBUF_FLAG_PRIMARY 0x0001 #define V4L2_FBUF_FLAG_OVERLAY 0x0002 #define V4L2_FBUF_FLAG_CHROMAKEY 0x0004 +#define V4L2_FBUF_FLAG_LOCAL_ALPHA 0x0008 +#define V4L2_FBUF_FLAG_GLOBAL_ALPHA 0x0010 struct v4l2_clip { @@ -618,6 +639,7 @@ struct v4l2_window struct v4l2_clip __user *clips; __u32 clipcount; void __user *bitmap; + __u8 global_alpha; }; /* diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 1dd3d3239ecf..2693f3ae6ffb 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -127,6 +127,8 @@ struct video_device struct v4l2_fmtdesc *f); int (*vidioc_enum_fmt_video_output)(struct file *file, void *fh, struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_output_overlay) (struct file *file, void *fh, + struct v4l2_fmtdesc *f); int (*vidioc_enum_fmt_vbi_output) (struct file *file, void *fh, struct v4l2_fmtdesc *f); int (*vidioc_enum_fmt_type_private)(struct file *file, void *fh, @@ -145,6 +147,8 @@ struct video_device struct v4l2_format *f); int (*vidioc_g_fmt_video_output)(struct file *file, void *fh, struct v4l2_format *f); + int (*vidioc_g_fmt_output_overlay) (struct file *file, void *fh, + struct v4l2_format *f); int (*vidioc_g_fmt_type_private)(struct file *file, void *fh, struct v4l2_format *f); @@ -162,6 +166,8 @@ struct video_device struct v4l2_format *f); int (*vidioc_s_fmt_video_output)(struct file *file, void *fh, struct v4l2_format *f); + int (*vidioc_s_fmt_output_overlay) (struct file *file, void *fh, + struct v4l2_format *f); int (*vidioc_s_fmt_type_private)(struct file *file, void *fh, struct v4l2_format *f); @@ -178,6 +184,8 @@ struct video_device struct v4l2_format *f); int (*vidioc_try_fmt_video_output)(struct file *file, void *fh, struct v4l2_format *f); + int (*vidioc_try_fmt_output_overlay)(struct file *file, void *fh, + struct v4l2_format *f); int (*vidioc_try_fmt_type_private)(struct file *file, void *fh, struct v4l2_format *f); -- cgit v1.2.3-59-g8ed1b From 045290b2a90ff1be60196a061aadecf70eb6bcc3 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:04 -0300 Subject: V4L/DVB (5290): Add support for VIDIOC_INT_G/S_STD_OUTPUT Added VIDIOC_INT_G_STD_OUTPUT and VIDIOC_INT_S_STD_OUTPUT to allow drivers to set the TV standard for video output separately from the video capture. This is needed for cx23415 support where the decoder is separate from the encoder and can have a different TV standard. Modified the saa7127 module to listen to VIDIOC_INT_G/S_STD_OUTPUT instead of VIDIOC_G/S_STD. 
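For illustration only, a rough sketch of how a bridge driver that controls both a capture decoder and an output encoder might now route the two standards separately; struct foo_dev and foo_call_i2c_clients() are hypothetical placeholders for whatever private data and i2c broadcast helper the bridge driver really uses (e.g. saa7134_i2c_call_clients()):

static void foo_set_standards(struct foo_dev *dev, v4l2_std_id capture_std,
			      v4l2_std_id output_std)
{
	/* the capture side (e.g. saa7115) keeps listening to VIDIOC_S_STD */
	foo_call_i2c_clients(dev, VIDIOC_S_STD, &capture_std);
	/* the output encoder (e.g. saa7127) now gets its own standard */
	foo_call_i2c_clients(dev, VIDIOC_INT_S_STD_OUTPUT, &output_std);
}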
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/saa7127.c | 4 ++-- drivers/media/video/v4l2-common.c | 5 ++++- include/media/v4l2-common.h | 8 ++++++++ 3 files changed, 14 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c index 654863db1591..50dbb76d4a7f 100644 --- a/drivers/media/video/saa7127.c +++ b/drivers/media/video/saa7127.c @@ -550,12 +550,12 @@ static int saa7127_command(struct i2c_client *client, struct v4l2_routing *route = arg; switch (cmd) { - case VIDIOC_S_STD: + case VIDIOC_INT_S_STD_OUTPUT: if (state->std == *(v4l2_std_id *)arg) break; return saa7127_set_std(client, *(v4l2_std_id *)arg); - case VIDIOC_G_STD: + case VIDIOC_INT_G_STD_OUTPUT: *(v4l2_std_id *)arg = state->std; break; diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index 740ea5a9202f..4a3635cd6f9e 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c @@ -420,7 +420,10 @@ static const char *v4l2_int_ioctls[] = { [_IOC_NR(VIDIOC_INT_G_AUDIO_ROUTING)] = "VIDIOC_INT_G_AUDIO_ROUTING", [_IOC_NR(VIDIOC_INT_S_VIDEO_ROUTING)] = "VIDIOC_INT_S_VIDEO_ROUTING", [_IOC_NR(VIDIOC_INT_G_VIDEO_ROUTING)] = "VIDIOC_INT_G_VIDEO_ROUTING", - [_IOC_NR(VIDIOC_INT_S_CRYSTAL_FREQ)] = "VIDIOC_INT_S_CRYSTAL_FREQ" + [_IOC_NR(VIDIOC_INT_S_CRYSTAL_FREQ)] = "VIDIOC_INT_S_CRYSTAL_FREQ", + [_IOC_NR(VIDIOC_INT_INIT)] = "VIDIOC_INT_INIT", + [_IOC_NR(VIDIOC_INT_G_STD_OUTPUT)] = "VIDIOC_INT_G_STD_OUTPUT", + [_IOC_NR(VIDIOC_INT_S_STD_OUTPUT)] = "VIDIOC_INT_S_STD_OUTPUT", }; #define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls) diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 6eaeec98ed89..abb9ce9b21b8 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -254,4 +254,12 @@ struct v4l2_crystal_freq { default values. */ #define VIDIOC_INT_INIT _IOW ('d', 114, u32) +/* Set v4l2_std_id for video OUTPUT devices. This is ignored by + video input devices. */ +#define VIDIOC_INT_S_STD_OUTPUT _IOW ('d', 115, v4l2_std_id) + +/* Get v4l2_std_id for video OUTPUT devices. This is ignored by + video input devices. */ +#define VIDIOC_INT_G_STD_OUTPUT _IOW ('d', 116, v4l2_std_id) + #endif /* V4L2_COMMON_H_ */ -- cgit v1.2.3-59-g8ed1b From 3434eb7e14d9587ee56f3462bcfa5726b62dadb9 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:08 -0300 Subject: V4L/DVB (5306): Add support for VIDIOC_G_CHIP_IDENT VIDIOC_G_CHIP_IDENT improves debugging of card problems: it can be used to detect which chips are on the board and based on that information selected register dumps can be made, making it easy to debug complicated media chips containing tens or hundreds of registers. This ioctl replaces the internal VIDIOC_INT_G_CHIP_IDENT ioctl. 
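A minimal user-space sketch of the intended debugging use; the device node and the V4L2_CHIP_MATCH_HOST match type (taken from the existing register-debug API) are assumptions of this example, not something this patch mandates:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_chip_ident chip;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&chip, 0, sizeof(chip));
	chip.match_type = V4L2_CHIP_MATCH_HOST;	/* identify the bridge chip */
	chip.match_chip = 0;			/* first host chip */
	if (ioctl(fd, VIDIOC_G_CHIP_IDENT, &chip) == 0)
		printf("ident %u, revision 0x%x\n", chip.ident, chip.revision);
	close(fd);
	return 0;
}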
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/cafe_ccic.c | 8 +++- drivers/media/video/cx25840/cx25840-core.c | 9 +++-- drivers/media/video/cx25840/cx25840-core.h | 3 +- drivers/media/video/ov7670.c | 6 +-- drivers/media/video/saa7115.c | 9 ++--- drivers/media/video/saa7127.c | 8 ++-- drivers/media/video/v4l2-common.c | 21 +++++++++- drivers/media/video/videodev.c | 10 +++++ include/linux/videodev2.h | 10 +++++ include/media/v4l2-chip-ident.h | 62 ++++++++++++++++++++++++++++++ include/media/v4l2-common.h | 39 +------------------ include/media/v4l2-dev.h | 2 + 12 files changed, 130 insertions(+), 57 deletions(-) create mode 100644 include/media/v4l2-chip-ident.h (limited to 'include') diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c index 710c11a68296..4d4db7b2b611 100644 --- a/drivers/media/video/cafe_ccic.c +++ b/drivers/media/video/cafe_ccic.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -164,7 +165,7 @@ struct cafe_camera struct tasklet_struct s_tasklet; /* Current operating parameters */ - enum v4l2_chip_ident sensor_type; /* Currently ov7670 only */ + u32 sensor_type; /* Currently ov7670 only */ struct v4l2_pix_format pix_format; /* Locks */ @@ -818,6 +819,7 @@ static int __cafe_cam_reset(struct cafe_camera *cam) */ static int cafe_cam_init(struct cafe_camera *cam) { + struct v4l2_chip_ident chip = { V4L2_CHIP_MATCH_I2C_ADDR, 0, 0, 0 }; int ret; mutex_lock(&cam->s_mutex); @@ -827,9 +829,11 @@ static int cafe_cam_init(struct cafe_camera *cam) ret = __cafe_cam_reset(cam); if (ret) goto out; - ret = __cafe_cam_cmd(cam, VIDIOC_INT_G_CHIP_IDENT, &cam->sensor_type); + chip.match_chip = cam->sensor->addr; + ret = __cafe_cam_cmd(cam, VIDIOC_G_CHIP_IDENT, &chip); if (ret) goto out; + cam->sensor_type = chip.ident; // if (cam->sensor->addr != OV7xx0_SID) { if (cam->sensor_type != V4L2_IDENT_OV7670) { cam_err(cam, "Unsupported sensor type %d", cam->sensor->addr); diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c index 774d2536555b..1757a588970f 100644 --- a/drivers/media/video/cx25840/cx25840-core.c +++ b/drivers/media/video/cx25840/cx25840-core.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include "cx25840-core.h" @@ -827,9 +828,8 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd, cx25840_initialize(client, 0); break; - case VIDIOC_INT_G_CHIP_IDENT: - *(enum v4l2_chip_ident *)arg = state->id; - break; + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, state->id, state->rev); default: return -EINVAL; @@ -847,7 +847,7 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address, { struct i2c_client *client; struct cx25840_state *state; - enum v4l2_chip_ident id; + u32 id; u16 device_id; /* Check if the adapter supports the needed features @@ -902,6 +902,7 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address, state->audmode = V4L2_TUNER_MODE_LANG1; state->vbi_line_offset = 8; state->id = id; + state->rev = device_id; i2c_attach_client(client); diff --git a/drivers/media/video/cx25840/cx25840-core.h b/drivers/media/video/cx25840/cx25840-core.h index 28049064dd7d..f4b56d2fd6b6 100644 --- a/drivers/media/video/cx25840/cx25840-core.h +++ b/drivers/media/video/cx25840/cx25840-core.h @@ -43,7 +43,8 @@ struct cx25840_state { u32 audclk_freq; int audmode; int vbi_line_offset; - enum v4l2_chip_ident id; + u32 id; + u32 rev; int 
is_cx25836; }; diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c index 5ed0adc4ca26..5234762c5427 100644 --- a/drivers/media/video/ov7670.c +++ b/drivers/media/video/ov7670.c @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -1270,9 +1271,8 @@ static int ov7670_command(struct i2c_client *client, unsigned int cmd, void *arg) { switch (cmd) { - case VIDIOC_INT_G_CHIP_IDENT: - * (enum v4l2_chip_ident *) arg = V4L2_IDENT_OV7670; - return 0; + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_OV7670, 0); case VIDIOC_INT_RESET: ov7670_reset(client); diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c index 4d5bbd859de1..26c9b64c748c 100644 --- a/drivers/media/video/saa7115.c +++ b/drivers/media/video/saa7115.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include @@ -80,7 +81,7 @@ struct saa711x_state { int sat; int width; int height; - enum v4l2_chip_ident ident; + u32 ident; u32 audclk_freq; u32 crystal_freq; u8 ucgc; @@ -1232,7 +1233,6 @@ static void saa711x_decode_vbi_line(struct i2c_client *client, static int saa711x_command(struct i2c_client *client, unsigned int cmd, void *arg) { struct saa711x_state *state = i2c_get_clientdata(client); - int *iarg = arg; /* ioctls to allow direct access to the saa7115 registers for testing */ switch (cmd) { @@ -1437,9 +1437,8 @@ static int saa711x_command(struct i2c_client *client, unsigned int cmd, void *ar } #endif - case VIDIOC_INT_G_CHIP_IDENT: - *iarg = state->ident; - break; + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, state->ident, 0); default: return -EINVAL; diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c index 50dbb76d4a7f..9f986930490f 100644 --- a/drivers/media/video/saa7127.c +++ b/drivers/media/video/saa7127.c @@ -54,6 +54,7 @@ #include #include #include +#include #include static int debug = 0; @@ -234,7 +235,7 @@ static struct i2c_reg_value saa7127_init_config_50hz[] = { struct saa7127_state { v4l2_std_id std; - enum v4l2_chip_ident ident; + u32 ident; enum saa7127_input_type input_type; enum saa7127_output_type output_type; int video_enable; @@ -650,9 +651,8 @@ static int saa7127_command(struct i2c_client *client, break; } - case VIDIOC_INT_G_CHIP_IDENT: - *(enum v4l2_chip_ident *)arg = state->ident; - break; + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, state->ident, 0); default: return -EINVAL; diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index 4a3635cd6f9e..49f1df74aa21 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c @@ -60,6 +60,7 @@ #include #define __OLD_VIDIOC_ /* To allow fixing old calls*/ #include +#include #ifdef CONFIG_KMOD #include @@ -383,6 +384,8 @@ static const char *v4l2_ioctls[] = { [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER", [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER", + + [_IOC_NR(VIDIOC_G_CHIP_IDENT)] = "VIDIOC_G_CHIP_IDENT", #endif }; #define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) @@ -413,7 +416,6 @@ static const char *v4l2_int_ioctls[] = { [_IOC_NR(VIDIOC_INT_DECODE_VBI_LINE)] = "VIDIOC_INT_DECODE_VBI_LINE", [_IOC_NR(VIDIOC_INT_S_VBI_DATA)] = "VIDIOC_INT_S_VBI_DATA", [_IOC_NR(VIDIOC_INT_G_VBI_DATA)] = "VIDIOC_INT_G_VBI_DATA", - [_IOC_NR(VIDIOC_INT_G_CHIP_IDENT)] = "VIDIOC_INT_G_CHIP_IDENT", [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ", [_IOC_NR(VIDIOC_INT_S_STANDBY)] = 
"VIDIOC_INT_S_STANDBY", [_IOC_NR(VIDIOC_INT_S_AUDIO_ROUTING)] = "VIDIOC_INT_S_AUDIO_ROUTING", @@ -981,6 +983,22 @@ int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 match_type, u32 match_c } } +int v4l2_chip_ident_i2c_client(struct i2c_client *c, struct v4l2_chip_ident *chip, + u32 ident, u32 revision) +{ + if (!v4l2_chip_match_i2c_client(c, chip->match_type, chip->match_chip)) + return 0; + if (chip->ident == V4L2_IDENT_NONE) { + chip->ident = ident; + chip->revision = revision; + } + else { + chip->ident = V4L2_IDENT_AMBIGUOUS; + chip->revision = 0; + } + return 0; +} + int v4l2_chip_match_host(u32 match_type, u32 match_chip) { switch (match_type) { @@ -1015,6 +1033,7 @@ EXPORT_SYMBOL(v4l2_ctrl_query_fill); EXPORT_SYMBOL(v4l2_ctrl_query_fill_std); EXPORT_SYMBOL(v4l2_chip_match_i2c_client); +EXPORT_SYMBOL(v4l2_chip_ident_i2c_client); EXPORT_SYMBOL(v4l2_chip_match_host); /* diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c index 5c9f2116d7bf..fdfef0b53315 100644 --- a/drivers/media/video/videodev.c +++ b/drivers/media/video/videodev.c @@ -1532,6 +1532,16 @@ static int __video_do_ioctl(struct inode *inode, struct file *file, break; } #endif + case VIDIOC_G_CHIP_IDENT: + { + struct v4l2_chip_ident *p=arg; + if (!vfd->vidioc_g_chip_ident) + break; + ret=vfd->vidioc_g_chip_ident(file, fh, p); + if (!ret) + dbgarg (cmd, "chip_ident=%u, revision=0x%x\n", p->ident, p->revision); + break; + } } /* switch */ if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) { diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index a08ef2c16300..a25c2afa67e1 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -1398,6 +1398,14 @@ struct v4l2_register { __u64 val; }; +/* VIDIOC_G_CHIP_IDENT */ +struct v4l2_chip_ident { + __u32 match_type; /* Match type */ + __u32 match_chip; /* Match this chip, meaning determined by match_type */ + __u32 ident; /* chip identifier as specified in */ + __u32 revision; /* chip revision, chip specific */ +}; + /* * I O C T L C O D E S F O R V I D E O D E V I C E S * @@ -1471,6 +1479,8 @@ struct v4l2_register { /* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */ #define VIDIOC_DBG_S_REGISTER _IOW ('V', 79, struct v4l2_register) #define VIDIOC_DBG_G_REGISTER _IOWR ('V', 80, struct v4l2_register) + +#define VIDIOC_G_CHIP_IDENT _IOWR ('V', 81, struct v4l2_chip_ident) #endif #ifdef __OLD_VIDIOC_ diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h new file mode 100644 index 000000000000..f6686ce133d8 --- /dev/null +++ b/include/media/v4l2-chip-ident.h @@ -0,0 +1,62 @@ +/* + v4l2 chip identifiers header + + This header provides a list of chip identifiers that can be returned + through the VIDIOC_G_CHIP_IDENT ioctl. + + Copyright (C) 2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef V4L2_CHIP_IDENT_H_ +#define V4L2_CHIP_IDENT_H_ + +/* VIDIOC_G_CHIP_IDENT: identifies the actual chip installed on the board */ +enum { + /* general idents: reserved range 0-49 */ + V4L2_IDENT_NONE = 0, /* No chip matched */ + V4L2_IDENT_AMBIGUOUS = 1, /* Match too general, multiple chips matched */ + V4L2_IDENT_UNKNOWN = 2, /* Chip found, but cannot identify */ + + /* module saa7110: just ident= 100 */ + V4L2_IDENT_SAA7110 = 100, + + /* module saa7111: just ident= 101 */ + V4L2_IDENT_SAA7111 = 101, + + /* module saa7115: reserved range 102-149 */ + V4L2_IDENT_SAA7113 = 103, + V4L2_IDENT_SAA7114 = 104, + V4L2_IDENT_SAA7115 = 105, + V4L2_IDENT_SAA7118 = 108, + + /* module saa7127: reserved range 150-199 */ + V4L2_IDENT_SAA7127 = 157, + V4L2_IDENT_SAA7129 = 159, + + /* module cx25840: reserved range 200-249 */ + V4L2_IDENT_CX25836 = 236, + V4L2_IDENT_CX25837 = 237, + V4L2_IDENT_CX25840 = 240, + V4L2_IDENT_CX25841 = 241, + V4L2_IDENT_CX25842 = 242, + V4L2_IDENT_CX25843 = 243, + + /* OmniVision sensors - range 250-299 */ + V4L2_IDENT_OV7670 = 250, +}; + +#endif diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index abb9ce9b21b8..181a40c46a52 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -98,6 +98,8 @@ u32 v4l2_ctrl_next(const u32 * const *ctrl_classes, u32 id); struct i2c_client; /* forward reference */ int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 id_type, u32 chip_id); +int v4l2_chip_ident_i2c_client(struct i2c_client *c, struct v4l2_chip_ident *chip, + u32 ident, u32 revision); int v4l2_chip_match_host(u32 id_type, u32 chip_id); /* ------------------------------------------------------------------------- */ @@ -114,39 +116,6 @@ struct v4l2_decode_vbi_line { u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */ }; -/* VIDIOC_INT_G_CHIP_IDENT: identifies the actual chip installed on the board */ -enum v4l2_chip_ident { - /* general idents: reserved range 0-49 */ - V4L2_IDENT_UNKNOWN = 0, - - /* module saa7110: just ident= 100 */ - V4L2_IDENT_SAA7110 = 100, - - /* module saa7111: just ident= 101 */ - V4L2_IDENT_SAA7111 = 101, - - /* module saa7115: reserved range 102-149 */ - V4L2_IDENT_SAA7113 = 103, - V4L2_IDENT_SAA7114 = 104, - V4L2_IDENT_SAA7115 = 105, - V4L2_IDENT_SAA7118 = 108, - - /* module saa7127: reserved range 150-199 */ - V4L2_IDENT_SAA7127 = 157, - V4L2_IDENT_SAA7129 = 159, - - /* module cx25840: reserved range 200-249 */ - V4L2_IDENT_CX25836 = 236, - V4L2_IDENT_CX25837 = 237, - V4L2_IDENT_CX25840 = 240, - V4L2_IDENT_CX25841 = 241, - V4L2_IDENT_CX25842 = 242, - V4L2_IDENT_CX25843 = 243, - - /* OmniVision sensors - range 250-299 */ - V4L2_IDENT_OV7670 = 250, -}; - /* audio ioctls */ /* v4l device was opened in Radio mode, to be replaced by VIDIOC_INT_S_TUNER_MODE */ @@ -208,10 +177,6 @@ enum v4l2_chip_ident { whether CC data from the first or second field should be obtained). */ #define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data) -/* Returns the chip identifier or V4L2_IDENT_UNKNOWN if no identification can - be made. */ -#define VIDIOC_INT_G_CHIP_IDENT _IOR ('d', 107, enum v4l2_chip_ident) - /* Sets I2S speed in bps. This is used to provide a standard way to select I2S clock used by driving digital audio streams at some board designs. 
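For an i2c sub-driver not converted by this patch, wiring up the new ioctl reduces to a single call to the helper declared above; the "foo" command handler below is a hypothetical sketch, with V4L2_IDENT_UNKNOWN standing in for a real identifier from v4l2-chip-ident.h:

#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>

static int foo_command(struct i2c_client *client, unsigned int cmd, void *arg)
{
	switch (cmd) {
	case VIDIOC_G_CHIP_IDENT:
		/* fills in *arg only if this client matches the query;
		   reports V4L2_IDENT_AMBIGUOUS if another client already did */
		return v4l2_chip_ident_i2c_client(client, arg,
						  V4L2_IDENT_UNKNOWN, 0);
	default:
		return -EINVAL;
	}
}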
Usual values for the frequency are 1024000 and 2048000. diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 2693f3ae6ffb..d62847f846c2 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -317,6 +317,8 @@ struct video_device int (*vidioc_s_register) (struct file *file, void *fh, struct v4l2_register *reg); #endif + int (*vidioc_g_chip_ident) (struct file *file, void *fh, + struct v4l2_chip_ident *chip); #ifdef OBSOLETE_OWNER /* to be removed soon */ -- cgit v1.2.3-59-g8ed1b From 2435be11ae1afb64ac7dfb25e10b6e3037ab0522 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:09 -0300 Subject: V4L/DVB (5307): Add support for the cx23415 MPEG decoding features. The cx23415 adds some extra features that this DVB decoding API did not support. This API has been expanded to support the required features. Both source and binary backwards compatibility is kept intact by these changes. So existing applications are not affected. Signed-off-by: Hans Verkuil Signed-off-by: Ralph Metzler Signed-off-by: Oliver Endriss Signed-off-by: Mauro Carvalho Chehab --- drivers/media/dvb/ttpci/av7110_av.c | 24 +++++++++--------- drivers/media/dvb/ttpci/av7110_hw.h | 10 ++++---- include/linux/dvb/audio.h | 5 +++- include/linux/dvb/version.h | 2 +- include/linux/dvb/video.h | 49 +++++++++++++++++++++++++++++++++++++ 5 files changed, 71 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c index e719af807685..654c9e919e04 100644 --- a/drivers/media/dvb/ttpci/av7110_av.c +++ b/drivers/media/dvb/ttpci/av7110_av.c @@ -1009,7 +1009,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file, if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) ret = av7110_av_stop(av7110, RP_VIDEO); else - ret = vidcom(av7110, VIDEO_CMD_STOP, + ret = vidcom(av7110, AV_VIDEO_CMD_STOP, av7110->videostate.video_blank ? 
0 : 1); if (!ret) av7110->trickmode = TRICK_NONE; @@ -1019,7 +1019,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file, av7110->trickmode = TRICK_NONE; if (av7110->videostate.play_state == VIDEO_FREEZED) { av7110->videostate.play_state = VIDEO_PLAYING; - ret = vidcom(av7110, VIDEO_CMD_PLAY, 0); + ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0); if (ret) break; } @@ -1034,7 +1034,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file, ret = av7110_av_start_play(av7110, RP_VIDEO); } if (!ret) - ret = vidcom(av7110, VIDEO_CMD_PLAY, 0); + ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0); if (!ret) av7110->videostate.play_state = VIDEO_PLAYING; break; @@ -1044,7 +1044,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file, if (av7110->playing & RP_VIDEO) ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Pause, 0); else - ret = vidcom(av7110, VIDEO_CMD_FREEZE, 1); + ret = vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1); if (!ret) av7110->trickmode = TRICK_FREEZE; break; @@ -1053,7 +1053,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file, if (av7110->playing & RP_VIDEO) ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Continue, 0); if (!ret) - ret = vidcom(av7110, VIDEO_CMD_PLAY, 0); + ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0); if (!ret) { av7110->videostate.play_state = VIDEO_PLAYING; av7110->trickmode = TRICK_NONE; @@ -1136,7 +1136,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file, ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Scan_I, 2, AV_PES, 0); else - ret = vidcom(av7110, VIDEO_CMD_FFWD, arg); + ret = vidcom(av7110, AV_VIDEO_CMD_FFWD, arg); if (!ret) { av7110->trickmode = TRICK_FAST; av7110->videostate.play_state = VIDEO_PLAYING; @@ -1147,13 +1147,13 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file, if (av7110->playing&RP_VIDEO) { ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0); if (!ret) - ret = vidcom(av7110, VIDEO_CMD_SLOW, arg); + ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg); } else { - ret = vidcom(av7110, VIDEO_CMD_PLAY, 0); + ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0); if (!ret) - ret = vidcom(av7110, VIDEO_CMD_STOP, 0); + ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 0); if (!ret) - ret = vidcom(av7110, VIDEO_CMD_SLOW, arg); + ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg); } if (!ret) { av7110->trickmode = TRICK_SLOW; @@ -1182,10 +1182,10 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file, ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0); if (!ret) - ret = vidcom(av7110, VIDEO_CMD_SLOW, arg); + ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg); } if (av7110->trickmode == TRICK_FREEZE) - ret = vidcom(av7110, VIDEO_CMD_STOP, 1); + ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 1); } break; diff --git a/drivers/media/dvb/ttpci/av7110_hw.h b/drivers/media/dvb/ttpci/av7110_hw.h index 4e173c67fbb2..673d9b3f064c 100644 --- a/drivers/media/dvb/ttpci/av7110_hw.h +++ b/drivers/media/dvb/ttpci/av7110_hw.h @@ -216,11 +216,11 @@ enum av7110_command_type { #define VID_CENTRE_CUT_PREF 0x05 /* PanScan with zero vector */ /* MPEG video decoder commands */ -#define VIDEO_CMD_STOP 0x000e -#define VIDEO_CMD_PLAY 0x000d -#define VIDEO_CMD_FREEZE 0x0102 -#define VIDEO_CMD_FFWD 0x0016 -#define VIDEO_CMD_SLOW 0x0022 +#define AV_VIDEO_CMD_STOP 0x000e +#define AV_VIDEO_CMD_PLAY 0x000d +#define AV_VIDEO_CMD_FREEZE 0x0102 +#define AV_VIDEO_CMD_FFWD 0x0016 +#define AV_VIDEO_CMD_SLOW 0x0022 /* MPEG audio decoder commands */ #define AUDIO_CMD_MUTE 0x0001 diff --git a/include/linux/dvb/audio.h 
b/include/linux/dvb/audio.h index 0874a67c6b92..89412e18f571 100644 --- a/include/linux/dvb/audio.h +++ b/include/linux/dvb/audio.h @@ -47,7 +47,9 @@ typedef enum { typedef enum { AUDIO_STEREO, AUDIO_MONO_LEFT, - AUDIO_MONO_RIGHT + AUDIO_MONO_RIGHT, + AUDIO_MONO, + AUDIO_STEREO_SWAPPED } audio_channel_select_t; @@ -133,5 +135,6 @@ typedef uint16_t audio_attributes_t; * extracted by the PES parser. */ #define AUDIO_GET_PTS _IOR('o', 19, __u64) +#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20) #endif /* _DVBAUDIO_H_ */ diff --git a/include/linux/dvb/version.h b/include/linux/dvb/version.h index 6183c9c4849e..126e0c26cb09 100644 --- a/include/linux/dvb/version.h +++ b/include/linux/dvb/version.h @@ -24,6 +24,6 @@ #define _DVBVERSION_H_ #define DVB_API_VERSION 3 -#define DVB_API_VERSION_MINOR 1 +#define DVB_API_VERSION_MINOR 2 #endif /*_DVBVERSION_H_*/ diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h index faebfda397ff..a96da40c50f5 100644 --- a/include/linux/dvb/video.h +++ b/include/linux/dvb/video.h @@ -80,10 +80,53 @@ typedef enum { } video_play_state_t; +/* Decoder commands */ +#define VIDEO_CMD_PLAY (0) +#define VIDEO_CMD_STOP (1) +#define VIDEO_CMD_FREEZE (2) +#define VIDEO_CMD_CONTINUE (3) + +/* Flags for VIDEO_CMD_FREEZE */ +#define VIDEO_CMD_FREEZE_TO_BLACK (1 << 0) + +/* Flags for VIDEO_CMD_STOP */ +#define VIDEO_CMD_STOP_TO_BLACK (1 << 0) +#define VIDEO_CMD_STOP_IMMEDIATELY (1 << 1) + +/* Play input formats: */ +/* The decoder has no special format requirements */ +#define VIDEO_PLAY_FMT_NONE (0) +/* The decoder requires full GOPs */ +#define VIDEO_PLAY_FMT_GOP (1) + +/* The structure must be zeroed before use by the application + This ensures it can be extended safely in the future. */ +struct video_command { + __u32 cmd; + __u32 flags; + union { + struct { + __u64 pts; + } stop; + + struct { + __u32 speed; + __u32 format; + } play; + + struct { + __u32 data[16]; + } raw; + }; +}; + + struct video_event { int32_t type; #define VIDEO_EVENT_SIZE_CHANGED 1 #define VIDEO_EVENT_FRAME_RATE_CHANGED 2 +#define VIDEO_EVENT_DECODER_STOPPED 3 +#define VIDEO_EVENT_VSYNC 4 time_t timestamp; union { video_size_t size; @@ -213,4 +256,10 @@ typedef uint16_t video_attributes_t; */ #define VIDEO_GET_PTS _IOR('o', 57, __u64) +/* Read the number of displayed frames since the decoder was started */ +#define VIDEO_GET_FRAME_COUNT _IOR('o', 58, __u64) + +#define VIDEO_COMMAND _IOWR('o', 59, struct video_command) +#define VIDEO_TRY_COMMAND _IOWR('o', 60, struct video_command) + #endif /*_DVBVIDEO_H_*/ -- cgit v1.2.3-59-g8ed1b From de956c1e0f89413a3837b642d592e2dff3e3eb78 Mon Sep 17 00:00:00 2001 From: Hartmut Hackmann Date: Fri, 27 Apr 2007 12:31:12 -0300 Subject: V4L/DVB (5313): Added a config entry and a gpio function pointer to tuner struct These entries mainly are to support configurations of the tda827x silicon tuner with a preamplifier. The values can be set throgh the attach inform or through the extended TUNER_SET_TYPE_ADDR client call. The function pointer will only be updated if the parameter is not NULL. 
Since a typecast is necessary to set the pointer, i added a typedef for this pointer (tuner_gpio_func_t) in tuner.h Signed-off-by: Hartmut Hackmann Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/saa7134/saa7134-cards.c | 3 ++- drivers/media/video/saa7134/saa7134-i2c.c | 2 ++ drivers/media/video/saa7134/saa7134.h | 1 + drivers/media/video/tuner-core.c | 24 ++++++++++++++++-------- include/media/tuner.h | 7 +++++++ 5 files changed, 28 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index 89f32107f46b..4399d1371cc1 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c @@ -2543,11 +2543,12 @@ struct saa7134_board saa7134_boards[] = { .name = "Philips Tiger reference design", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, + .tuner_config = 0, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, - .gpiomask = 1 << 21, + .gpiomask = 0x0200000, .inputs = {{ .name = name_tv, .vmux = 1, diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c index cce8da6a4f94..62c107e7759d 100644 --- a/drivers/media/video/saa7134/saa7134-i2c.c +++ b/drivers/media/video/saa7134/saa7134-i2c.c @@ -370,6 +370,8 @@ static int attach_inform(struct i2c_client *client) tun_setup.type = tuner; tun_setup.addr = saa7134_boards[dev->board].tuner_addr; + tun_setup.config = saa7134_boards[dev->board].tuner_config; + tun_setup.gpio_func = (tuner_gpio_func_t) saa7134_set_gpio; if ((tun_setup.addr == ADDR_UNSET)||(tun_setup.addr == client->addr)) { diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h index 7b5ae194bb11..303c0806df91 100644 --- a/drivers/media/video/saa7134/saa7134.h +++ b/drivers/media/video/saa7134/saa7134.h @@ -280,6 +280,7 @@ struct saa7134_board { unsigned char radio_addr; unsigned int tda9887_conf; + unsigned int tuner_config; /* peripheral I/O */ enum saa7134_video_out video_out; diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c index 15dbc6bf42a7..522ec1c35b8c 100644 --- a/drivers/media/video/tuner-core.c +++ b/drivers/media/video/tuner-core.c @@ -144,7 +144,8 @@ static void set_freq(struct i2c_client *c, unsigned long freq) } static void set_type(struct i2c_client *c, unsigned int type, - unsigned int new_mode_mask) + unsigned int new_mode_mask, unsigned int new_config, + tuner_gpio_func_t gpio_func) { struct tuner *t = i2c_get_clientdata(c); unsigned char buffer[4]; @@ -173,6 +174,11 @@ static void set_type(struct i2c_client *c, unsigned int type, microtune_init(c); break; case TUNER_PHILIPS_TDA8290: + t->config = new_config; + if (gpio_func != NULL) { + tuner_dbg("Defining GPIO function\n"); + t->gpio_func = gpio_func; + } tda8290_init(c); break; case TUNER_TEA5767: @@ -234,10 +240,11 @@ static void set_addr(struct i2c_client *c, struct tuner_setup *tun_setup) tuner_dbg("set addr for type %i\n", t->type); - if ( t->type == UNSET && ((tun_setup->addr == ADDR_UNSET && - (t->mode_mask & tun_setup->mode_mask)) || - tun_setup->addr == c->addr)) { - set_type(c, tun_setup->type, tun_setup->mode_mask); + if ( (t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) && + (t->mode_mask & tun_setup->mode_mask))) || + (tun_setup->addr == c->addr)) { + set_type(c, tun_setup->type, tun_setup->mode_mask, + tun_setup->config, tun_setup->gpio_func); } } @@ -496,7 +503,7 @@ static 
int tuner_attach(struct i2c_adapter *adap, int addr, int kind) register_client: tuner_info("chip found @ 0x%x (%s)\n", addr << 1, adap->name); i2c_attach_client (&t->i2c); - set_type (&t->i2c,t->type, t->mode_mask); + set_type (&t->i2c,t->type, t->mode_mask, t->config, t->gpio_func); return 0; } @@ -576,10 +583,11 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg) switch (cmd) { /* --- configuration --- */ case TUNER_SET_TYPE_ADDR: - tuner_dbg ("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x\n", + tuner_dbg ("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=0x%02x\n", ((struct tuner_setup *)arg)->type, ((struct tuner_setup *)arg)->addr, - ((struct tuner_setup *)arg)->mode_mask); + ((struct tuner_setup *)arg)->mode_mask, + ((struct tuner_setup *)arg)->config); set_addr(client, (struct tuner_setup *)arg); break; diff --git a/include/media/tuner.h b/include/media/tuner.h index 99acf847365c..fe567129b22b 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -173,10 +173,15 @@ enum tuner_mode { when the tuner is set to TV mode. */ +/* allows to access the GPIOs of the host (pci bridge) */ +typedef void (*tuner_gpio_func_t) (void *dev, int bit_no,int value); + struct tuner_setup { unsigned short addr; /* I2C address */ unsigned int type; /* Tuner type */ unsigned int mode_mask; /* Allowed tuner modes */ + unsigned int config; /* configuraion for more complex tuners */ + tuner_gpio_func_t gpio_func; }; struct tuner { @@ -210,6 +215,8 @@ struct tuner { unsigned char tda827x_addr; unsigned char tda827x_ver; unsigned int sgIF; + unsigned int config; + tuner_gpio_func_t gpio_func; /* function ptrs */ void (*set_tv_freq)(struct i2c_client *c, unsigned int freq); -- cgit v1.2.3-59-g8ed1b From cfeb88398f004a0e85ee011fd89a01f5d3bf3c81 Mon Sep 17 00:00:00 2001 From: Hartmut Hackmann Date: Fri, 27 Apr 2007 12:31:17 -0300 Subject: V4L/DVB (5323): Updated support for tuner callbacks This change supplies a more generic version of the tuner callback. The tuner struct now has a function pointer int (*tuner_callback) (void *dev, int command, int arg) additionally to a int config parameter. both can be set through the TUNER_SET_TYPE_ADDR client call. Note that the meaning of the parameters depend on the tuner type. 
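In practice a bridge driver hands both values to the tuner in a single TUNER_SET_TYPE_ADDR call, roughly as in the hypothetical sketch below; struct foo_dev, foo_call_i2c_clients() and foo_handle_tuner_request() are placeholders (the real saa7134 equivalent is saa7134_tuner_callback(), wired up in saa7134_board_init2()):

#include <media/tuner.h>

static int foo_tuner_callback(void *priv, int command, int arg)
{
	struct foo_dev *dev = priv;	/* the bridge driver's private data */

	/* meaning of command/arg depends on the tuner type, e.g. the
	   tda8290 uses them to switch an LNA or a vsync output */
	return foo_handle_tuner_request(dev, command, arg);
}

static void foo_attach_tuner(struct foo_dev *dev)
{
	struct tuner_setup tun_setup;

	tun_setup.type = dev->tuner_type;
	tun_setup.addr = ADDR_UNSET;
	tun_setup.mode_mask = T_RADIO | T_ANALOG_TV | T_DIGITAL_TV;
	tun_setup.config = dev->tuner_config;
	tun_setup.tuner_callback = foo_tuner_callback;
	foo_call_i2c_clients(dev, TUNER_SET_TYPE_ADDR, &tun_setup);
}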
Signed-off-by: Hartmut Hackmann Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/saa7134/saa7134-cards.c | 15 +++------------ drivers/media/video/saa7134/saa7134-core.c | 29 +++++++++++++++++++++++------ drivers/media/video/saa7134/saa7134-i2c.c | 2 +- drivers/media/video/saa7134/saa7134.h | 1 + drivers/media/video/tda8290.c | 12 ++++++------ drivers/media/video/tuner-core.c | 16 ++++++++-------- include/media/tuner.h | 8 +++----- 7 files changed, 45 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index f44e7c7e18a5..87a8a68efc5c 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c @@ -4185,6 +4185,9 @@ int saa7134_board_init2(struct saa7134_dev *dev) { unsigned char buf; int board; + struct tuner_setup tun_setup; + tun_setup.config = 0; + tun_setup.tuner_callback = saa7134_tuner_callback; switch (dev->board) { case SAA7134_BOARD_BMK_MPEX_NOTUNER: @@ -4201,20 +4204,15 @@ int saa7134_board_init2(struct saa7134_dev *dev) dev->tuner_type = saa7134_boards[dev->board].tuner_type; if (TUNER_ABSENT != dev->tuner_type) { - struct tuner_setup tun_setup; - tun_setup.mode_mask = T_RADIO | T_ANALOG_TV | T_DIGITAL_TV; tun_setup.type = dev->tuner_type; tun_setup.addr = ADDR_UNSET; - tun_setup.config = 0; - tun_setup.gpio_func = NULL; saa7134_i2c_call_clients (dev, TUNER_SET_TYPE_ADDR, &tun_setup); } break; case SAA7134_BOARD_MD7134: { - struct tuner_setup tun_setup; u8 subaddr; u8 data[3]; int ret, tuner_t; @@ -4275,8 +4273,6 @@ int saa7134_board_init2(struct saa7134_dev *dev) tun_setup.mode_mask = T_RADIO | T_ANALOG_TV | T_DIGITAL_TV; tun_setup.type = dev->tuner_type; tun_setup.addr = ADDR_UNSET; - tun_setup.config = 0; - tun_setup.gpio_func = NULL; saa7134_i2c_call_clients (dev, TUNER_SET_TYPE_ADDR,&tun_setup); } @@ -4288,7 +4284,6 @@ int saa7134_board_init2(struct saa7134_dev *dev) * the channel decoder. 
We have to make it transparent to find it */ { - struct tuner_setup tun_setup; u8 data[] = { 0x07, 0x02}; struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; i2c_transfer(&dev->i2c_adap, &msg, 1); @@ -4296,8 +4291,6 @@ int saa7134_board_init2(struct saa7134_dev *dev) tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; tun_setup.type = dev->tuner_type; tun_setup.addr = dev->tuner_addr; - tun_setup.config = 0; - tun_setup.gpio_func = NULL; saa7134_i2c_call_clients (dev, TUNER_SET_TYPE_ADDR,&tun_setup); } @@ -4306,7 +4299,6 @@ int saa7134_board_init2(struct saa7134_dev *dev) case SAA7134_BOARD_PHILIPS_TIGER_S: { u8 data[] = { 0x3c, 0x33, 0x60}; - struct tuner_setup tun_setup; struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)}; if(dev->autodetected && (dev->eedata[0x49] == 0x50)) { dev->board = SAA7134_BOARD_PHILIPS_TIGER_S; @@ -4318,7 +4310,6 @@ int saa7134_board_init2(struct saa7134_dev *dev) tun_setup.type = TUNER_PHILIPS_TDA8290; tun_setup.addr = 0x4b; tun_setup.config = 2; - tun_setup.gpio_func = (tuner_gpio_func_t) saa7134_set_gpio; saa7134_i2c_call_clients (dev, TUNER_SET_TYPE_ADDR,&tun_setup); data[2] = 0x68; diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c index 8ed03d65a34e..5f74ea467d4e 100644 --- a/drivers/media/video/saa7134/saa7134-core.c +++ b/drivers/media/video/saa7134/saa7134-core.c @@ -120,7 +120,6 @@ void saa7134_track_gpio(struct saa7134_dev *dev, char *msg) void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value) { u32 index, bitval; - u8 sync_control; index = 1 << bit_no; switch (value) { @@ -140,22 +139,40 @@ void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value) dprintk("setting GPIO%d to tristate\n", bit_no); saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, index, 0); break; - case 4: /* sync output on GPIO 22 for tda8275a, 50Hz*/ - case 5: /* sync output on GPIO 22 for tda8275a, 60Hz*/ - if (bit_no == 22) { - dprintk("setting GPIO22 to vsync %d\n", value - 4); + } +} + +int saa7134_tuner_callback(void *ptr, int command, int arg) +{ + u8 sync_control; + struct saa7134_dev *dev = ptr; + + switch (dev->tuner_type) { + case TUNER_PHILIPS_TDA8290: + switch (command) { + case 0: /* switch LNA gain through GPIO 22*/ + saa7134_set_gpio(dev, 22, arg) ; + break; + case 1: /* vsync output at GPIO22. 
50 / 60Hz */ + dprintk("setting GPIO22 to vsync %d\n", arg); saa_andorb(SAA7134_VIDEO_PORT_CTRL3, 0x80, 0x80); saa_andorb(SAA7134_VIDEO_PORT_CTRL6, 0x0f, 0x03); - if (value == 5) + if (arg == 1) sync_control = 11; else sync_control = 17; saa_writeb(SAA7134_VGATE_START, sync_control); saa_writeb(SAA7134_VGATE_STOP, sync_control + 1); saa_andorb(SAA7134_MISC_VGATE_MSB, 0x03, 0x00); + break; + default: + return -EINVAL; } break; + default: + return -ENODEV; } + return 0; } /* ------------------------------------------------------------------ */ diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c index 62c107e7759d..4e8d6c94ea60 100644 --- a/drivers/media/video/saa7134/saa7134-i2c.c +++ b/drivers/media/video/saa7134/saa7134-i2c.c @@ -371,7 +371,7 @@ static int attach_inform(struct i2c_client *client) tun_setup.type = tuner; tun_setup.addr = saa7134_boards[dev->board].tuner_addr; tun_setup.config = saa7134_boards[dev->board].tuner_config; - tun_setup.gpio_func = (tuner_gpio_func_t) saa7134_set_gpio; + tun_setup.tuner_callback = saa7134_tuner_callback; if ((tun_setup.addr == ADDR_UNSET)||(tun_setup.addr == client->addr)) { diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h index b46265a97565..c365ec581a11 100644 --- a/drivers/media/video/saa7134/saa7134.h +++ b/drivers/media/video/saa7134/saa7134.h @@ -565,6 +565,7 @@ extern int saa7134_no_overlay; void saa7134_track_gpio(struct saa7134_dev *dev, char *msg); void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value); +int saa7134_tuner_callback(void *ptr, int command, int arg); #define SAA7134_PGTABLE_SIZE 4096 diff --git a/drivers/media/video/tda8290.c b/drivers/media/video/tda8290.c index e6c3e6167191..44348f95dd4c 100644 --- a/drivers/media/video/tda8290.c +++ b/drivers/media/video/tda8290.c @@ -211,19 +211,19 @@ static void tda827xa_lna_gain(struct i2c_client *c, int high) case 2: /* turn Vsync on */ if (t->std & V4L2_STD_MN) - arg = 5; + arg = 1; else - arg = 4; - if (t->gpio_func) - t->gpio_func(c->adapter->algo_data, 22, 5); + arg = 0; + if (t->tuner_callback) + t->tuner_callback(c->adapter->algo_data, 1, arg); buf[1] = high ? 0 : 1; if (t->config == 2) buf[1] = high ? 
1 : 0; i2c_transfer(c->adapter, &msg, 1); break; case 3: /* switch with GPIO of saa713x */ - if (t->gpio_func) - t->gpio_func(c->adapter->algo_data, 22, high); + if (t->tuner_callback) + t->tuner_callback(c->adapter->algo_data, 0, high); break; } } diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c index 522ec1c35b8c..b8c38a028841 100644 --- a/drivers/media/video/tuner-core.c +++ b/drivers/media/video/tuner-core.c @@ -145,7 +145,7 @@ static void set_freq(struct i2c_client *c, unsigned long freq) static void set_type(struct i2c_client *c, unsigned int type, unsigned int new_mode_mask, unsigned int new_config, - tuner_gpio_func_t gpio_func) + int (*tuner_callback) (void *dev, int command,int arg)) { struct tuner *t = i2c_get_clientdata(c); unsigned char buffer[4]; @@ -169,16 +169,16 @@ static void set_type(struct i2c_client *c, unsigned int type, } t->type = type; + t->config = new_config; + if (tuner_callback != NULL) { + tuner_dbg("defining GPIO callback\n"); + t->tuner_callback = tuner_callback; + } switch (t->type) { case TUNER_MT2032: microtune_init(c); break; case TUNER_PHILIPS_TDA8290: - t->config = new_config; - if (gpio_func != NULL) { - tuner_dbg("Defining GPIO function\n"); - t->gpio_func = gpio_func; - } tda8290_init(c); break; case TUNER_TEA5767: @@ -244,7 +244,7 @@ static void set_addr(struct i2c_client *c, struct tuner_setup *tun_setup) (t->mode_mask & tun_setup->mode_mask))) || (tun_setup->addr == c->addr)) { set_type(c, tun_setup->type, tun_setup->mode_mask, - tun_setup->config, tun_setup->gpio_func); + tun_setup->config, tun_setup->tuner_callback); } } @@ -503,7 +503,7 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind) register_client: tuner_info("chip found @ 0x%x (%s)\n", addr << 1, adap->name); i2c_attach_client (&t->i2c); - set_type (&t->i2c,t->type, t->mode_mask, t->config, t->gpio_func); + set_type (&t->i2c,t->type, t->mode_mask, t->config, t->tuner_callback); return 0; } diff --git a/include/media/tuner.h b/include/media/tuner.h index fe567129b22b..a41ac41113ac 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -173,15 +173,12 @@ enum tuner_mode { when the tuner is set to TV mode. */ -/* allows to access the GPIOs of the host (pci bridge) */ -typedef void (*tuner_gpio_func_t) (void *dev, int bit_no,int value); - struct tuner_setup { unsigned short addr; /* I2C address */ unsigned int type; /* Tuner type */ unsigned int mode_mask; /* Allowed tuner modes */ unsigned int config; /* configuraion for more complex tuners */ - tuner_gpio_func_t gpio_func; + int (*tuner_callback) (void *dev, int command,int arg); }; struct tuner { @@ -215,8 +212,9 @@ struct tuner { unsigned char tda827x_addr; unsigned char tda827x_ver; unsigned int sgIF; + unsigned int config; - tuner_gpio_func_t gpio_func; + int (*tuner_callback) (void *dev, int command,int arg); /* function ptrs */ void (*set_tv_freq)(struct i2c_client *c, unsigned int freq); -- cgit v1.2.3-59-g8ed1b From 0b20060f6c2cc69c5394cf9782513e7b526e87b9 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:22 -0300 Subject: V4L/DVB (5336): Cx23416 doc updates + rename CX2341X_ENC_UNKNOWN The documentation of Several miscellaneous commands was updated. As a result of which the CX2341X_ENC_UNKNOWN command was renamed to CX2341X_ENC_SET_VERT_CROP_LINE. 
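[Editor's note] Under the clarified semantics, a driver would program the renamed command roughly like this (illustrative sketch only; enc_api() stands in for whatever firmware mailbox wrapper the driver uses and is not part of this patch):

	/* Per the updated docs: 10001 only for raw VBI capture on a
	 * saa7114 at 60 Hz, otherwise 0. */
	if (has_saa7114 && raw_vbi_capture && is_60hz)
		enc_api(dev, CX2341X_ENC_SET_VERT_CROP_LINE, 1, 10001);
	else
		enc_api(dev, CX2341X_ENC_SET_VERT_CROP_LINE, 1, 0);
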
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/cx2341x/fw-encoder-api.txt | 19 ++++++++++--------- include/media/cx2341x.h | 2 +- 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/Documentation/video4linux/cx2341x/fw-encoder-api.txt b/Documentation/video4linux/cx2341x/fw-encoder-api.txt index 242104ce5b61..5dd3109a8b3f 100644 --- a/Documentation/video4linux/cx2341x/fw-encoder-api.txt +++ b/Documentation/video4linux/cx2341x/fw-encoder-api.txt @@ -663,12 +663,13 @@ Param[0] ------------------------------------------------------------------------------- -Name CX2341X_ENC_UNKNOWN +Name CX2341X_ENC_SET_VERT_CROP_LINE Enum 219/0xDB Description - Unknown API, it's used by Hauppauge though. + Something to do with 'Vertical Crop Line' Param[0] - 0 This is the value Hauppauge uses, Unknown what it means. + If saa7114 and raw VBI capture and 60 Hz, then set to 10001. + Else 0. ------------------------------------------------------------------------------- @@ -682,11 +683,9 @@ Param[0] Command number: 1=set initial SCR value when starting encoding (works). 2=set quality mode (apparently some test setting). - 3=setup advanced VIM protection handling (supposedly only for the cx23416 - for raw YUV). - Actually it looks like this should be 0 for saa7114/5 based card and 1 - for cx25840 based cards. - 4=generate artificial PTS timestamps + 3=setup advanced VIM protection handling. + Always 1 for the cx23416 and 0 for cx23415. + 4=generate DVD compatible PTS timestamps 5=USB flush mode 6=something to do with the quantization matrix 7=set navigation pack insertion for DVD: adds 0xbf (private stream 2) @@ -698,7 +697,9 @@ Param[0] 9=set history parameters of the video input module 10=set input field order of VIM 11=set quantization matrix - 12=reset audio interface + 12=reset audio interface after channel change or input switch (has no argument). + Needed for the cx2584x, not needed for the mspx4xx, but it doesn't seem to + do any harm calling it regardless. 13=set audio volume delay 14=set audio delay diff --git a/include/media/cx2341x.h b/include/media/cx2341x.h index 583b0621ff43..38c12fed7535 100644 --- a/include/media/cx2341x.h +++ b/include/media/cx2341x.h @@ -166,7 +166,7 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix); #define CX2341X_ENC_SET_PLACEHOLDER 0xd7 #define CX2341X_ENC_MUTE_VIDEO 0xd9 #define CX2341X_ENC_MUTE_AUDIO 0xda -#define CX2341X_ENC_UNKNOWN 0xdb +#define CX2341X_ENC_SET_VERT_CROP_LINE 0xdb #define CX2341X_ENC_MISC 0xdc /* OSD API, specific to the cx23415 */ -- cgit v1.2.3-59-g8ed1b From 1b5888cea1d371239a130150222e63d476298d89 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:23 -0300 Subject: V4L/DVB (5341): Add cx23415/6 chip idents. 
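[Editor's note] The new constants simply extend the shared chip-ident enum; a driver that has probed the part could, for instance, map them to printable names (purely illustrative helper, not part of the patch):

	static const char *cx2341x_chip_name(int ident)
	{
		switch (ident) {
		case V4L2_IDENT_CX23415: return "cx23415";
		case V4L2_IDENT_CX23416: return "cx23416";
		default:                 return "unknown chip";
		}
	}
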
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-chip-ident.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h index f6686ce133d8..7d0c65413981 100644 --- a/include/media/v4l2-chip-ident.h +++ b/include/media/v4l2-chip-ident.h @@ -57,6 +57,10 @@ enum { /* OmniVision sensors - range 250-299 */ V4L2_IDENT_OV7670 = 250, + + /* Conexant MPEG encoder/decoders: range 410-420 */ + V4L2_IDENT_CX23415 = 415, + V4L2_IDENT_CX23416 = 416, }; #endif -- cgit v1.2.3-59-g8ed1b From 1a0adaf37c30e89e44d1470ef604a930999a5826 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:25 -0300 Subject: V4L/DVB (5345): ivtv driver for Conexant cx23416/cx23415 MPEG encoder/decoder It took three core maintainers, over four years of work, eight new i2c modules, eleven new V4L2 ioctls, three new DVB video ioctls, a Sliced VBI API, a new MPEG encoder API, an enhanced DVB video MPEG decoding API, major YUV/OSD contributions from Ian and John, web/wiki/svn/trac support from Axel Thimm, (hardware) support from Hauppauge, support and assistance from the v4l-dvb people and the many, many users of ivtv to finally make it possible to merge this driver into the kernel. Thank you all! Signed-off-by: Kevin Thayer Signed-off-by: Chris Kennedy Signed-off-by: Hans Verkuil Signed-off-by: John P Harvey Signed-off-by: Ian Armstrong Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/Kconfig | 2 + drivers/media/video/Makefile | 1 + drivers/media/video/ivtv/Kconfig | 26 + drivers/media/video/ivtv/Makefile | 7 + drivers/media/video/ivtv/ivtv-audio.c | 74 ++ drivers/media/video/ivtv/ivtv-audio.h | 23 + drivers/media/video/ivtv/ivtv-cards.c | 964 ++++++++++++++++++ drivers/media/video/ivtv/ivtv-cards.h | 207 ++++ drivers/media/video/ivtv/ivtv-controls.c | 303 ++++++ drivers/media/video/ivtv/ivtv-controls.h | 21 + drivers/media/video/ivtv/ivtv-driver.c | 1385 ++++++++++++++++++++++++++ drivers/media/video/ivtv/ivtv-driver.h | 866 +++++++++++++++++ drivers/media/video/ivtv/ivtv-fileops.c | 918 ++++++++++++++++++ drivers/media/video/ivtv/ivtv-fileops.h | 45 + drivers/media/video/ivtv/ivtv-firmware.c | 272 ++++++ drivers/media/video/ivtv/ivtv-firmware.h | 25 + drivers/media/video/ivtv/ivtv-gpio.c | 307 ++++++ drivers/media/video/ivtv/ivtv-gpio.h | 25 + drivers/media/video/ivtv/ivtv-i2c.c | 750 ++++++++++++++ drivers/media/video/ivtv/ivtv-i2c.h | 38 + drivers/media/video/ivtv/ivtv-ioctl.c | 1555 ++++++++++++++++++++++++++++++ drivers/media/video/ivtv/ivtv-ioctl.h | 28 + drivers/media/video/ivtv/ivtv-irq.c | 818 ++++++++++++++++ drivers/media/video/ivtv/ivtv-irq.h | 24 + drivers/media/video/ivtv/ivtv-mailbox.c | 360 +++++++ drivers/media/video/ivtv/ivtv-mailbox.h | 25 + drivers/media/video/ivtv/ivtv-queue.c | 262 +++++ drivers/media/video/ivtv/ivtv-queue.h | 64 ++ drivers/media/video/ivtv/ivtv-streams.c | 977 +++++++++++++++++++ drivers/media/video/ivtv/ivtv-streams.h | 31 + drivers/media/video/ivtv/ivtv-udma.c | 200 ++++ drivers/media/video/ivtv/ivtv-udma.h | 43 + drivers/media/video/ivtv/ivtv-vbi.c | 545 +++++++++++ drivers/media/video/ivtv/ivtv-vbi.h | 27 + drivers/media/video/ivtv/ivtv-version.h | 26 + drivers/media/video/ivtv/ivtv-video.c | 150 +++ drivers/media/video/ivtv/ivtv-video.h | 25 + drivers/media/video/ivtv/ivtv-yuv.c | 1129 ++++++++++++++++++++++ drivers/media/video/ivtv/ivtv-yuv.h | 24 + include/media/ivtv.h | 65 ++ 40 files changed, 12637 insertions(+) create mode 100644 
drivers/media/video/ivtv/Kconfig create mode 100644 drivers/media/video/ivtv/Makefile create mode 100644 drivers/media/video/ivtv/ivtv-audio.c create mode 100644 drivers/media/video/ivtv/ivtv-audio.h create mode 100644 drivers/media/video/ivtv/ivtv-cards.c create mode 100644 drivers/media/video/ivtv/ivtv-cards.h create mode 100644 drivers/media/video/ivtv/ivtv-controls.c create mode 100644 drivers/media/video/ivtv/ivtv-controls.h create mode 100644 drivers/media/video/ivtv/ivtv-driver.c create mode 100644 drivers/media/video/ivtv/ivtv-driver.h create mode 100644 drivers/media/video/ivtv/ivtv-fileops.c create mode 100644 drivers/media/video/ivtv/ivtv-fileops.h create mode 100644 drivers/media/video/ivtv/ivtv-firmware.c create mode 100644 drivers/media/video/ivtv/ivtv-firmware.h create mode 100644 drivers/media/video/ivtv/ivtv-gpio.c create mode 100644 drivers/media/video/ivtv/ivtv-gpio.h create mode 100644 drivers/media/video/ivtv/ivtv-i2c.c create mode 100644 drivers/media/video/ivtv/ivtv-i2c.h create mode 100644 drivers/media/video/ivtv/ivtv-ioctl.c create mode 100644 drivers/media/video/ivtv/ivtv-ioctl.h create mode 100644 drivers/media/video/ivtv/ivtv-irq.c create mode 100644 drivers/media/video/ivtv/ivtv-irq.h create mode 100644 drivers/media/video/ivtv/ivtv-mailbox.c create mode 100644 drivers/media/video/ivtv/ivtv-mailbox.h create mode 100644 drivers/media/video/ivtv/ivtv-queue.c create mode 100644 drivers/media/video/ivtv/ivtv-queue.h create mode 100644 drivers/media/video/ivtv/ivtv-streams.c create mode 100644 drivers/media/video/ivtv/ivtv-streams.h create mode 100644 drivers/media/video/ivtv/ivtv-udma.c create mode 100644 drivers/media/video/ivtv/ivtv-udma.h create mode 100644 drivers/media/video/ivtv/ivtv-vbi.c create mode 100644 drivers/media/video/ivtv/ivtv-vbi.h create mode 100644 drivers/media/video/ivtv/ivtv-version.h create mode 100644 drivers/media/video/ivtv/ivtv-video.c create mode 100644 drivers/media/video/ivtv/ivtv-video.h create mode 100644 drivers/media/video/ivtv/ivtv-yuv.c create mode 100644 drivers/media/video/ivtv/ivtv-yuv.h create mode 100644 include/media/ivtv.h (limited to 'include') diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index fa0a87679190..639e8b6c35b1 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -647,6 +647,8 @@ config VIDEO_HEXIUM_GEMINI source "drivers/media/video/cx88/Kconfig" +source "drivers/media/video/ivtv/Kconfig" + config VIDEO_M32R_AR tristate "AR devices" depends on M32R && VIDEO_V4L1 diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index 384f01c133c5..9c2de501612f 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_VIDEO_CPIA_USB) += cpia_usb.o obj-$(CONFIG_VIDEO_MEYE) += meye.o obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/ obj-$(CONFIG_VIDEO_CX88) += cx88/ +obj-$(CONFIG_VIDEO_IVTV) += ivtv/ obj-$(CONFIG_VIDEO_EM28XX) += em28xx/ obj-$(CONFIG_VIDEO_USBVISION) += usbvision/ obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o diff --git a/drivers/media/video/ivtv/Kconfig b/drivers/media/video/ivtv/Kconfig new file mode 100644 index 000000000000..88e510171347 --- /dev/null +++ b/drivers/media/video/ivtv/Kconfig @@ -0,0 +1,26 @@ +config VIDEO_IVTV + tristate "Conexant cx23416/cx23415 MPEG encoder/decoder support" + depends on VIDEO_V4L2 && USB && I2C && EXPERIMENTAL + select FW_LOADER + select VIDEO_TUNER + select VIDEO_TVEEPROM + select VIDEO_CX2341X + select VIDEO_MSP3400 + select VIDEO_SAA711X + select 
VIDEO_SAA7127 + select VIDEO_TVAUDIO + select VIDEO_CS53L32A + select VIDEO_TLV320AIC23B + select VIDEO_WM8775 + select VIDEO_WM8739 + select VIDEO_UPD64031A + select VIDEO_UPD64083 + ---help--- + This is a video4linux driver for Conexant cx23416 or cx23416 based + PCI personal video recorder devices. + + This is used in devices such as the Hauppauge PVR-150/250/350/500 + cards. + + To compile this driver as a module, choose M here: the + module will be called ivtv. diff --git a/drivers/media/video/ivtv/Makefile b/drivers/media/video/ivtv/Makefile new file mode 100644 index 000000000000..7e95148fbf4f --- /dev/null +++ b/drivers/media/video/ivtv/Makefile @@ -0,0 +1,7 @@ +ivtv-objs := ivtv-audio.o ivtv-cards.o ivtv-controls.o \ + ivtv-driver.o ivtv-fileops.o ivtv-firmware.o \ + ivtv-gpio.o ivtv-i2c.o ivtv-ioctl.o ivtv-irq.o \ + ivtv-mailbox.o ivtv-queue.o ivtv-streams.o ivtv-udma.o \ + ivtv-vbi.o ivtv-video.o ivtv-yuv.o + +obj-$(CONFIG_VIDEO_IVTV) += ivtv.o diff --git a/drivers/media/video/ivtv/ivtv-audio.c b/drivers/media/video/ivtv/ivtv-audio.c new file mode 100644 index 000000000000..d702b8b539a1 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-audio.c @@ -0,0 +1,74 @@ +/* + Audio-related ivtv functions. + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-mailbox.h" +#include "ivtv-i2c.h" +#include "ivtv-gpio.h" +#include "ivtv-cards.h" +#include "ivtv-audio.h" +#include +#include + +/* Selects the audio input and output according to the current + settings. */ +int ivtv_audio_set_io(struct ivtv *itv) +{ + struct v4l2_routing route; + u32 audio_input; + int mux_input; + + /* Determine which input to use */ + if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) { + audio_input = itv->card->radio_input.audio_input; + mux_input = itv->card->radio_input.muxer_input; + } else { + audio_input = itv->card->audio_inputs[itv->audio_input].audio_input; + mux_input = itv->card->audio_inputs[itv->audio_input].muxer_input; + } + + /* handle muxer chips */ + route.input = mux_input; + route.output = 0; + ivtv_i2c_hw(itv, itv->card->hw_muxer, VIDIOC_INT_S_AUDIO_ROUTING, &route); + + route.input = audio_input; + if (itv->card->hw_audio & IVTV_HW_MSP34XX) { + route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1); + } + return ivtv_i2c_hw(itv, itv->card->hw_audio, VIDIOC_INT_S_AUDIO_ROUTING, &route); +} + +void ivtv_audio_set_route(struct ivtv *itv, struct v4l2_routing *route) +{ + ivtv_i2c_hw(itv, itv->card->hw_audio, VIDIOC_INT_S_AUDIO_ROUTING, route); +} + +void ivtv_audio_set_audio_clock_freq(struct ivtv *itv, u8 freq) +{ + static u32 freqs[3] = { 44100, 48000, 32000 }; + + /* The audio clock of the digitizer must match the codec sample + rate otherwise you get some very strange effects. 
*/ + if (freq > 2) + return; + ivtv_call_i2c_clients(itv, VIDIOC_INT_AUDIO_CLOCK_FREQ, &freqs[freq]); +} + diff --git a/drivers/media/video/ivtv/ivtv-audio.h b/drivers/media/video/ivtv/ivtv-audio.h new file mode 100644 index 000000000000..9c42846d8124 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-audio.h @@ -0,0 +1,23 @@ +/* + Audio-related ivtv functions. + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +int ivtv_audio_set_io(struct ivtv *itv); +void ivtv_audio_set_route(struct ivtv *itv, struct v4l2_routing *route); +void ivtv_audio_set_audio_clock_freq(struct ivtv *itv, u8 freq); diff --git a/drivers/media/video/ivtv/ivtv-cards.c b/drivers/media/video/ivtv/ivtv-cards.c new file mode 100644 index 000000000000..8eab02083887 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-cards.c @@ -0,0 +1,964 @@ +/* + Functions to query card hardware + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-cards.h" +#include "ivtv-i2c.h" + +#include +#include +#include +#include +#include + +#define MSP_TUNER MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \ + MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER) +#define MSP_SCART1 MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \ + MSP_DSP_IN_SCART, MSP_DSP_IN_SCART) +#define MSP_SCART2 MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1, \ + MSP_DSP_IN_SCART, MSP_DSP_IN_SCART) +#define MSP_SCART3 MSP_INPUT(MSP_IN_SCART3, MSP_IN_TUNER1, \ + MSP_DSP_IN_SCART, MSP_DSP_IN_SCART) +#define MSP_MONO MSP_INPUT(MSP_IN_MONO, MSP_IN_TUNER1, \ + MSP_DSP_IN_SCART, MSP_DSP_IN_SCART) + +/********************** card configuration *******************************/ + +/* Please add new PCI IDs to: http://pci-ids.ucw.cz/iii + This keeps the PCI ID database up to date. Note that the entries + must be added under vendor 0x4444 (Conexant) as subsystem IDs. + New vendor IDs should still be added to the vendor ID list. 
*/ + +/* Hauppauge PVR-250 cards */ + +/* Note: for Hauppauge cards the tveeprom information is used instead of PCI IDs */ +static const struct ivtv_card ivtv_card_pvr250 = { + .type = IVTV_CARD_PVR_250, + .name = "Hauppauge WinTV PVR-250", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7115, + .hw_audio = IVTV_HW_MSP34XX, + .hw_audio_ctrl = IVTV_HW_MSP34XX, + .hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7115 | + IVTV_HW_TVEEPROM | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 }, + { IVTV_CARD_INPUT_SVIDEO2, 2, IVTV_SAA71XX_SVIDEO1 }, + { IVTV_CARD_INPUT_COMPOSITE2, 2, IVTV_SAA71XX_COMPOSITE1 }, + { IVTV_CARD_INPUT_COMPOSITE3, 1, IVTV_SAA71XX_COMPOSITE5 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER }, + { IVTV_CARD_INPUT_LINE_IN1, MSP_SCART1 }, + { IVTV_CARD_INPUT_LINE_IN2, MSP_SCART3 }, + }, + .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, MSP_SCART2 }, +}; + +/* ------------------------------------------------------------------------- */ + +/* Hauppauge PVR-350 cards */ + +/* Outputs for Hauppauge PVR350 cards */ +static struct ivtv_card_output ivtv_pvr350_outputs[] = { + { + .name = "S-Video + Composite", + .video_output = 0, + }, { + .name = "Composite", + .video_output = 1, + }, { + .name = "S-Video", + .video_output = 2, + }, { + .name = "RGB", + .video_output = 3, + }, { + .name = "YUV C", + .video_output = 4, + }, { + .name = "YUV V", + .video_output = 5, + } +}; + +static const struct ivtv_card ivtv_card_pvr350 = { + .type = IVTV_CARD_PVR_350, + .name = "Hauppauge WinTV PVR-350", + .v4l2_capabilities = IVTV_CAP_ENCODER | IVTV_CAP_DECODER, + .video_outputs = ivtv_pvr350_outputs, + .nof_outputs = ARRAY_SIZE(ivtv_pvr350_outputs), + .hw_video = IVTV_HW_SAA7115, + .hw_audio = IVTV_HW_MSP34XX, + .hw_audio_ctrl = IVTV_HW_MSP34XX, + .hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7115 | + IVTV_HW_SAA7127 | IVTV_HW_TVEEPROM | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 }, + { IVTV_CARD_INPUT_SVIDEO2, 2, IVTV_SAA71XX_SVIDEO1 }, + { IVTV_CARD_INPUT_COMPOSITE2, 2, IVTV_SAA71XX_COMPOSITE1 }, + { IVTV_CARD_INPUT_COMPOSITE3, 1, IVTV_SAA71XX_COMPOSITE5 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER }, + { IVTV_CARD_INPUT_LINE_IN1, MSP_SCART1 }, + { IVTV_CARD_INPUT_LINE_IN2, MSP_SCART3 }, + }, + .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, MSP_SCART2 }, +}; + +/* PVR-350 V1 boards have a different audio tuner input and use a + saa7114 instead of a saa7115. + Note that the info below comes from a pre-production model so it may + not be correct. 
Especially the audio behaves strangely (mono only it seems) */ +static const struct ivtv_card ivtv_card_pvr350_v1 = { + .type = IVTV_CARD_PVR_350_V1, + .name = "Hauppauge WinTV PVR-350 (V1)", + .v4l2_capabilities = IVTV_CAP_ENCODER | IVTV_CAP_DECODER, + .video_outputs = ivtv_pvr350_outputs, + .nof_outputs = ARRAY_SIZE(ivtv_pvr350_outputs), + .hw_video = IVTV_HW_SAA7114, + .hw_audio = IVTV_HW_MSP34XX, + .hw_audio_ctrl = IVTV_HW_MSP34XX, + .hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7114 | + IVTV_HW_SAA7127 | IVTV_HW_TVEEPROM | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 }, + { IVTV_CARD_INPUT_SVIDEO2, 2, IVTV_SAA71XX_SVIDEO1 }, + { IVTV_CARD_INPUT_COMPOSITE2, 2, IVTV_SAA71XX_COMPOSITE1 }, + { IVTV_CARD_INPUT_COMPOSITE3, 1, IVTV_SAA71XX_COMPOSITE5 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, MSP_MONO }, + { IVTV_CARD_INPUT_LINE_IN1, MSP_SCART1 }, + { IVTV_CARD_INPUT_LINE_IN2, MSP_SCART3 }, + }, + .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, MSP_SCART2 }, +}; + +/* ------------------------------------------------------------------------- */ + +/* Hauppauge PVR-150/PVR-500 cards */ + +static const struct ivtv_card ivtv_card_pvr150 = { + .type = IVTV_CARD_PVR_150, + .name = "Hauppauge WinTV PVR-150", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_CX25840, + .hw_audio = IVTV_HW_CX25840, + .hw_audio_ctrl = IVTV_HW_CX25840, + .hw_muxer = IVTV_HW_WM8775, + .hw_all = IVTV_HW_WM8775 | IVTV_HW_CX25840 | + IVTV_HW_TVEEPROM | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE7 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, CX25840_SVIDEO1 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE3 }, + { IVTV_CARD_INPUT_SVIDEO2, 2, CX25840_SVIDEO2 }, + { IVTV_CARD_INPUT_COMPOSITE2, 2, CX25840_COMPOSITE4 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, + CX25840_AUDIO8, WM8775_AIN2 }, + { IVTV_CARD_INPUT_LINE_IN1, + CX25840_AUDIO_SERIAL, WM8775_AIN2 }, + { IVTV_CARD_INPUT_LINE_IN2, + CX25840_AUDIO_SERIAL, WM8775_AIN3 }, + }, + .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, + CX25840_AUDIO_SERIAL, WM8775_AIN4 }, + /* apparently needed for the IR blaster */ + .gpio_init = { .direction = 0x1f01, .initial_value = 0x26f3 }, +}; + +/* ------------------------------------------------------------------------- */ + +/* AVerMedia M179 cards */ + +static const struct ivtv_card_pci_info ivtv_pci_m179[] = { + { PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_AVERMEDIA, 0xa3cf }, + { PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_AVERMEDIA, 0xa3ce }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_m179 = { + .type = IVTV_CARD_M179, + .name = "AVerMedia M179", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7114, + .hw_audio = IVTV_HW_GPIO, + .hw_audio_ctrl = IVTV_HW_GPIO, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7114 | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN }, + }, + .gpio_init = { .direction = 0xe380, .initial_value = 0x8290 }, + .gpio_audio_input = { .mask = 0x8040, .tuner = 0x8000, .linein = 0x0000 }, + .gpio_audio_mute = { .mask = 0x2000, .mute = 0x2000 }, + .gpio_audio_mode = { .mask = 0x4300, .mono = 
0x4000, .stereo = 0x0200, + .lang1 = 0x0200, .lang2 = 0x0100, .both = 0x0000 }, + .gpio_audio_freq = { .mask = 0x0018, .f32000 = 0x0000, + .f44100 = 0x0008, .f48000 = 0x0010 }, + .gpio_audio_detect = { .mask = 0x4000, .stereo = 0x0000 }, + .tuners = { + /* As far as we know all M179 cards use this tuner */ + { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_NTSC }, + }, + .pci_list = ivtv_pci_m179, +}; + +/* ------------------------------------------------------------------------- */ + +/* Yuan MPG600/Kuroutoshikou ITVC16-STVLP cards */ + +static const struct ivtv_card_pci_info ivtv_pci_mpg600[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0xfff3 }, + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0xffff }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_mpg600 = { + .type = IVTV_CARD_MPG600, + .name = "Yuan MPG600, Kuroutoshikou ITVC16-STVLP", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7115, + .hw_audio = IVTV_HW_GPIO, + .hw_audio_ctrl = IVTV_HW_GPIO, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN }, + }, + .gpio_init = { .direction = 0x3080, .initial_value = 0x0004 }, + .gpio_audio_input = { .mask = 0x3000, .tuner = 0x0000, .linein = 0x2000 }, + .gpio_audio_mute = { .mask = 0x0001, .mute = 0x0001 }, + .gpio_audio_mode = { .mask = 0x000e, .mono = 0x0006, .stereo = 0x0004, + .lang1 = 0x0004, .lang2 = 0x0000, .both = 0x0008 }, + .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 }, + .tuners = { + /* The PAL tuner is confirmed */ + { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME }, + { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 }, + }, + .pci_list = ivtv_pci_mpg600, +}; + +/* ------------------------------------------------------------------------- */ + +/* Yuan MPG160/Kuroutoshikou ITVC15-STVLP cards */ + +static const struct ivtv_card_pci_info ivtv_pci_mpg160[] = { + { PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_YUAN1, 0 }, + { PCI_DEVICE_ID_IVTV15, IVTV_PCI_ID_IODATA, 0x40a0 }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_mpg160 = { + .type = IVTV_CARD_MPG160, + .name = "YUAN MPG160, Kuroutoshikou ITVC15-STVLP, I/O Data GV-M2TV/PCI", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7114, + .hw_audio = IVTV_HW_GPIO, + .hw_audio_ctrl = IVTV_HW_GPIO, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7114 | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN }, + }, + .gpio_init = { .direction = 0x7080, .initial_value = 0x400c }, + .gpio_audio_input = { .mask = 0x3000, .tuner = 0x0000, .linein = 0x2000 }, + .gpio_audio_mute = { .mask = 0x0001, .mute = 0x0001 }, + .gpio_audio_mode = { .mask = 0x000e, .mono = 0x0006, .stereo = 0x0004, + .lang1 = 0x0004, .lang2 = 0x0000, .both = 0x0008 }, + .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 }, + .tuners = { + { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME }, + { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 }, + }, + .pci_list = ivtv_pci_mpg160, +}; + 
+/* ------------------------------------------------------------------------- */ + +/* Yuan PG600/Diamond PVR-550 cards */ + +static const struct ivtv_card_pci_info ivtv_pci_pg600[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_DIAMONDMM, 0x0070 }, + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN3, 0x0600 }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_pg600 = { + .type = IVTV_CARD_PG600, + .name = "Yuan PG600, Diamond PVR-550", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_CX25840, + .hw_audio = IVTV_HW_CX25840, + .hw_audio_ctrl = IVTV_HW_CX25840, + .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, + CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 }, + { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL }, + }, + .tuners = { + { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME }, + { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 }, + }, + .pci_list = ivtv_pci_pg600, +}; + +/* ------------------------------------------------------------------------- */ + +/* Adaptec VideOh! AVC-2410 card */ + +static const struct ivtv_card_pci_info ivtv_pci_avc2410[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_ADAPTEC, 0x0093 }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_avc2410 = { + .type = IVTV_CARD_AVC2410, + .name = "Adaptec VideOh! AVC-2410", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7115, + .hw_audio = IVTV_HW_MSP34XX, + .hw_audio_ctrl = IVTV_HW_MSP34XX, + .hw_muxer = IVTV_HW_CS53L32A, + .hw_all = IVTV_HW_MSP34XX | IVTV_HW_CS53L32A | + IVTV_HW_SAA7115 | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, + MSP_TUNER, CS53L32A_IN0 }, + { IVTV_CARD_INPUT_LINE_IN1, + MSP_SCART1, CS53L32A_IN2 }, + }, + /* This card has no eeprom and in fact the Windows driver relies + on the country/region setting of the user to decide which tuner + is available. */ + .tuners = { + /* This tuner has been verified for the AVC2410 */ + { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, + /* This is a good guess, but I'm not totally sure this is + the correct tuner for NTSC. */ + { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 }, + }, + .pci_list = ivtv_pci_avc2410, +}; + +/* ------------------------------------------------------------------------- */ + +/* Adaptec VideOh! AVC-2010 card */ + +static const struct ivtv_card_pci_info ivtv_pci_avc2010[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_ADAPTEC, 0x0092 }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_avc2010 = { + .type = IVTV_CARD_AVC2010, + .name = "Adaptec VideOh! 
AVC-2010", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7115, + .hw_audio = IVTV_HW_CS53L32A, + .hw_audio_ctrl = IVTV_HW_CS53L32A, + .hw_all = IVTV_HW_CS53L32A | IVTV_HW_SAA7115, + .video_inputs = { + { IVTV_CARD_INPUT_SVIDEO1, 0, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 0, IVTV_SAA71XX_COMPOSITE3 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_LINE_IN1, CS53L32A_IN2 }, + }, + /* Does not have a tuner */ + .pci_list = ivtv_pci_avc2010, +}; + +/* ------------------------------------------------------------------------- */ + +/* Nagase Transgear 5000TV card */ + +static const struct ivtv_card_pci_info ivtv_pci_tg5000tv[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xbfff }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_tg5000tv = { + .type = IVTV_CARD_TG5000TV, + .name = "Nagase Transgear 5000TV", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7114 | IVTV_HW_UPD64031A | IVTV_HW_UPD6408X | + IVTV_HW_GPIO, + .hw_audio = IVTV_HW_GPIO, + .hw_audio_ctrl = IVTV_HW_GPIO, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7114 | IVTV_HW_TUNER | + IVTV_HW_UPD64031A | IVTV_HW_UPD6408X, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO2 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO2 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN }, + }, + .gr_config = UPD64031A_VERTICAL_EXTERNAL, + .gpio_init = { .direction = 0xe080, .initial_value = 0x8000 }, + .gpio_audio_input = { .mask = 0x8080, .tuner = 0x8000, .linein = 0x0080 }, + .gpio_audio_mute = { .mask = 0x6000, .mute = 0x6000 }, + .gpio_audio_mode = { .mask = 0x4300, .mono = 0x4000, .stereo = 0x0200, + .lang1 = 0x0300, .lang2 = 0x0000, .both = 0x0200 }, + .gpio_video_input = { .mask = 0x0030, .tuner = 0x0000, + .composite = 0x0010, .svideo = 0x0020 }, + .tuners = { + { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 }, + }, + .pci_list = ivtv_pci_tg5000tv, +}; + +/* ------------------------------------------------------------------------- */ + +/* AOpen VA2000MAX-SNT6 card */ + +static const struct ivtv_card_pci_info ivtv_pci_va2000[] = { + { PCI_DEVICE_ID_IVTV16, 0, 0xff5f }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_va2000 = { + .type = IVTV_CARD_VA2000MAX_SNT6, + .name = "AOpen VA2000MAX-SNT6", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7115 | IVTV_HW_UPD6408X, + .hw_audio = IVTV_HW_MSP34XX, + .hw_audio_ctrl = IVTV_HW_MSP34XX, + .hw_all = IVTV_HW_MSP34XX | IVTV_HW_SAA7115 | + IVTV_HW_UPD6408X | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER }, + }, + .tuners = { + { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 }, + }, + .pci_list = ivtv_pci_va2000, +}; + +/* ------------------------------------------------------------------------- */ + +/* Yuan MPG600GR/Kuroutoshikou CX23416GYC-STVLP cards */ + +static const struct ivtv_card_pci_info ivtv_pci_cx23416gyc[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0x0600 }, + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN4, 0x0600 }, + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_MELCO, 0x0523 }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_cx23416gyc = { + .type = IVTV_CARD_CX23416GYC, + .name = "Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA717X 
| IVTV_HW_GPIO | + IVTV_HW_UPD64031A | IVTV_HW_UPD6408X, + .hw_audio = IVTV_HW_SAA717X, + .hw_audio_ctrl = IVTV_HW_SAA717X, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA717X | IVTV_HW_TUNER | + IVTV_HW_UPD64031A | IVTV_HW_UPD6408X, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO3 | + IVTV_SAA717X_TUNER_FLAG }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO3 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN2 }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN0 }, + }, + .gr_config = UPD64031A_VERTICAL_EXTERNAL, + .gpio_init = { .direction = 0xf880, .initial_value = 0x8800 }, + .gpio_video_input = { .mask = 0x0020, .tuner = 0x0000, + .composite = 0x0020, .svideo = 0x0020 }, + .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000, + .f44100 = 0x4000, .f48000 = 0x8000 }, + .tuners = { + { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, + { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 }, + }, + .pci_list = ivtv_pci_cx23416gyc, +}; + +static const struct ivtv_card ivtv_card_cx23416gyc_nogr = { + .type = IVTV_CARD_CX23416GYC_NOGR, + .name = "Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR)", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA717X | IVTV_HW_GPIO | IVTV_HW_UPD6408X, + .hw_audio = IVTV_HW_SAA717X, + .hw_audio_ctrl = IVTV_HW_SAA717X, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA717X | IVTV_HW_TUNER | + IVTV_HW_UPD6408X, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 | + IVTV_SAA717X_TUNER_FLAG }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN2 }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN0 }, + }, + .gpio_init = { .direction = 0xf880, .initial_value = 0x8800 }, + .gpio_video_input = { .mask = 0x0020, .tuner = 0x0000, + .composite = 0x0020, .svideo = 0x0020 }, + .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000, + .f44100 = 0x4000, .f48000 = 0x8000 }, + .tuners = { + { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, + { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 }, + }, +}; + +static const struct ivtv_card ivtv_card_cx23416gyc_nogrycs = { + .type = IVTV_CARD_CX23416GYC_NOGRYCS, + .name = "Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR/YCS)", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA717X | IVTV_HW_GPIO, + .hw_audio = IVTV_HW_SAA717X, + .hw_audio_ctrl = IVTV_HW_SAA717X, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA717X | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 | + IVTV_SAA717X_TUNER_FLAG }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE0 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN2 }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN0 }, + }, + .gpio_init = { .direction = 0xf880, .initial_value = 0x8800 }, + .gpio_video_input = { .mask = 0x0020, .tuner = 0x0000, + .composite = 0x0020, .svideo = 0x0020 }, + .gpio_audio_freq = { .mask = 0xc000, .f32000 = 0x0000, + .f44100 = 0x4000, .f48000 = 0x8000 }, + .tuners = { + { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, + { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 }, + }, +}; + +/* ------------------------------------------------------------------------- */ + +/* I/O Data GV-MVP/RX & GV-MVP/RX2W (dual 
tuner) cards */ + +static const struct ivtv_card_pci_info ivtv_pci_gv_mvprx[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd01e }, + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd038 }, /* 2W unit #1 */ + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd039 }, /* 2W unit #2 */ + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_gv_mvprx = { + .type = IVTV_CARD_GV_MVPRX, + .name = "I/O Data GV-MVP/RX, GV-MVP/RX2W (dual tuner)", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7115 | IVTV_HW_UPD64031A | IVTV_HW_UPD6408X, + .hw_audio = IVTV_HW_GPIO, + .hw_audio_ctrl = IVTV_HW_WM8739, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TVAUDIO | + IVTV_HW_TUNER | IVTV_HW_WM8739 | + IVTV_HW_UPD64031A | IVTV_HW_UPD6408X, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO1 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO2 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN }, + }, + .gpio_init = { .direction = 0xc301, .initial_value = 0x0200 }, + .gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 }, + .tuners = { + /* This card has the Panasonic VP27 tuner */ + { .std = V4L2_STD_525_60, .tuner = TUNER_PANASONIC_VP27 }, + }, + .pci_list = ivtv_pci_gv_mvprx, +}; + +/* ------------------------------------------------------------------------- */ + +/* I/O Data GV-MVP/RX2E card */ + +static const struct ivtv_card_pci_info ivtv_pci_gv_mvprx2e[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_IODATA, 0xd025 }, + {0, 0, 0} +}; + +static const struct ivtv_card ivtv_card_gv_mvprx2e = { + .type = IVTV_CARD_GV_MVPRX2E, + .name = "I/O Data GV-MVP/RX2E", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7115, + .hw_audio = IVTV_HW_GPIO, + .hw_audio_ctrl = IVTV_HW_WM8739, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER | + IVTV_HW_TVAUDIO | IVTV_HW_WM8739, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE4 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN }, + }, + .gpio_init = { .direction = 0xc301, .initial_value = 0x0200 }, + .gpio_audio_input = { .mask = 0xffff, .tuner = 0x0200, .linein = 0x0300 }, + .tuners = { + /* This card has the Panasonic VP27 tuner */ + { .std = V4L2_STD_525_60, .tuner = TUNER_PANASONIC_VP27 }, + }, + .pci_list = ivtv_pci_gv_mvprx2e, +}; + +/* ------------------------------------------------------------------------- */ + +/* GotVIEW PCI DVD card */ + +static const struct ivtv_card_pci_info ivtv_pci_gotview_pci_dvd[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN1, 0x0600 }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_gotview_pci_dvd = { + .type = IVTV_CARD_GOTVIEW_PCI_DVD, + .name = "GotView PCI DVD", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA717X, + .hw_audio = IVTV_HW_SAA717X, + .hw_audio_ctrl = IVTV_HW_SAA717X, + .hw_all = IVTV_HW_SAA717X | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, IVTV_SAA71XX_COMPOSITE1 }, /* pin 116 */ + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO0 }, /* pin 114/109 */ + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_COMPOSITE3 }, /* pin 118 */ + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_SAA717X_IN0 }, + { 
IVTV_CARD_INPUT_LINE_IN1, IVTV_SAA717X_IN2 }, + }, + .gpio_init = { .direction = 0xf000, .initial_value = 0xA000 }, + .tuners = { + /* This card has a Philips FQ1216ME MK3 tuner */ + { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, + }, + .pci_list = ivtv_pci_gotview_pci_dvd, +}; + +/* ------------------------------------------------------------------------- */ + +/* GotVIEW PCI DVD2 Deluxe card */ + +static const struct ivtv_card_pci_info ivtv_pci_gotview_pci_dvd2[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_GOTVIEW1, 0x0600 }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_gotview_pci_dvd2 = { + .type = IVTV_CARD_GOTVIEW_PCI_DVD2, + .name = "GotView PCI DVD2 Deluxe", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_CX25840, + .hw_audio = IVTV_HW_CX25840, + .hw_audio_ctrl = IVTV_HW_CX25840, + .hw_muxer = IVTV_HW_GPIO, + .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, + CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5, 0 }, + { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL, 1 }, + }, + .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, 2 }, + .gpio_init = { .direction = 0x0800, .initial_value = 0 }, + .gpio_audio_input = { .mask = 0x0800, .tuner = 0, .linein = 0, .radio = 0x0800 }, + .tuners = { + /* This card has a Philips FQ1216ME MK5 tuner */ + { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, + }, + .pci_list = ivtv_pci_gotview_pci_dvd2, +}; + +/* ------------------------------------------------------------------------- */ + +/* Yuan MPC622 miniPCI card */ + +static const struct ivtv_card_pci_info ivtv_pci_yuan_mpc622[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN2, 0xd998 }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_yuan_mpc622 = { + .type = IVTV_CARD_YUAN_MPC622, + .name = "Yuan MPC622", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_CX25840, + .hw_audio = IVTV_HW_CX25840, + .hw_audio_ctrl = IVTV_HW_CX25840, + .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, + CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 }, + { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL }, + }, + .gpio_init = { .direction = 0x00ff, .initial_value = 0x0002 }, + .tuners = { + /* This card has the TDA8290/TDA8275 tuner chips */ + { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_TDA8290 }, + }, + .pci_list = ivtv_pci_yuan_mpc622, +}; + +/* ------------------------------------------------------------------------- */ + +/* DIGITAL COWBOY DCT-MTVP1 card */ + +static const struct ivtv_card_pci_info ivtv_pci_dctmvtvp1[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xbfff }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_dctmvtvp1 = { + .type = IVTV_CARD_DCTMTVP1, + .name = "Digital Cowboy DCT-MTVP1", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_SAA7115 | IVTV_HW_UPD64031A | IVTV_HW_UPD6408X | + IVTV_HW_GPIO, + .hw_audio = IVTV_HW_GPIO, + .hw_audio_ctrl = IVTV_HW_GPIO, + .hw_all = IVTV_HW_GPIO | IVTV_HW_SAA7115 | IVTV_HW_TUNER | + IVTV_HW_UPD64031A | IVTV_HW_UPD6408X, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, 
IVTV_SAA71XX_SVIDEO0 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, IVTV_SAA71XX_SVIDEO2 }, + { IVTV_CARD_INPUT_COMPOSITE1, 1, IVTV_SAA71XX_SVIDEO2 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, IVTV_GPIO_TUNER }, + { IVTV_CARD_INPUT_LINE_IN1, IVTV_GPIO_LINE_IN }, + }, + .gpio_init = { .direction = 0xe080, .initial_value = 0x8000 }, + .gpio_audio_input = { .mask = 0x8080, .tuner = 0x8000, .linein = 0x0080 }, + .gpio_audio_mute = { .mask = 0x6000, .mute = 0x6000 }, + .gpio_audio_mode = { .mask = 0x4300, .mono = 0x4000, .stereo = 0x0200, + .lang1 = 0x0300, .lang2 = 0x0000, .both = 0x0200 }, + .gpio_video_input = { .mask = 0x0030, .tuner = 0x0000, + .composite = 0x0010, .svideo = 0x0020}, + .tuners = { + { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 }, + }, + .pci_list = ivtv_pci_dctmvtvp1, +}; + +/* ------------------------------------------------------------------------- */ + +#ifdef HAVE_XC3028 + +/* Yuan PG600-2/GotView PCI DVD Lite/Club3D ZAP-TV1x01 cards */ + +static const struct ivtv_card_pci_info ivtv_pci_pg600v2[] = { + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_YUAN3, 0x0600 }, + { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_GOTVIEW2, 0x0600 }, + { 0, 0, 0 } +}; + +static const struct ivtv_card ivtv_card_pg600v2 = { + .type = IVTV_CARD_PG600V2, + .name = "Yuan PG600-2, GotView PCI DVD Lite, Club3D ZAP-TV1x01", + .v4l2_capabilities = IVTV_CAP_ENCODER, + .hw_video = IVTV_HW_CX25840, + .hw_audio = IVTV_HW_CX25840, + .hw_audio_ctrl = IVTV_HW_CX25840, + .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER, + .video_inputs = { + { IVTV_CARD_INPUT_VID_TUNER, 0, CX25840_COMPOSITE2 }, + { IVTV_CARD_INPUT_SVIDEO1, 1, + CX25840_SVIDEO_LUMA3 | CX25840_SVIDEO_CHROMA4 }, + }, + .audio_inputs = { + { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 }, + { IVTV_CARD_INPUT_LINE_IN1, CX25840_AUDIO_SERIAL }, + }, + .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 }, + .tuners = { + { .std = V4L2_STD_ALL, .tuner = TUNER_XCEIVE_XC3028 }, + }, + .gpio_init = { .direction = 0x1000, .initial_value = 0x1000 }, /* tuner reset */ + .pci_list = ivtv_pci_pg600v2, +}; +#endif + +static const struct ivtv_card *ivtv_card_list[] = { + &ivtv_card_pvr250, + &ivtv_card_pvr350, + &ivtv_card_pvr150, + &ivtv_card_m179, + &ivtv_card_mpg600, + &ivtv_card_mpg160, + &ivtv_card_pg600, + &ivtv_card_avc2410, + &ivtv_card_avc2010, + &ivtv_card_tg5000tv, + &ivtv_card_va2000, + &ivtv_card_cx23416gyc, + &ivtv_card_gv_mvprx, + &ivtv_card_gv_mvprx2e, + &ivtv_card_gotview_pci_dvd, + &ivtv_card_gotview_pci_dvd2, + &ivtv_card_yuan_mpc622, + &ivtv_card_dctmvtvp1, +#ifdef HAVE_XC3028 + &ivtv_card_pg600v2, +#endif + + /* Variations of standard cards but with the same PCI IDs. + These cards must come last in this list. */ + &ivtv_card_pvr350_v1, + &ivtv_card_cx23416gyc_nogr, + &ivtv_card_cx23416gyc_nogrycs, +}; + +const struct ivtv_card *ivtv_get_card(u16 index) +{ + if (index >= ARRAY_SIZE(ivtv_card_list)) + return NULL; + return ivtv_card_list[index]; +} + +int ivtv_get_input(struct ivtv *itv, u16 index, struct v4l2_input *input) +{ + const struct ivtv_card_video_input *card_input = itv->card->video_inputs + index; + static const char * const input_strs[] = { + "Tuner 1", + "S-Video 1", + "S-Video 2", + "Composite 1", + "Composite 2", + "Composite 3" + }; + + memset(input, 0, sizeof(*input)); + if (index >= itv->nof_inputs) + return -EINVAL; + input->index = index; + strcpy(input->name, input_strs[card_input->video_type - 1]); + input->type = (card_input->video_type == IVTV_CARD_INPUT_VID_TUNER ? 
+ V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA); + input->audioset = (1 << itv->nof_audio_inputs) - 1; + input->std = (input->type == V4L2_INPUT_TYPE_TUNER) ? + itv->tuner_std : V4L2_STD_ALL; + return 0; +} + +int ivtv_get_output(struct ivtv *itv, u16 index, struct v4l2_output *output) +{ + const struct ivtv_card_output *card_output = itv->card->video_outputs + index; + + memset(output, 0, sizeof(*output)); + if (index >= itv->card->nof_outputs) + return -EINVAL; + output->index = index; + strcpy(output->name, card_output->name); + output->type = V4L2_OUTPUT_TYPE_ANALOG; + output->audioset = 1; + output->std = V4L2_STD_ALL; + return 0; +} + +int ivtv_get_audio_input(struct ivtv *itv, u16 index, struct v4l2_audio *audio) +{ + const struct ivtv_card_audio_input *aud_input = itv->card->audio_inputs + index; + static const char * const input_strs[] = { + "Tuner 1", + "Line In 1", + "Line In 2" + }; + + memset(audio, 0, sizeof(*audio)); + if (index >= itv->nof_audio_inputs) + return -EINVAL; + strcpy(audio->name, input_strs[aud_input->audio_type - 1]); + audio->index = index; + audio->capability = V4L2_AUDCAP_STEREO; + return 0; +} + +int ivtv_get_audio_output(struct ivtv *itv, u16 index, struct v4l2_audioout *aud_output) +{ + memset(aud_output, 0, sizeof(*aud_output)); + if (itv->card->video_outputs == NULL || index != 0) + return -EINVAL; + strcpy(aud_output->name, "A/V Audio Out"); + return 0; +} diff --git a/drivers/media/video/ivtv/ivtv-cards.h b/drivers/media/video/ivtv/ivtv-cards.h new file mode 100644 index 000000000000..15012f88b802 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-cards.h @@ -0,0 +1,207 @@ +/* + Functions to query card hardware + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* hardware flags */ +#define IVTV_HW_CX25840 (1 << 0) +#define IVTV_HW_SAA7115 (1 << 1) +#define IVTV_HW_SAA7127 (1 << 2) +#define IVTV_HW_MSP34XX (1 << 3) +#define IVTV_HW_TUNER (1 << 4) +#define IVTV_HW_WM8775 (1 << 5) +#define IVTV_HW_CS53L32A (1 << 6) +#define IVTV_HW_TVEEPROM (1 << 7) +#define IVTV_HW_SAA7114 (1 << 8) +#define IVTV_HW_TVAUDIO (1 << 9) +#define IVTV_HW_UPD64031A (1 << 10) +#define IVTV_HW_UPD6408X (1 << 11) +#define IVTV_HW_SAA717X (1 << 12) +#define IVTV_HW_WM8739 (1 << 13) +#define IVTV_HW_GPIO (1 << 14) + +#define IVTV_HW_SAA711X (IVTV_HW_SAA7115 | IVTV_HW_SAA7114) + +/* video inputs */ +#define IVTV_CARD_INPUT_VID_TUNER 1 +#define IVTV_CARD_INPUT_SVIDEO1 2 +#define IVTV_CARD_INPUT_SVIDEO2 3 +#define IVTV_CARD_INPUT_COMPOSITE1 4 +#define IVTV_CARD_INPUT_COMPOSITE2 5 +#define IVTV_CARD_INPUT_COMPOSITE3 6 + +/* audio inputs */ +#define IVTV_CARD_INPUT_AUD_TUNER 1 +#define IVTV_CARD_INPUT_LINE_IN1 2 +#define IVTV_CARD_INPUT_LINE_IN2 3 + +#define IVTV_CARD_MAX_VIDEO_INPUTS 6 +#define IVTV_CARD_MAX_AUDIO_INPUTS 3 +#define IVTV_CARD_MAX_TUNERS 2 + +/* SAA71XX HW inputs */ +#define IVTV_SAA71XX_COMPOSITE0 0 +#define IVTV_SAA71XX_COMPOSITE1 1 +#define IVTV_SAA71XX_COMPOSITE2 2 +#define IVTV_SAA71XX_COMPOSITE3 3 +#define IVTV_SAA71XX_COMPOSITE4 4 +#define IVTV_SAA71XX_COMPOSITE5 5 +#define IVTV_SAA71XX_SVIDEO0 6 +#define IVTV_SAA71XX_SVIDEO1 7 +#define IVTV_SAA71XX_SVIDEO2 8 +#define IVTV_SAA71XX_SVIDEO3 9 + +/* SAA717X needs to mark the tuner input by ORing with this flag */ +#define IVTV_SAA717X_TUNER_FLAG 0x80 + +/* Dummy HW input */ +#define IVTV_DUMMY_AUDIO 0 + +/* GPIO HW inputs */ +#define IVTV_GPIO_TUNER 0 +#define IVTV_GPIO_LINE_IN 1 + +/* SAA717X HW inputs */ +#define IVTV_SAA717X_IN0 0 +#define IVTV_SAA717X_IN1 1 +#define IVTV_SAA717X_IN2 2 + +/* V4L2 capability aliases */ +#define IVTV_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \ + V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE | \ + V4L2_CAP_SLICED_VBI_CAPTURE) +#define IVTV_CAP_DECODER (V4L2_CAP_VBI_OUTPUT | V4L2_CAP_VIDEO_OUTPUT | \ + V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_OVERLAY | V4L2_CAP_VIDEO_OUTPUT_POS) + +struct ivtv_card_video_input { + u8 video_type; /* video input type */ + u8 audio_index; /* index in ivtv_card_audio_input array */ + u16 video_input; /* hardware video input */ +}; + +struct ivtv_card_audio_input { + u8 audio_type; /* audio input type */ + u32 audio_input; /* hardware audio input */ + u16 muxer_input; /* hardware muxer input for boards with a + multiplexer chip */ +}; + +struct ivtv_card_output { + u8 name[32]; + u16 video_output; /* hardware video output */ +}; + +struct ivtv_card_pci_info { + u16 device; + u16 subsystem_vendor; + u16 subsystem_device; +}; + +/* GPIO definitions */ + +/* The mask is the set of bits used by the operation */ + +struct ivtv_gpio_init { /* set initial GPIO DIR and OUT values */ + u16 direction; /* DIR setting. 
Leave to 0 if no init is needed */ + u16 initial_value; +}; + +struct ivtv_gpio_video_input { /* select tuner/line in input */ + u16 mask; /* leave to 0 if not supported */ + u16 tuner; + u16 composite; + u16 svideo; +}; + +struct ivtv_gpio_audio_input { /* select tuner/line in input */ + u16 mask; /* leave to 0 if not supported */ + u16 tuner; + u16 linein; + u16 radio; +}; + +struct ivtv_gpio_audio_mute { + u16 mask; /* leave to 0 if not supported */ + u16 mute; /* set this value to mute, 0 to unmute */ +}; + +struct ivtv_gpio_audio_mode { + u16 mask; /* leave to 0 if not supported */ + u16 mono; /* set audio to mono */ + u16 stereo; /* set audio to stereo */ + u16 lang1; /* set audio to the first language */ + u16 lang2; /* set audio to the second language */ + u16 both; /* both languages are output */ +}; + +struct ivtv_gpio_audio_freq { + u16 mask; /* leave to 0 if not supported */ + u16 f32000; + u16 f44100; + u16 f48000; +}; + +struct ivtv_gpio_audio_detect { + u16 mask; /* leave to 0 if not supported */ + u16 stereo; /* if the input matches this value then + stereo is detected */ +}; + +struct ivtv_card_tuner { + v4l2_std_id std; /* standard for which the tuner is suitable */ + int tuner; /* tuner ID (from tuner.h) */ +}; + +/* for card information/parameters */ +struct ivtv_card { + int type; + char *name; + u32 v4l2_capabilities; + u32 hw_video; /* hardware used to process video */ + u32 hw_audio; /* hardware used to process audio */ + u32 hw_audio_ctrl; /* hardware used for the V4L2 controls (only 1 dev allowed) */ + u32 hw_muxer; /* hardware used to multiplex audio input */ + u32 hw_all; /* all hardware used by the board */ + struct ivtv_card_video_input video_inputs[IVTV_CARD_MAX_VIDEO_INPUTS]; + struct ivtv_card_audio_input audio_inputs[IVTV_CARD_MAX_AUDIO_INPUTS]; + struct ivtv_card_audio_input radio_input; + int nof_outputs; + const struct ivtv_card_output *video_outputs; + u8 gr_config; /* config byte for the ghost reduction device */ + + /* GPIO card-specific settings */ + struct ivtv_gpio_init gpio_init; + struct ivtv_gpio_video_input gpio_video_input; + struct ivtv_gpio_audio_input gpio_audio_input; + struct ivtv_gpio_audio_mute gpio_audio_mute; + struct ivtv_gpio_audio_mode gpio_audio_mode; + struct ivtv_gpio_audio_freq gpio_audio_freq; + struct ivtv_gpio_audio_detect gpio_audio_detect; + + struct ivtv_card_tuner tuners[IVTV_CARD_MAX_TUNERS]; + + /* list of device and subsystem vendor/devices that + correspond to this card type. */ + const struct ivtv_card_pci_info *pci_list; +}; + +int ivtv_get_input(struct ivtv *itv, u16 index, struct v4l2_input *input); +int ivtv_get_output(struct ivtv *itv, u16 index, struct v4l2_output *output); +int ivtv_get_audio_input(struct ivtv *itv, u16 index, struct v4l2_audio *input); +int ivtv_get_audio_output(struct ivtv *itv, u16 index, struct v4l2_audioout *output); +const struct ivtv_card *ivtv_get_card(u16 index); diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c new file mode 100644 index 000000000000..7a876c3e5b19 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-controls.c @@ -0,0 +1,303 @@ +/* + ioctl control functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-cards.h" +#include "ivtv-ioctl.h" +#include "ivtv-audio.h" +#include "ivtv-i2c.h" +#include "ivtv-mailbox.h" +#include "ivtv-controls.h" + +static const u32 user_ctrls[] = { + V4L2_CID_USER_CLASS, + V4L2_CID_BRIGHTNESS, + V4L2_CID_CONTRAST, + V4L2_CID_SATURATION, + V4L2_CID_HUE, + V4L2_CID_AUDIO_VOLUME, + V4L2_CID_AUDIO_BALANCE, + V4L2_CID_AUDIO_BASS, + V4L2_CID_AUDIO_TREBLE, + V4L2_CID_AUDIO_MUTE, + V4L2_CID_AUDIO_LOUDNESS, + 0 +}; + +static const u32 *ctrl_classes[] = { + user_ctrls, + cx2341x_mpeg_ctrls, + NULL +}; + +static int ivtv_queryctrl(struct ivtv *itv, struct v4l2_queryctrl *qctrl) +{ + const char *name; + + IVTV_DEBUG_IOCTL("VIDIOC_QUERYCTRL(%08x)\n", qctrl->id); + + qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id); + if (qctrl->id == 0) + return -EINVAL; + + switch (qctrl->id) { + /* Standard V4L2 controls */ + case V4L2_CID_BRIGHTNESS: + case V4L2_CID_HUE: + case V4L2_CID_SATURATION: + case V4L2_CID_CONTRAST: + if (itv->video_dec_func(itv, VIDIOC_QUERYCTRL, qctrl)) + qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; + return 0; + + case V4L2_CID_AUDIO_VOLUME: + case V4L2_CID_AUDIO_MUTE: + case V4L2_CID_AUDIO_BALANCE: + case V4L2_CID_AUDIO_BASS: + case V4L2_CID_AUDIO_TREBLE: + case V4L2_CID_AUDIO_LOUDNESS: + if (ivtv_i2c_hw(itv, itv->card->hw_audio_ctrl, VIDIOC_QUERYCTRL, qctrl)) + qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; + return 0; + + default: + if (cx2341x_ctrl_query(&itv->params, qctrl)) + qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; + return 0; + } + strncpy(qctrl->name, name, sizeof(qctrl->name) - 1); + qctrl->name[sizeof(qctrl->name) - 1] = 0; + return 0; +} + +static int ivtv_querymenu(struct ivtv *itv, struct v4l2_querymenu *qmenu) +{ + struct v4l2_queryctrl qctrl; + + qctrl.id = qmenu->id; + ivtv_queryctrl(itv, &qctrl); + return v4l2_ctrl_query_menu(qmenu, &qctrl, cx2341x_ctrl_get_menu(qmenu->id)); +} + +static int ivtv_s_ctrl(struct ivtv *itv, struct v4l2_control *vctrl) +{ + s32 v = vctrl->value; + + IVTV_DEBUG_IOCTL("VIDIOC_S_CTRL(%08x, %x)\n", vctrl->id, v); + + switch (vctrl->id) { + /* Standard V4L2 controls */ + case V4L2_CID_BRIGHTNESS: + case V4L2_CID_HUE: + case V4L2_CID_SATURATION: + case V4L2_CID_CONTRAST: + return itv->video_dec_func(itv, VIDIOC_S_CTRL, vctrl); + + case V4L2_CID_AUDIO_VOLUME: + case V4L2_CID_AUDIO_MUTE: + case V4L2_CID_AUDIO_BALANCE: + case V4L2_CID_AUDIO_BASS: + case V4L2_CID_AUDIO_TREBLE: + case V4L2_CID_AUDIO_LOUDNESS: + return ivtv_i2c_hw(itv, itv->card->hw_audio_ctrl, VIDIOC_S_CTRL, vctrl); + + default: + IVTV_DEBUG_IOCTL("invalid control %x\n", vctrl->id); + return -EINVAL; + } + return 0; +} + +static int ivtv_g_ctrl(struct ivtv *itv, struct v4l2_control *vctrl) +{ + IVTV_DEBUG_IOCTL("VIDIOC_G_CTRL(%08x)\n", vctrl->id); + + switch (vctrl->id) { + /* Standard V4L2 controls */ + case V4L2_CID_BRIGHTNESS: + case V4L2_CID_HUE: + case V4L2_CID_SATURATION: + case V4L2_CID_CONTRAST: + return itv->video_dec_func(itv, VIDIOC_G_CTRL, vctrl); + + case V4L2_CID_AUDIO_VOLUME: + case V4L2_CID_AUDIO_MUTE: + case V4L2_CID_AUDIO_BALANCE: + case V4L2_CID_AUDIO_BASS: + 
case V4L2_CID_AUDIO_TREBLE: + case V4L2_CID_AUDIO_LOUDNESS: + return ivtv_i2c_hw(itv, itv->card->hw_audio_ctrl, VIDIOC_G_CTRL, vctrl); + default: + IVTV_DEBUG_IOCTL("invalid control %x\n", vctrl->id); + return -EINVAL; + } + return 0; +} + +static int ivtv_setup_vbi_fmt(struct ivtv *itv, enum v4l2_mpeg_stream_vbi_fmt fmt) +{ + if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_CAPTURE)) + return -EINVAL; + if (atomic_read(&itv->capturing) > 0) + return -EBUSY; + + /* First try to allocate sliced VBI buffers if needed. */ + if (fmt && itv->vbi.sliced_mpeg_data[0] == NULL) { + int i; + + for (i = 0; i < IVTV_VBI_FRAMES; i++) { + /* Yuck, hardcoded. Needs to be a define */ + itv->vbi.sliced_mpeg_data[i] = kmalloc(2049, GFP_KERNEL); + if (itv->vbi.sliced_mpeg_data[i] == NULL) { + while (--i >= 0) { + kfree(itv->vbi.sliced_mpeg_data[i]); + itv->vbi.sliced_mpeg_data[i] = NULL; + } + return -ENOMEM; + } + } + } + + itv->vbi.insert_mpeg = fmt; + + if (itv->vbi.insert_mpeg == 0) { + return 0; + } + /* Need sliced data for mpeg insertion */ + if (get_service_set(itv->vbi.sliced_in) == 0) { + if (itv->is_60hz) + itv->vbi.sliced_in->service_set = V4L2_SLICED_CAPTION_525; + else + itv->vbi.sliced_in->service_set = V4L2_SLICED_WSS_625; + expand_service_set(itv->vbi.sliced_in, itv->is_50hz); + } + return 0; +} + +int ivtv_control_ioctls(struct ivtv *itv, unsigned int cmd, void *arg) +{ + struct v4l2_control ctrl; + + switch (cmd) { + case VIDIOC_QUERYMENU: + IVTV_DEBUG_IOCTL("VIDIOC_QUERYMENU\n"); + return ivtv_querymenu(itv, arg); + + case VIDIOC_QUERYCTRL: + return ivtv_queryctrl(itv, arg); + + case VIDIOC_S_CTRL: + return ivtv_s_ctrl(itv, arg); + + case VIDIOC_G_CTRL: + return ivtv_g_ctrl(itv, arg); + + case VIDIOC_S_EXT_CTRLS: + { + struct v4l2_ext_controls *c = arg; + + if (c->ctrl_class == V4L2_CTRL_CLASS_USER) { + int i; + int err = 0; + + for (i = 0; i < c->count; i++) { + ctrl.id = c->controls[i].id; + ctrl.value = c->controls[i].value; + err = ivtv_s_ctrl(itv, &ctrl); + c->controls[i].value = ctrl.value; + if (err) { + c->error_idx = i; + break; + } + } + return err; + } + IVTV_DEBUG_IOCTL("VIDIOC_S_EXT_CTRLS\n"); + if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) { + struct cx2341x_mpeg_params p = itv->params; + int err = cx2341x_ext_ctrls(&p, arg, cmd); + + if (err) + return err; + + if (p.video_encoding != itv->params.video_encoding) { + int is_mpeg1 = p.video_encoding == + V4L2_MPEG_VIDEO_ENCODING_MPEG_1; + struct v4l2_format fmt; + + /* fix videodecoder resolution */ + fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + fmt.fmt.pix.width = itv->params.width / (is_mpeg1 ? 
2 : 1); + fmt.fmt.pix.height = itv->params.height; + itv->video_dec_func(itv, VIDIOC_S_FMT, &fmt); + } + err = cx2341x_update(itv, ivtv_api_func, &itv->params, &p); + if (!err && itv->params.stream_vbi_fmt != p.stream_vbi_fmt) { + err = ivtv_setup_vbi_fmt(itv, p.stream_vbi_fmt); + } + itv->params = p; + itv->dualwatch_stereo_mode = p.audio_properties & 0x0300; + ivtv_audio_set_audio_clock_freq(itv, p.audio_properties & 0x03); + return err; + } + return -EINVAL; + } + + case VIDIOC_G_EXT_CTRLS: + { + struct v4l2_ext_controls *c = arg; + + if (c->ctrl_class == V4L2_CTRL_CLASS_USER) { + int i; + int err = 0; + + for (i = 0; i < c->count; i++) { + ctrl.id = c->controls[i].id; + ctrl.value = c->controls[i].value; + err = ivtv_g_ctrl(itv, &ctrl); + c->controls[i].value = ctrl.value; + if (err) { + c->error_idx = i; + break; + } + } + return err; + } + IVTV_DEBUG_IOCTL("VIDIOC_G_EXT_CTRLS\n"); + if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) + return cx2341x_ext_ctrls(&itv->params, arg, cmd); + return -EINVAL; + } + + case VIDIOC_TRY_EXT_CTRLS: + { + struct v4l2_ext_controls *c = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_TRY_EXT_CTRLS\n"); + if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) + return cx2341x_ext_ctrls(&itv->params, arg, cmd); + return -EINVAL; + } + + default: + return -EINVAL; + } + return 0; +} diff --git a/drivers/media/video/ivtv/ivtv-controls.h b/drivers/media/video/ivtv/ivtv-controls.h new file mode 100644 index 000000000000..5a11149725ad --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-controls.h @@ -0,0 +1,21 @@ +/* + ioctl control functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +int ivtv_control_ioctls(struct ivtv *itv, unsigned int cmd, void *arg); diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c new file mode 100644 index 000000000000..8d3876588b88 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-driver.c @@ -0,0 +1,1385 @@ +/* + ivtv driver initialization and card probing + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* Main Driver file for the ivtv project: + * Driver for the Conexant CX23415/CX23416 chip. + * Author: Kevin Thayer (nufan_wfk at yahoo.com) + * License: GPL + * http://www.ivtvdriver.org + * + * ----- + * MPG600/MPG160 support by T.Adachi + * and Takeru KOMORIYA + * + * AVerMedia M179 GPIO info by Chris Pinkham + * using information provided by Jiun-Kuei Jung @ AVerMedia. + * + * Kurouto Sikou CX23416GYC-STVLP tested by K.Ohta + * using information from T.Adachi,Takeru KOMORIYA and others :-) + * + * Nagase TRANSGEAR 5000TV, Aopen VA2000MAX-STN6 and I/O data GV-MVP/RX + * version by T.Adachi. Special thanks Mr.Suzuki + */ + +#include "ivtv-driver.h" +#include "ivtv-version.h" +#include "ivtv-fileops.h" +#include "ivtv-i2c.h" +#include "ivtv-firmware.h" +#include "ivtv-queue.h" +#include "ivtv-udma.h" +#include "ivtv-irq.h" +#include "ivtv-mailbox.h" +#include "ivtv-streams.h" +#include "ivtv-ioctl.h" +#include "ivtv-cards.h" +#include "ivtv-vbi.h" +#include "ivtv-audio.h" +#include "ivtv-gpio.h" +#include "ivtv-yuv.h" + +#include +#include +#include + +/* var to keep track of the number of array elements in use */ +int ivtv_cards_active = 0; + +/* If you have already X v4l cards, then set this to X. This way + the device numbers stay matched. Example: you have a WinTV card + without radio and a PVR-350 with. Normally this would give a + video1 device together with a radio0 device for the PVR. By + setting this to 1 you ensure that radio0 is now also radio1. */ +int ivtv_first_minor = 0; + +/* Master variable for all ivtv info */ +struct ivtv *ivtv_cards[IVTV_MAX_CARDS]; + +/* Protects ivtv_cards_active */ +spinlock_t ivtv_cards_lock = SPIN_LOCK_UNLOCKED; + +/* add your revision and whatnot here */ +static struct pci_device_id ivtv_pci_tbl[] __devinitdata = { + {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl); + +const u32 yuv_offset[4] = { + IVTV_YUV_BUFFER_OFFSET, + IVTV_YUV_BUFFER_OFFSET_1, + IVTV_YUV_BUFFER_OFFSET_2, + IVTV_YUV_BUFFER_OFFSET_3 +}; + +/* Parameter declarations */ +static int cardtype[IVTV_MAX_CARDS]; +static int tuner[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; +static int radio[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; + +static int cardtype_c = 1; +static int tuner_c = 1; +static int radio_c = 1; +static char pal[] = "--"; +static char secam[] = "--"; +static char ntsc[] = "-"; + +/* Buffers */ +static int enc_mpg_buffers = IVTV_DEFAULT_ENC_MPG_BUFFERS; +static int enc_yuv_buffers = IVTV_DEFAULT_ENC_YUV_BUFFERS; +static int enc_vbi_buffers = IVTV_DEFAULT_ENC_VBI_BUFFERS; +static int enc_pcm_buffers = IVTV_DEFAULT_ENC_PCM_BUFFERS; +static int dec_mpg_buffers = IVTV_DEFAULT_DEC_MPG_BUFFERS; +static int dec_yuv_buffers = IVTV_DEFAULT_DEC_YUV_BUFFERS; +static int dec_vbi_buffers = IVTV_DEFAULT_DEC_VBI_BUFFERS; + +static int ivtv_yuv_mode = 0; +static int ivtv_yuv_threshold=480; +static int ivtv_pci_latency = 1; + +int ivtv_debug = 0; + +int newi2c = -1; + +module_param_array(tuner, int, &tuner_c, 0644); +module_param_array(radio, bool, &radio_c, 0644); +module_param_array(cardtype, int, &cardtype_c, 0644); +module_param_string(pal, pal, 
sizeof(pal), 0644); +module_param_string(secam, secam, sizeof(secam), 0644); +module_param_string(ntsc, ntsc, sizeof(ntsc), 0644); +module_param_named(debug,ivtv_debug, int, 0644); +module_param(ivtv_pci_latency, int, 0644); +module_param(ivtv_yuv_mode, int, 0644); +module_param(ivtv_yuv_threshold, int, 0644); +module_param(ivtv_first_minor, int, 0644); + +module_param(enc_mpg_buffers, int, 0644); +module_param(enc_yuv_buffers, int, 0644); +module_param(enc_vbi_buffers, int, 0644); +module_param(enc_pcm_buffers, int, 0644); +module_param(dec_mpg_buffers, int, 0644); +module_param(dec_yuv_buffers, int, 0644); +module_param(dec_vbi_buffers, int, 0644); + +module_param(newi2c, int, 0644); + +MODULE_PARM_DESC(tuner, "Tuner type selection,\n" + "\t\t\tsee tuner.h for values"); +MODULE_PARM_DESC(radio, + "Enable or disable the radio. Use only if autodetection\n" + "\t\t\tfails. 0 = disable, 1 = enable"); +MODULE_PARM_DESC(cardtype, + "Only use this option if your card is not detected properly.\n" + "\t\tSpecify card type:\n" + "\t\t\t 1 = WinTV PVR 250\n" + "\t\t\t 2 = WinTV PVR 350\n" + "\t\t\t 3 = WinTV PVR-150 or PVR-500\n" + "\t\t\t 4 = AVerMedia M179\n" + "\t\t\t 5 = YUAN MPG600/Kuroutoshikou iTVC16-STVLP\n" + "\t\t\t 6 = YUAN MPG160/Kuroutoshikou iTVC15-STVLP\n" + "\t\t\t 7 = YUAN PG600/DIAMONDMM PVR-550 (CX Falcon 2)\n" + "\t\t\t 8 = Adaptec AVC-2410\n" + "\t\t\t 9 = Adaptec AVC-2010\n" + "\t\t\t10 = NAGASE TRANSGEAR 5000TV\n" + "\t\t\t11 = AOpen VA2000MAX-STN6\n" + "\t\t\t12 = YUAN MPG600GR/Kuroutoshikou CX23416GYC-STVLP\n" + "\t\t\t13 = I/O Data GV-MVP/RX\n" + "\t\t\t14 = I/O Data GV-MVP/RX2E\n" + "\t\t\t15 = GOTVIEW PCI DVD\n" + "\t\t\t16 = GOTVIEW PCI DVD2 Deluxe\n" + "\t\t\t17 = Yuan MPC622\n" + "\t\t\t18 = Digital Cowboy DCT-MTVP1\n" +#ifdef HAVE_XC3028 + "\t\t\t19 = Yuan PG600V2/GotView PCI DVD Lite/Club3D ZAP-TV1x01\n" +#endif + "\t\t\t 0 = Autodetect (default)\n" + "\t\t\t-1 = Ignore this card\n\t\t"); +MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); +MODULE_PARM_DESC(secam, "Set SECAM standard: B, G, H, D, K, L, LC"); +MODULE_PARM_DESC(ntsc, "Set NTSC standard: M, J, K"); +MODULE_PARM_DESC(debug, + "Debug level (bitmask). 
Default: errors only\n" + "\t\t\t(debug = 511 gives full debugging)"); +MODULE_PARM_DESC(ivtv_pci_latency, + "Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n" + "\t\t\tDefault: Yes"); +MODULE_PARM_DESC(ivtv_yuv_mode, + "Specify the yuv playback mode:\n" + "\t\t\t0 = interlaced\n\t\t\t1 = progressive\n\t\t\t2 = auto\n" + "\t\t\tDefault: 0 (interlaced)"); +MODULE_PARM_DESC(ivtv_yuv_threshold, + "If ivtv_yuv_mode is 2 (auto) then playback content as\n\t\tprogressive if src height <= ivtv_yuvthreshold\n" + "\t\t\tDefault: 480");; +MODULE_PARM_DESC(enc_mpg_buffers, + "Encoder MPG Buffers (in MB)\n" + "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_MPG_BUFFERS)); +MODULE_PARM_DESC(enc_yuv_buffers, + "Encoder YUV Buffers (in MB)\n" + "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_YUV_BUFFERS)); +MODULE_PARM_DESC(enc_vbi_buffers, + "Encoder VBI Buffers (in MB)\n" + "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_VBI_BUFFERS)); +MODULE_PARM_DESC(enc_pcm_buffers, + "Encoder PCM buffers (in MB)\n" + "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_PCM_BUFFERS)); +MODULE_PARM_DESC(dec_mpg_buffers, + "Decoder MPG buffers (in MB)\n" + "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_MPG_BUFFERS)); +MODULE_PARM_DESC(dec_yuv_buffers, + "Decoder YUV buffers (in MB)\n" + "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_YUV_BUFFERS)); +MODULE_PARM_DESC(dec_vbi_buffers, + "Decoder VBI buffers (in MB)\n" + "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_VBI_BUFFERS)); +MODULE_PARM_DESC(newi2c, + "Use new I2C implementation\n" + "\t\t\t-1 is autodetect, 0 is off, 1 is on\n" + "\t\t\tDefault is autodetect"); + +MODULE_PARM_DESC(ivtv_first_minor, "Set minor assigned to first card"); + +MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil"); +MODULE_DESCRIPTION("CX23415/CX23416 driver"); +MODULE_SUPPORTED_DEVICE + ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n" + "\t\t\tYuan MPG series and similar)"); +MODULE_LICENSE("GPL"); + +MODULE_VERSION(IVTV_VERSION); + +void ivtv_clear_irq_mask(struct ivtv *itv, u32 mask) +{ + itv->irqmask &= ~mask; + write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK); +} + +void ivtv_set_irq_mask(struct ivtv *itv, u32 mask) +{ + itv->irqmask |= mask; + write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK); +} + +int ivtv_set_output_mode(struct ivtv *itv, int mode) +{ + int old_mode; + + spin_lock(&itv->lock); + old_mode = itv->output_mode; + if (old_mode == 0) + itv->output_mode = old_mode = mode; + spin_unlock(&itv->lock); + return old_mode; +} + +struct ivtv_stream *ivtv_get_output_stream(struct ivtv *itv) +{ + switch (itv->output_mode) { + case OUT_MPG: + return &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; + case OUT_YUV: + return &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; + default: + return NULL; + } +} + +int ivtv_waitq(wait_queue_head_t *waitq) +{ + DEFINE_WAIT(wait); + + prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE); + schedule(); + finish_wait(waitq, &wait); + return signal_pending(current) ? -EINTR : 0; +} + +/* Generic utility functions */ +int ivtv_sleep_timeout(int timeout, int intr) +{ + int ret; + + do { + set_current_state(intr ? 
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + timeout = schedule_timeout(timeout); + if (intr && (ret = signal_pending(current))) + return ret; + } while (timeout); + return 0; +} + +/* Release ioremapped memory */ +static void ivtv_iounmap(struct ivtv *itv) +{ + if (itv == NULL) + return; + + /* Release registers memory */ + if (itv->reg_mem != NULL) { + IVTV_DEBUG_INFO("releasing reg_mem\n"); + iounmap(itv->reg_mem); + itv->reg_mem = NULL; + } + /* Release io memory */ + if (itv->has_cx23415 && itv->dec_mem != NULL) { + IVTV_DEBUG_INFO("releasing dec_mem\n"); + iounmap(itv->dec_mem); + } + itv->dec_mem = NULL; + + /* Release io memory */ + if (itv->enc_mem != NULL) { + IVTV_DEBUG_INFO("releasing enc_mem\n"); + iounmap(itv->enc_mem); + itv->enc_mem = NULL; + } +} + +/* Hauppauge card? get values from tveeprom */ +void ivtv_read_eeprom(struct ivtv *itv, struct tveeprom *tv) +{ + u8 eedata[256]; + + itv->i2c_client.addr = 0xA0 >> 1; + tveeprom_read(&itv->i2c_client, eedata, sizeof(eedata)); + tveeprom_hauppauge_analog(&itv->i2c_client, tv, eedata); +} + +static void ivtv_process_eeprom(struct ivtv *itv) +{ + struct tveeprom tv; + int pci_slot = PCI_SLOT(itv->dev->devfn); + + ivtv_read_eeprom(itv, &tv); + + /* Many thanks to Steven Toth from Hauppauge for providing the + model numbers */ + switch (tv.model) { + /* In a few cases the PCI subsystem IDs do not correctly + identify the card. A better method is to check the + model number from the eeprom instead. */ + case 32000 ... 32999: + case 48000 ... 48099: /* 48??? range are PVR250s with a cx23415 */ + case 48400 ... 48599: + itv->card = ivtv_get_card(IVTV_CARD_PVR_250); + break; + case 48100 ... 48399: + case 48600 ... 48999: + itv->card = ivtv_get_card(IVTV_CARD_PVR_350); + break; + case 23000 ... 23999: /* PVR500 */ + case 25000 ... 25999: /* Low profile PVR150 */ + case 26000 ... 26999: /* Regular PVR150 */ + itv->card = ivtv_get_card(IVTV_CARD_PVR_150); + break; + case 0: + IVTV_ERR("Invalid EEPROM\n"); + return; + default: + IVTV_ERR("Unknown model %d, defaulting to PVR-150\n", tv.model); + itv->card = ivtv_get_card(IVTV_CARD_PVR_150); + break; + } + + switch (tv.model) { + /* Old style PVR350 (with an saa7114) uses this input for + the tuner. */ + case 48254: + itv->card = ivtv_get_card(IVTV_CARD_PVR_350_V1); + break; + default: + break; + } + + itv->v4l2_cap = itv->card->v4l2_capabilities; + itv->card_name = itv->card->name; + + /* If this is a PVR500 then it should be possible to detect whether it is the + first or second unit by looking at the subsystem device ID: is bit 4 is + set, then it is the second unit (according to info from Hauppauge). + + However, while this works for most cards, I have seen a few PVR500 cards + where both units have the same subsystem ID. + + So instead I look at the reported 'PCI slot' (which is the slot on the PVR500 + PCI bridge) and if it is 8, then it is assumed to be the first unit, otherwise + it is the second unit. It is possible that it is a different slot when ivtv is + used in Xen, in that case I ignore this card here. The worst that can happen + is that the card presents itself with a non-working radio device. + + This detection is needed since the eeprom reports incorrectly that a radio is + present on the second unit. */ + if (tv.model / 1000 == 23) { + itv->card_name = "WinTV PVR 500"; + if (pci_slot == 8 || pci_slot == 9) { + int is_first = (pci_slot & 1) == 0; + + itv->card_name = is_first ? 
"WinTV PVR 500 (unit #1)" : + "WinTV PVR 500 (unit #2)"; + if (!is_first) { + IVTV_INFO("Correcting tveeprom data: no radio present on second unit\n"); + tv.has_radio = 0; + } + } + } + IVTV_INFO("Autodetected %s\n", itv->card_name); + + switch (tv.tuner_hauppauge_model) { + case 85: + case 99: + case 112: + itv->pvr150_workaround = 1; + break; + default: + break; + } + if (tv.tuner_type == TUNER_ABSENT) + IVTV_ERR("tveeprom cannot autodetect tuner!"); + + if (itv->options.tuner == -1) + itv->options.tuner = tv.tuner_type; + if (itv->options.radio == -1) + itv->options.radio = (tv.has_radio != 0); + /* only enable newi2c if an IR blaster is present */ + /* FIXME: for 2.6.20 the test against 2 should be removed */ + if (itv->options.newi2c == -1 && tv.has_ir != -1 && tv.has_ir != 2) { + itv->options.newi2c = (tv.has_ir & 2) ? 1 : 0; + if (itv->options.newi2c) { + IVTV_INFO("reopen i2c bus for IR-blaster support\n"); + exit_ivtv_i2c(itv); + init_ivtv_i2c(itv); + } + } + + if (itv->std != 0) + /* user specified tuner standard */ + return; + + /* autodetect tuner standard */ + if (tv.tuner_formats & V4L2_STD_PAL) { + IVTV_DEBUG_INFO("PAL tuner detected\n"); + itv->std |= V4L2_STD_PAL_BG | V4L2_STD_PAL_H; + } else if (tv.tuner_formats & V4L2_STD_NTSC) { + IVTV_DEBUG_INFO("NTSC tuner detected\n"); + itv->std |= V4L2_STD_NTSC_M; + } else if (tv.tuner_formats & V4L2_STD_SECAM) { + IVTV_DEBUG_INFO("SECAM tuner detected\n"); + itv->std |= V4L2_STD_SECAM_L; + } else { + IVTV_INFO("No tuner detected, default to NTSC-M\n"); + itv->std |= V4L2_STD_NTSC_M; + } +} + +static v4l2_std_id ivtv_parse_std(struct ivtv *itv) +{ + switch (pal[0]) { + case '6': + return V4L2_STD_PAL_60; + case 'b': + case 'B': + case 'g': + case 'G': + return V4L2_STD_PAL_BG; + case 'h': + case 'H': + return V4L2_STD_PAL_H; + case 'n': + case 'N': + if (pal[1] == 'c' || pal[1] == 'C') + return V4L2_STD_PAL_Nc; + return V4L2_STD_PAL_N; + case 'i': + case 'I': + return V4L2_STD_PAL_I; + case 'd': + case 'D': + case 'k': + case 'K': + return V4L2_STD_PAL_DK; + case 'M': + case 'm': + return V4L2_STD_PAL_M; + case '-': + break; + default: + IVTV_WARN("pal= argument not recognised\n"); + return 0; + } + + switch (secam[0]) { + case 'b': + case 'B': + case 'g': + case 'G': + case 'h': + case 'H': + return V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H; + case 'd': + case 'D': + case 'k': + case 'K': + return V4L2_STD_SECAM_DK; + case 'l': + case 'L': + if (secam[1] == 'C' || secam[1] == 'c') + return V4L2_STD_SECAM_LC; + return V4L2_STD_SECAM_L; + case '-': + break; + default: + IVTV_WARN("secam= argument not recognised\n"); + return 0; + } + + switch (ntsc[0]) { + case 'm': + case 'M': + return V4L2_STD_NTSC_M; + case 'j': + case 'J': + return V4L2_STD_NTSC_M_JP; + case 'k': + case 'K': + return V4L2_STD_NTSC_M_KR; + case '-': + break; + default: + IVTV_WARN("ntsc= argument not recognised\n"); + return 0; + } + + /* no match found */ + return 0; +} + +static void ivtv_process_options(struct ivtv *itv) +{ + const char *chipname; + int i, j; + + itv->options.megabytes[IVTV_ENC_STREAM_TYPE_MPG] = enc_mpg_buffers; + itv->options.megabytes[IVTV_ENC_STREAM_TYPE_YUV] = enc_yuv_buffers; + itv->options.megabytes[IVTV_ENC_STREAM_TYPE_VBI] = enc_vbi_buffers; + itv->options.megabytes[IVTV_ENC_STREAM_TYPE_PCM] = enc_pcm_buffers; + itv->options.megabytes[IVTV_DEC_STREAM_TYPE_MPG] = dec_mpg_buffers; + itv->options.megabytes[IVTV_DEC_STREAM_TYPE_YUV] = dec_yuv_buffers; + itv->options.megabytes[IVTV_DEC_STREAM_TYPE_VBI] = dec_vbi_buffers; + 
itv->options.cardtype = cardtype[itv->num]; + itv->options.tuner = tuner[itv->num]; + itv->options.radio = radio[itv->num]; + itv->options.newi2c = newi2c; + + itv->std = ivtv_parse_std(itv); + itv->has_cx23415 = (itv->dev->device == PCI_DEVICE_ID_IVTV15); + chipname = itv->has_cx23415 ? "cx23415" : "cx23416"; + if (itv->options.cardtype == -1) { + IVTV_INFO("Ignore card (detected %s based chip)\n", chipname); + return; + } + if ((itv->card = ivtv_get_card(itv->options.cardtype - 1))) { + IVTV_INFO("User specified %s card (detected %s based chip)\n", + itv->card->name, chipname); + } else if (itv->options.cardtype != 0) { + IVTV_ERR("Unknown user specified type, trying to autodetect card\n"); + } + if (itv->card == NULL) { + if (itv->dev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE || + itv->dev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT1 || + itv->dev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT2) { + itv->card = ivtv_get_card(itv->has_cx23415 ? IVTV_CARD_PVR_350 : IVTV_CARD_PVR_150); + IVTV_INFO("Autodetected Hauppauge card (%s based)\n", + chipname); + } + } + if (itv->card == NULL) { + for (i = 0; (itv->card = ivtv_get_card(i)); i++) { + if (itv->card->pci_list == NULL) + continue; + for (j = 0; itv->card->pci_list[j].device; j++) { + if (itv->dev->device != + itv->card->pci_list[j].device) + continue; + if (itv->dev->subsystem_vendor != + itv->card->pci_list[j].subsystem_vendor) + continue; + if (itv->dev->subsystem_device != + itv->card->pci_list[j].subsystem_device) + continue; + IVTV_INFO("Autodetected %s card (%s based)\n", + itv->card->name, chipname); + goto done; + } + } + } +done: + + if (itv->card == NULL) { + itv->card = ivtv_get_card(IVTV_CARD_PVR_150); + IVTV_ERR("Unknown card: vendor/device: %04x/%04x\n", + itv->dev->vendor, itv->dev->device); + IVTV_ERR(" subsystem vendor/device: %04x/%04x\n", + itv->dev->subsystem_vendor, itv->dev->subsystem_device); + IVTV_ERR(" %s based\n", chipname); + IVTV_ERR("Defaulting to %s card\n", itv->card->name); + IVTV_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n"); + IVTV_ERR("card you have to the ivtv-devel mailinglist (www.ivtvdriver.org)\n"); + IVTV_ERR("Prefix your subject line with [UNKNOWN CARD].\n"); + } + itv->v4l2_cap = itv->card->v4l2_capabilities; + itv->card_name = itv->card->name; +} + +/* Precondition: the ivtv structure has been memset to 0. Only + the dev and num fields have been filled in. + No assumptions on the card type may be made here (see ivtv_init_struct2 + for that). 
+ */ +static int __devinit ivtv_init_struct1(struct ivtv *itv) +{ + itv->base_addr = pci_resource_start(itv->dev, 0); + itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */ + itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */ + + mutex_init(&itv->i2c_bus_lock); + mutex_init(&itv->udma.lock); + + itv->lock = SPIN_LOCK_UNLOCKED; + itv->dma_reg_lock = SPIN_LOCK_UNLOCKED; + + itv->vbi.work_queues = create_workqueue("ivtv_vbi"); + if (itv->vbi.work_queues == NULL) { + IVTV_ERR("Could not create VBI workqueue\n"); + return -1; + } + + itv->yuv_info.work_queues = create_workqueue("ivtv_yuv"); + if (itv->yuv_info.work_queues == NULL) { + IVTV_ERR("Could not create YUV workqueue\n"); + destroy_workqueue(itv->vbi.work_queues); + return -1; + } + + INIT_WORK(&itv->vbi.work_queue, vbi_work_handler); + INIT_WORK(&itv->yuv_info.work_queue, ivtv_yuv_work_handler); + + /* start counting open_id at 1 */ + itv->open_id = 1; + + /* Initial settings */ + cx2341x_fill_defaults(&itv->params); + itv->params.port = CX2341X_PORT_MEMORY; + itv->params.capabilities = CX2341X_CAP_HAS_SLICED_VBI; + init_waitqueue_head(&itv->cap_w); + init_waitqueue_head(&itv->event_waitq); + init_waitqueue_head(&itv->vsync_waitq); + init_waitqueue_head(&itv->dma_waitq); + init_timer(&itv->dma_timer); + itv->dma_timer.function = ivtv_unfinished_dma; + itv->dma_timer.data = (unsigned long)itv; + + itv->cur_dma_stream = -1; + itv->audio_stereo_mode = AUDIO_STEREO; + itv->audio_bilingual_mode = AUDIO_MONO_LEFT; + + /* Ctrls */ + itv->speed = 1000; + + /* VBI */ + itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; + itv->vbi.sliced_in = &itv->vbi.in.fmt.sliced; + + /* OSD */ + itv->osd_global_alpha_state = 1; + itv->osd_global_alpha = 255; + + /* YUV */ + atomic_set(&itv->yuv_info.next_dma_frame, -1); + itv->yuv_info.lace_mode = ivtv_yuv_mode; + itv->yuv_info.lace_threshold = ivtv_yuv_threshold; + return 0; +} + +/* Second initialization part. Here the card type has been + autodetected. 
*/ +static void __devinit ivtv_init_struct2(struct ivtv *itv) +{ + int i; + + for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++) + if (itv->card->video_inputs[i].video_type == 0) + break; + itv->nof_inputs = i; + for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++) + if (itv->card->audio_inputs[i].audio_type == 0) + break; + itv->nof_audio_inputs = i; + + /* 0x00EF = saa7114(239) 0x00F0 = saa7115(240) 0x0106 = micro */ + if (itv->card->hw_all & (IVTV_HW_SAA7115 | IVTV_HW_SAA717X)) + itv->digitizer = 0xF1; + else if (itv->card->hw_all & IVTV_HW_SAA7114) + itv->digitizer = 0xEF; + else /* cx25840 */ + itv->digitizer = 0x140; + + if (itv->card->hw_all & IVTV_HW_CX25840) { + itv->vbi.sliced_size = 288; /* multiple of 16, real size = 284 */ + } else { + itv->vbi.sliced_size = 64; /* multiple of 16, real size = 52 */ + } + + /* Find tuner input */ + for (i = 0; i < itv->nof_inputs; i++) { + if (itv->card->video_inputs[i].video_type == + IVTV_CARD_INPUT_VID_TUNER) + break; + } + if (i == itv->nof_inputs) + i = 0; + itv->active_input = i; + itv->audio_input = itv->card->video_inputs[i].audio_index; + if (itv->card->hw_all & IVTV_HW_CX25840) + itv->video_dec_func = ivtv_cx25840; + else if (itv->card->hw_all & IVTV_HW_SAA717X) + itv->video_dec_func = ivtv_saa717x; + else + itv->video_dec_func = ivtv_saa7115; +} + +static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *dev, + const struct pci_device_id *pci_id) +{ + u16 cmd; + unsigned char pci_latency; + + IVTV_DEBUG_INFO("Enabling pci device\n"); + + if (pci_enable_device(dev)) { + IVTV_ERR("Can't enable device %d!\n", itv->num); + return -EIO; + } + if (pci_set_dma_mask(dev, 0xffffffff)) { + IVTV_ERR("No suitable DMA available on card %d.\n", itv->num); + return -EIO; + } + if (!request_mem_region(itv->base_addr, IVTV_ENCODER_SIZE, "ivtv encoder")) { + IVTV_ERR("Cannot request encoder memory region on card %d.\n", itv->num); + return -EIO; + } + + if (!request_mem_region(itv->base_addr + IVTV_REG_OFFSET, + IVTV_REG_SIZE, "ivtv registers")) { + IVTV_ERR("Cannot request register memory region on card %d.\n", itv->num); + release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); + return -EIO; + } + + if (itv->has_cx23415 && + !request_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, + IVTV_DECODER_SIZE, "ivtv decoder")) { + IVTV_ERR("Cannot request decoder memory region on card %d.\n", itv->num); + release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); + release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); + return -EIO; + } + + /* Check for bus mastering */ + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + IVTV_DEBUG_INFO("Attempting to enable Bus Mastering\n"); + pci_set_master(dev); + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + IVTV_ERR("Bus Mastering is not enabled\n"); + return -ENXIO; + } + } + IVTV_DEBUG_INFO("Bus Mastering Enabled.\n"); + + pci_read_config_byte(dev, PCI_CLASS_REVISION, &itv->card_rev); + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency); + + if (pci_latency < 64 && ivtv_pci_latency) { + IVTV_INFO("Unreasonably low latency timer, " + "setting to 64 (was %d)\n", pci_latency); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency); + } + /* This config space value relates to DMA latencies. The + default value 0x8080 is too low however and will lead + to DMA errors. 0xffff is the max value which solves + these problems. 
*/ + pci_write_config_dword(dev, 0x40, 0xffff); + + IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, " + "irq: %d, latency: %d, memory: 0x%lx\n", + itv->dev->device, itv->card_rev, dev->bus->number, + PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), + itv->dev->irq, pci_latency, (unsigned long)itv->base_addr); + + return 0; +} + +static void ivtv_request_module(struct ivtv *itv, const char *name) +{ + if (request_module(name) != 0) { + IVTV_ERR("Failed to load module %s\n", name); + } else { + IVTV_DEBUG_INFO("Loaded module %s\n", name); + } +} + +static void ivtv_load_and_init_modules(struct ivtv *itv) +{ + struct v4l2_control ctrl; + u32 hw = itv->card->hw_all; + int i; + + /* load modules */ +#ifndef CONFIG_VIDEO_TUNER + if (hw & IVTV_HW_TUNER) { + ivtv_request_module(itv, "tuner"); +#ifdef HAVE_XC3028 + if (itv->options.tuner == TUNER_XCEIVE_XC3028) + ivtv_request_module(itv, "xc3028-tuner"); +#endif + } +#endif +#ifndef CONFIG_VIDEO_CX25840 + if (hw & IVTV_HW_CX25840) + ivtv_request_module(itv, "cx25840"); +#endif +#ifndef CONFIG_VIDEO_SAA711X + if (hw & IVTV_HW_SAA711X) + ivtv_request_module(itv, "saa7115"); +#endif +#ifndef CONFIG_VIDEO_SAA7127 + if (hw & IVTV_HW_SAA7127) + ivtv_request_module(itv, "saa7127"); +#endif + if (hw & IVTV_HW_SAA717X) + ivtv_request_module(itv, "saa717x"); +#ifndef CONFIG_VIDEO_UPD64031A + if (hw & IVTV_HW_UPD64031A) + ivtv_request_module(itv, "upd64031a"); +#endif +#ifndef CONFIG_VIDEO_UPD64083 + if (hw & IVTV_HW_UPD6408X) + ivtv_request_module(itv, "upd64083"); +#endif +#ifndef CONFIG_VIDEO_MSP3400 + if (hw & IVTV_HW_MSP34XX) + ivtv_request_module(itv, "msp3400"); +#endif + if (hw & IVTV_HW_TVAUDIO) + ivtv_request_module(itv, "tvaudio"); +#ifndef CONFIG_VIDEO_WM8775 + if (hw & IVTV_HW_WM8775) + ivtv_request_module(itv, "wm8775"); +#endif +#ifndef CONFIG_VIDEO_WM8739 + if (hw & IVTV_HW_WM8739) + ivtv_request_module(itv, "wm8739"); +#endif +#ifndef CONFIG_VIDEO_CS53L32A + if (hw & IVTV_HW_CS53L32A) + ivtv_request_module(itv, "cs53l32a"); +#endif + + /* check which i2c devices are actually found */ + for (i = 0; i < 32; i++) { + u32 device = 1 << i; + + if (!(device & hw)) + continue; + if (device == IVTV_HW_GPIO) { + /* GPIO is always available */ + itv->hw_flags |= IVTV_HW_GPIO; + continue; + } + if (ivtv_i2c_hw_addr(itv, device) > 0) + itv->hw_flags |= device; + } + + hw = itv->hw_flags; + + if (itv->card->type == IVTV_CARD_CX23416GYC) { + /* Several variations of this card exist, detect which card + type should be used. 
*/ + if ((hw & (IVTV_HW_UPD64031A | IVTV_HW_UPD6408X)) == 0) + itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGRYCS); + else if ((hw & IVTV_HW_UPD64031A) == 0) + itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGR); + } + + if (hw & IVTV_HW_CX25840) { + /* CX25840_CID_ENABLE_PVR150_WORKAROUND */ + ctrl.id = V4L2_CID_PRIVATE_BASE; + ctrl.value = itv->pvr150_workaround; + itv->video_dec_func(itv, VIDIOC_S_CTRL, &ctrl); + + itv->vbi.raw_decoder_line_size = 1444; + itv->vbi.raw_decoder_sav_odd_field = 0x20; + itv->vbi.raw_decoder_sav_even_field = 0x60; + itv->vbi.sliced_decoder_line_size = 272; + itv->vbi.sliced_decoder_sav_odd_field = 0xB0; + itv->vbi.sliced_decoder_sav_even_field = 0xF0; + } + + if (hw & IVTV_HW_SAA711X) { + struct v4l2_chip_ident v = { V4L2_CHIP_MATCH_I2C_DRIVER, I2C_DRIVERID_SAA711X }; + + /* determine the exact saa711x model */ + itv->hw_flags &= ~IVTV_HW_SAA711X; + + ivtv_saa7115(itv, VIDIOC_G_CHIP_IDENT, &v); + if (v.ident == V4L2_IDENT_SAA7114) { + itv->hw_flags |= IVTV_HW_SAA7114; + /* VBI is not yet supported by the saa7114 driver. */ + itv->v4l2_cap &= ~(V4L2_CAP_SLICED_VBI_CAPTURE|V4L2_CAP_VBI_CAPTURE); + } + else { + itv->hw_flags |= IVTV_HW_SAA7115; + } + itv->vbi.raw_decoder_line_size = 1443; + itv->vbi.raw_decoder_sav_odd_field = 0x25; + itv->vbi.raw_decoder_sav_even_field = 0x62; + itv->vbi.sliced_decoder_line_size = 51; + itv->vbi.sliced_decoder_sav_odd_field = 0xAB; + itv->vbi.sliced_decoder_sav_even_field = 0xEC; + } + + if (hw & IVTV_HW_SAA717X) { + itv->vbi.raw_decoder_line_size = 1443; + itv->vbi.raw_decoder_sav_odd_field = 0x25; + itv->vbi.raw_decoder_sav_even_field = 0x62; + itv->vbi.sliced_decoder_line_size = 51; + itv->vbi.sliced_decoder_sav_odd_field = 0xAB; + itv->vbi.sliced_decoder_sav_even_field = 0xEC; + } +} + +static int __devinit ivtv_probe(struct pci_dev *dev, + const struct pci_device_id *pci_id) +{ + int retval = 0; + int video_input; + int yuv_buf_size; + int vbi_buf_size; + int fw_retry_count = 3; + struct ivtv *itv; + struct v4l2_frequency vf; + + spin_lock(&ivtv_cards_lock); + + /* Make sure we've got a place for this card */ + if (ivtv_cards_active == IVTV_MAX_CARDS) { + printk(KERN_ERR "ivtv: Maximum number of cards detected (%d).\n", + ivtv_cards_active); + spin_unlock(&ivtv_cards_lock); + return -ENOMEM; + } + + itv = kzalloc(sizeof(struct ivtv), GFP_ATOMIC); + if (itv == 0) { + spin_unlock(&ivtv_cards_lock); + return -ENOMEM; + } + ivtv_cards[ivtv_cards_active] = itv; + itv->dev = dev; + itv->num = ivtv_cards_active++; + snprintf(itv->name, sizeof(itv->name) - 1, "ivtv%d", itv->num); + if (itv->num) { + printk(KERN_INFO "ivtv: ====================== NEXT CARD ======================\n"); + } + + spin_unlock(&ivtv_cards_lock); + + ivtv_process_options(itv); + if (itv->options.cardtype == -1) { + retval = -ENODEV; + goto err; + } + if (ivtv_init_struct1(itv)) { + retval = -ENOMEM; + goto err; + } + + IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr); + + /* PCI Device Setup */ + if ((retval = ivtv_setup_pci(itv, dev, pci_id)) != 0) { + if (retval == -EIO) + goto free_workqueue; + else if (retval == -ENXIO) + goto free_mem; + } + /* save itv in the pci struct for later use */ + pci_set_drvdata(dev, itv); + + /* map io memory */ + IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n", + itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE); + itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET, + IVTV_ENCODER_SIZE); + if (!itv->enc_mem) { + IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE 
in page.h\n"); + IVTV_ERR("or disabling CONFIG_HIMEM4G into the kernel would help\n"); + retval = -ENOMEM; + goto free_mem; + } + + if (itv->has_cx23415) { + IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n", + itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); + itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET, + IVTV_DECODER_SIZE); + if (!itv->dec_mem) { + IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n"); + IVTV_ERR("or disabling CONFIG_HIMEM4G into the kernel would help\n"); + retval = -ENOMEM; + goto free_mem; + } + } + else { + itv->dec_mem = itv->enc_mem; + } + + /* map registers memory */ + IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n", + itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); + itv->reg_mem = + ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); + if (!itv->reg_mem) { + IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n"); + IVTV_ERR("or disabling CONFIG_HIMEM4G into the kernel would help\n"); + retval = -ENOMEM; + goto free_io; + } + + while (--fw_retry_count > 0) { + /* load firmware */ + if (ivtv_firmware_init(itv) == 0) + break; + if (fw_retry_count > 1) + IVTV_WARN("Retry loading firmware\n"); + } + if (fw_retry_count == 0) { + IVTV_ERR("Error initializing firmware\n"); + goto free_i2c; + } + + /* Try and get firmware versions */ + IVTV_DEBUG_INFO("Getting firmware version..\n"); + ivtv_firmware_versions(itv); + + /* Check yuv output filter table */ + if (itv->has_cx23415) ivtv_yuv_filter_check(itv); + + ivtv_gpio_init(itv); + + /* active i2c */ + IVTV_DEBUG_INFO("activating i2c...\n"); + if (init_ivtv_i2c(itv)) { + IVTV_ERR("Could not initialize i2c\n"); + goto free_irq; + } + + IVTV_DEBUG_INFO("Active card count: %d.\n", ivtv_cards_active); + + if (itv->card->hw_all & IVTV_HW_TVEEPROM) { +#ifdef CONFIG_VIDEO_TVEEPROM_MODULE + ivtv_request_module(itv, "tveeprom"); +#endif + /* Based on the model number the cardtype may be changed. + The PCI IDs are not always reliable. */ + ivtv_process_eeprom(itv); + } + + if (itv->std == 0) { + itv->std = V4L2_STD_NTSC_M; + } + + if (itv->options.tuner == -1) { + int i; + + for (i = 0; i < IVTV_CARD_MAX_TUNERS; i++) { + if ((itv->std & itv->card->tuners[i].std) == 0) + continue; + itv->options.tuner = itv->card->tuners[i].tuner; + break; + } + } + /* if no tuner was found, then pick the first tuner in the card list */ + if (itv->options.tuner == -1 && itv->card->tuners[0].std) { + itv->std = itv->card->tuners[0].std; + itv->options.tuner = itv->card->tuners[0].tuner; + } + if (itv->options.radio == -1) + itv->options.radio = (itv->card->radio_input.audio_type != 0); + + /* The card is now fully identified, continue with card-specific + initialization. */ + ivtv_init_struct2(itv); + + ivtv_load_and_init_modules(itv); + + if (itv->std & V4L2_STD_525_60) { + itv->is_60hz = 1; + itv->is_out_60hz = 1; + } else { + itv->is_50hz = 1; + itv->is_out_50hz = 1; + } + itv->params.video_gop_size = itv->is_60hz ? 15 : 12; + + itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_MPG] = 0x08000; + itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_PCM] = 0x01200; + itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_MPG] = 0x10000; + + /* 0x15180 == 720 * 480 / 4, 0x19500 == 720 * 576 / 4 */ + yuv_buf_size = itv->is_60hz ? 0x15180 : 0x19500; + itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_YUV] = yuv_buf_size / 2; + itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_YUV] = yuv_buf_size / 8; + + /* Setup VBI Raw Size. Should be big enough to hold PAL. 
+ It is possible to switch between PAL and NTSC, so we need to + take the largest size here. */ + /* 1456 is multiple of 16, real size = 1444 */ + itv->vbi.raw_size = 1456; + /* We use a buffer size of 1/2 of the total size needed for a + frame. This is actually very useful, since we now receive + a field at a time and that makes 'compressing' the raw data + down to size by stripping off the SAV codes a lot easier. + Note: having two different buffer sizes prevents standard + switching on the fly. We need to find a better solution... */ + vbi_buf_size = itv->vbi.raw_size * (itv->is_60hz ? 24 : 36) / 2; + itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_VBI] = vbi_buf_size; + itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_VBI] = sizeof(struct v4l2_sliced_vbi_data) * 36; + + if (itv->options.radio > 0) + itv->v4l2_cap |= V4L2_CAP_RADIO; + + retval = ivtv_streams_setup(itv); + if (retval) { + IVTV_ERR("Error %d setting up streams\n", retval); + goto free_i2c; + } + + /* Start Threads */ + IVTV_DEBUG_INFO("Starting Threads\n"); + + /* Decoder Thread */ + if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) { + ivtv_init_mpeg_decoder(itv); + } + + IVTV_DEBUG_IRQ("Masking interrupts\n"); + /* clear interrupt mask, effectively disabling interrupts */ + ivtv_set_irq_mask(itv, 0xffffffff); + + /* Register IRQ */ + retval = request_irq(itv->dev->irq, ivtv_irq_handler, + SA_SHIRQ | SA_INTERRUPT, itv->name, (void *)itv); + if (retval) { + IVTV_ERR("Failed to register irq %d\n", retval); + goto free_streams; + } + + /* On a cx23416 this seems to be able to enable DMA to the chip? */ + if (!itv->has_cx23415) + write_reg_sync(0x03, IVTV_REG_DMACONTROL); + + /* Default interrupts enabled. For the PVR350 this includes the + decoder VSYNC interrupt, which is always on. It is not only used + during decoding but also by the OSD. + Some old PVR250 cards had a cx23415, so testing for that is too + general. Instead test if the card has video output capability. */ + if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) + ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC); + else + ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT); + + if (itv->options.tuner > -1) { + struct tuner_setup setup; + + setup.addr = ADDR_UNSET; + setup.type = itv->options.tuner; + setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */ +#ifdef HAVE_XC3028 + setup.initmode = V4L2_TUNER_ANALOG_TV; + if (itv->options.tuner == TUNER_XCEIVE_XC3028) { + setup.gpio_write = ivtv_reset_tuner_gpio; + setup.gpio_priv = itv; + } +#endif + ivtv_call_i2c_clients(itv, TUNER_SET_TYPE_ADDR, &setup); + } + + vf.tuner = 0; + vf.type = V4L2_TUNER_ANALOG_TV; + vf.frequency = 6400; /* the tuner 'baseline' frequency */ + if (itv->std & V4L2_STD_NTSC_M) { + /* Why on earth? */ + vf.frequency = 1076; /* ch. 4 67250*16/1000 */ + } + + /* The tuner is fixed to the standard. The other inputs (e.g. S-Video) + are not. */ + itv->tuner_std = itv->std; + + video_input = itv->active_input; + itv->active_input++; /* Force update of input */ + ivtv_v4l2_ioctls(itv, NULL, VIDIOC_S_INPUT, &video_input); + + /* Let the VIDIOC_S_STD ioctl do all the work, keeps the code + in one place. 
*/ + itv->std++; /* Force full standard initialization */ + itv->std_out = itv->std; + ivtv_v4l2_ioctls(itv, NULL, VIDIOC_S_STD, &itv->tuner_std); + ivtv_v4l2_ioctls(itv, NULL, VIDIOC_S_FREQUENCY, &vf); + if (itv->has_cx23415) + ivtv_set_osd_alpha(itv); + + IVTV_INFO("Initialized %s, card #%d\n", itv->card_name, itv->num); + + return 0; + + free_irq: + free_irq(itv->dev->irq, (void *)itv); + free_streams: + ivtv_streams_cleanup(itv); + free_i2c: + exit_ivtv_i2c(itv); + free_io: + ivtv_iounmap(itv); + free_mem: + release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); + release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); + if (itv->has_cx23415) + release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); + free_workqueue: + destroy_workqueue(itv->vbi.work_queues); + destroy_workqueue(itv->yuv_info.work_queues); + err: + if (retval == 0) + retval = -ENODEV; + IVTV_ERR("Error %d on initialization\n", retval); + + kfree(ivtv_cards[ivtv_cards_active]); + ivtv_cards[ivtv_cards_active] = NULL; + return retval; +} + +static void ivtv_remove(struct pci_dev *pci_dev) +{ + struct ivtv *itv = pci_get_drvdata(pci_dev); + + IVTV_DEBUG_INFO("Removing Card #%d.\n", itv->num); + + /* Stop all captures */ + IVTV_DEBUG_INFO(" Stopping all streams.\n"); + if (atomic_read(&itv->capturing) > 0) + ivtv_stop_all_captures(itv); + + /* Stop all decoding */ + IVTV_DEBUG_INFO(" Stopping decoding.\n"); + if (atomic_read(&itv->decoding) > 0) { + int type; + + if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) + type = IVTV_DEC_STREAM_TYPE_YUV; + else + type = IVTV_DEC_STREAM_TYPE_MPG; + ivtv_stop_v4l2_decode_stream(&itv->streams[type], + VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0); + } + + /* Interrupts */ + IVTV_DEBUG_INFO(" Disabling interrupts.\n"); + ivtv_set_irq_mask(itv, 0xffffffff); + del_timer_sync(&itv->dma_timer); + + /* Stop all Work Queues */ + IVTV_DEBUG_INFO(" Stop Work Queues.\n"); + flush_workqueue(itv->vbi.work_queues); + flush_workqueue(itv->yuv_info.work_queues); + destroy_workqueue(itv->vbi.work_queues); + destroy_workqueue(itv->yuv_info.work_queues); + + IVTV_DEBUG_INFO(" Stopping Firmware.\n"); + ivtv_halt_firmware(itv); + + IVTV_DEBUG_INFO(" Unregistering v4l devices.\n"); + ivtv_streams_cleanup(itv); + IVTV_DEBUG_INFO(" Freeing dma resources.\n"); + ivtv_udma_free(itv); + + exit_ivtv_i2c(itv); + + IVTV_DEBUG_INFO(" Releasing irq.\n"); + free_irq(itv->dev->irq, (void *)itv); + + if (itv->dev) { + ivtv_iounmap(itv); + } + + IVTV_DEBUG_INFO(" Releasing mem.\n"); + release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); + release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); + if (itv->has_cx23415) + release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); + + pci_disable_device(itv->dev); + + IVTV_INFO("Removed %s, card #%d\n", itv->card_name, itv->num); +} + +/* define a pci_driver for card detection */ +static struct pci_driver ivtv_pci_driver = { + .name = "ivtv", + .id_table = ivtv_pci_tbl, + .probe = ivtv_probe, + .remove = ivtv_remove, +}; + +static int module_start(void) +{ + printk(KERN_INFO "ivtv: ==================== START INIT IVTV ====================\n"); + printk(KERN_INFO "ivtv: version %s (" VERMAGIC_STRING ") loading\n", IVTV_VERSION); + + memset(ivtv_cards, 0, sizeof(ivtv_cards)); + + /* Validate parameters */ + if (ivtv_first_minor < 0 || ivtv_first_minor >= IVTV_MAX_CARDS) { + printk(KERN_ERR "ivtv: ivtv_first_minor must be between 0 and %d. 
Exiting...\n", + IVTV_MAX_CARDS - 1); + return -1; + } + + if (ivtv_debug < 0 || ivtv_debug > 511) { + ivtv_debug = 0; + printk(KERN_INFO "ivtv: debug value must be >= 0 and <= 511!\n"); + } + + if (pci_module_init(&ivtv_pci_driver)) { + printk(KERN_ERR "ivtv: Error detecting PCI card\n"); + return -ENODEV; + } + printk(KERN_INFO "ivtv: ==================== END INIT IVTV ====================\n"); + return 0; +} + +static void module_cleanup(void) +{ + int i, j; + + for (i = 0; i < ivtv_cards_active; i++) { + if (ivtv_cards[i] == NULL) + continue; + for (j = 0; j < IVTV_VBI_FRAMES; j++) { + kfree(ivtv_cards[i]->vbi.sliced_mpeg_data[j]); + } + kfree(ivtv_cards[i]); + } + pci_unregister_driver(&ivtv_pci_driver); +} + +EXPORT_SYMBOL(ivtv_set_irq_mask); +EXPORT_SYMBOL(ivtv_cards_active); +EXPORT_SYMBOL(ivtv_cards); +EXPORT_SYMBOL(ivtv_api); +EXPORT_SYMBOL(ivtv_vapi); +EXPORT_SYMBOL(ivtv_vapi_result); +EXPORT_SYMBOL(ivtv_clear_irq_mask); +EXPORT_SYMBOL(ivtv_debug); +EXPORT_SYMBOL(ivtv_reset_ir_gpio); +EXPORT_SYMBOL(ivtv_udma_setup); +EXPORT_SYMBOL(ivtv_udma_unmap); +EXPORT_SYMBOL(ivtv_udma_alloc); +EXPORT_SYMBOL(ivtv_udma_prepare); + +module_init(module_start); +module_exit(module_cleanup); diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h new file mode 100644 index 000000000000..546d7bbfcf5b --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-driver.h @@ -0,0 +1,866 @@ +/* + ivtv driver internal defines and structures + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef IVTV_DRIVER_H +#define IVTV_DRIVER_H + +/* Internal header for ivtv project: + * Driver for the cx23415/6 chip. + * Author: Kevin Thayer (nufan_wfk at yahoo.com) + * License: GPL + * http://www.ivtvdriver.org + * + * ----- + * MPG600/MPG160 support by T.Adachi + * and Takeru KOMORIYA + * + * AVerMedia M179 GPIO info by Chris Pinkham + * using information provided by Jiun-Kuei Jung @ AVerMedia. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* #define HAVE_XC3028 1 */ + +#include + +#ifdef CONFIG_LIRC_I2C +# error "This driver is not compatible with the LIRC I2C kernel configuration option." +#endif /* CONFIG_LIRC_I2C */ + +#ifndef CONFIG_PCI +# error "This driver requires kernel PCI support." 
+#endif /* CONFIG_PCI */ + +#define IVTV_ENCODER_OFFSET 0x00000000 +#define IVTV_ENCODER_SIZE 0x00800000 /* Last half isn't needed 0x01000000 */ + +#define IVTV_DECODER_OFFSET 0x01000000 +#define IVTV_DECODER_SIZE 0x00800000 /* Last half isn't needed 0x01000000 */ + +#define IVTV_REG_OFFSET 0x02000000 +#define IVTV_REG_SIZE 0x00010000 + +/* Buffers on hardware offsets */ +#define IVTV_YUV_BUFFER_OFFSET 0x001a8600 /* First YUV Buffer */ +#define IVTV_YUV_BUFFER_OFFSET_1 0x00240400 /* Second YUV Buffer */ +#define IVTV_YUV_BUFFER_OFFSET_2 0x002d8200 /* Third YUV Buffer */ +#define IVTV_YUV_BUFFER_OFFSET_3 0x00370000 /* Fourth YUV Buffer */ +#define IVTV_YUV_BUFFER_UV_OFFSET 0x65400 /* Offset to UV Buffer */ + +/* Offset to filter table in firmware */ +#define IVTV_YUV_HORIZONTAL_FILTER_OFFSET 0x025d8 +#define IVTV_YUV_VERTICAL_FILTER_OFFSET 0x03358 + +extern const u32 yuv_offset[4]; + +/* Maximum ivtv driver instances. + Based on 6 PVR500s each with two PVR15s... + TODO: make this dynamic. I believe it is only a global in order to support + ivtv-fb. There must be a better way to do that. */ +#define IVTV_MAX_CARDS 12 + +/* Supported cards */ +#define IVTV_CARD_PVR_250 0 /* WinTV PVR 250 */ +#define IVTV_CARD_PVR_350 1 /* encoder, decoder, tv-out */ +#define IVTV_CARD_PVR_150 2 /* WinTV PVR 150 and PVR 500 (really just two + PVR150s on one PCI board) */ +#define IVTV_CARD_M179 3 /* AVerMedia M179 (encoder only) */ +#define IVTV_CARD_MPG600 4 /* Kuroutoshikou ITVC16-STVLP/YUAN MPG600, encoder only */ +#define IVTV_CARD_MPG160 5 /* Kuroutoshikou ITVC15-STVLP/YUAN MPG160 + cx23415 based, but does not have tv-out */ +#define IVTV_CARD_PG600 6 /* YUAN PG600/DIAMONDMM PVR-550 based on the CX Falcon 2 */ +#define IVTV_CARD_AVC2410 7 /* Adaptec AVC-2410 */ +#define IVTV_CARD_AVC2010 8 /* Adaptec AVD-2010 (No Tuner) */ +#define IVTV_CARD_TG5000TV 9 /* NAGASE TRANSGEAR 5000TV, encoder only */ +#define IVTV_CARD_VA2000MAX_SNT6 10 /* VA2000MAX-STN6 */ +#define IVTV_CARD_CX23416GYC 11 /* Kuroutoshikou CX23416GYC-STVLP (Yuan MPG600GR OEM) */ +#define IVTV_CARD_GV_MVPRX 12 /* I/O Data GV-MVP/RX, RX2, RX2W */ +#define IVTV_CARD_GV_MVPRX2E 13 /* I/O Data GV-MVP/RX2E */ +#define IVTV_CARD_GOTVIEW_PCI_DVD 14 /* GotView PCI DVD */ +#define IVTV_CARD_GOTVIEW_PCI_DVD2 15 /* GotView PCI DVD2 */ +#define IVTV_CARD_YUAN_MPC622 16 /* Yuan MPC622 miniPCI */ +#define IVTV_CARD_DCTMTVP1 17 /* DIGITAL COWBOY DCT-MTVP1 */ +#ifdef HAVE_XC3028 +#define IVTV_CARD_PG600V2 18 /* Yuan PG600V2/GotView PCI DVD Lite/Club3D ZAP-TV1x01 */ +#define IVTV_CARD_LAST 18 +#else +#define IVTV_CARD_LAST 17 +#endif + +/* Variants of existing cards but with the same PCI IDs. The driver + detects these based on other device information. + These cards must always come last. + New cards must be inserted above, and the indices of the cards below + must be adjusted accordingly. 
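+   For example (illustrative only), a hypothetical IVTV_CARD_NEWCARD would
+   take the next free index and IVTV_CARD_LAST would be bumped to match;
+   the variant defines below need no edits, since they are expressed
+   relative to IVTV_CARD_LAST.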
*/ + +/* PVR-350 V1 (uses saa7114) */ +#define IVTV_CARD_PVR_350_V1 (IVTV_CARD_LAST+1) +/* 2 variants of Kuroutoshikou CX23416GYC-STVLP (Yuan MPG600GR OEM) */ +#define IVTV_CARD_CX23416GYC_NOGR (IVTV_CARD_LAST+2) +#define IVTV_CARD_CX23416GYC_NOGRYCS (IVTV_CARD_LAST+3) + +#define IVTV_ENC_STREAM_TYPE_MPG 0 +#define IVTV_ENC_STREAM_TYPE_YUV 1 +#define IVTV_ENC_STREAM_TYPE_VBI 2 +#define IVTV_ENC_STREAM_TYPE_PCM 3 +#define IVTV_ENC_STREAM_TYPE_RAD 4 +#define IVTV_DEC_STREAM_TYPE_MPG 5 +#define IVTV_DEC_STREAM_TYPE_VBI 6 +#define IVTV_DEC_STREAM_TYPE_VOUT 7 +#define IVTV_DEC_STREAM_TYPE_YUV 8 +#define IVTV_MAX_STREAMS 9 + +#define IVTV_V4L2_DEC_MPG_OFFSET 16 /* offset from 0 to register decoder mpg v4l2 minors on */ +#define IVTV_V4L2_ENC_PCM_OFFSET 24 /* offset from 0 to register pcm v4l2 minors on */ +#define IVTV_V4L2_ENC_YUV_OFFSET 32 /* offset from 0 to register yuv v4l2 minors on */ +#define IVTV_V4L2_DEC_YUV_OFFSET 48 /* offset from 0 to register decoder yuv v4l2 minors on */ +#define IVTV_V4L2_DEC_VBI_OFFSET 8 /* offset from 0 to register decoder vbi input v4l2 minors on */ +#define IVTV_V4L2_DEC_VOUT_OFFSET 16 /* offset from 0 to register vbi output v4l2 minors on */ + +#define IVTV_ENC_MEM_START 0x00000000 +#define IVTV_DEC_MEM_START 0x01000000 + +/* system vendor and device IDs */ +#define PCI_VENDOR_ID_ICOMP 0x4444 +#define PCI_DEVICE_ID_IVTV15 0x0803 +#define PCI_DEVICE_ID_IVTV16 0x0016 + +/* subsystem vendor ID */ +#define IVTV_PCI_ID_HAUPPAUGE 0x0070 +#define IVTV_PCI_ID_HAUPPAUGE_ALT1 0x0270 +#define IVTV_PCI_ID_HAUPPAUGE_ALT2 0x4070 +#define IVTV_PCI_ID_ADAPTEC 0x9005 +#define IVTV_PCI_ID_AVERMEDIA 0x1461 +#define IVTV_PCI_ID_YUAN1 0x12ab +#define IVTV_PCI_ID_YUAN2 0xff01 +#define IVTV_PCI_ID_YUAN3 0xffab +#define IVTV_PCI_ID_YUAN4 0xfbab +#define IVTV_PCI_ID_DIAMONDMM 0xff92 +#define IVTV_PCI_ID_IODATA 0x10fc +#define IVTV_PCI_ID_MELCO 0x1154 +#define IVTV_PCI_ID_GOTVIEW1 0xffac +#define IVTV_PCI_ID_GOTVIEW2 0xffad + +/* Decoder Buffer hardware size on Chip */ +#define IVTV_DEC_MAX_BUF 0x00100000 /* max bytes in decoder buffer */ +#define IVTV_DEC_MIN_BUF 0x00010000 /* min bytes in dec buffer */ + +/* ======================================================================== */ +/* ========================== START USER SETTABLE DMA VARIABLES =========== */ +/* ======================================================================== */ + +#define IVTV_DMA_SG_OSD_ENT (2883584/PAGE_SIZE) /* sg entities */ + +/* DMA Buffers, Default size in MB allocated */ +#define IVTV_DEFAULT_ENC_MPG_BUFFERS 4 +#define IVTV_DEFAULT_ENC_YUV_BUFFERS 2 +#define IVTV_DEFAULT_ENC_VBI_BUFFERS 1 +#define IVTV_DEFAULT_ENC_PCM_BUFFERS 1 +#define IVTV_DEFAULT_DEC_MPG_BUFFERS 1 +#define IVTV_DEFAULT_DEC_YUV_BUFFERS 1 +#define IVTV_DEFAULT_DEC_VBI_BUFFERS 1 + +/* ======================================================================== */ +/* ========================== END USER SETTABLE DMA VARIABLES ============= */ +/* ======================================================================== */ + +/* Decoder Status Register */ +#define IVTV_DMA_ERR_LIST 0x00000010 +#define IVTV_DMA_ERR_WRITE 0x00000008 +#define IVTV_DMA_ERR_READ 0x00000004 +#define IVTV_DMA_SUCCESS_WRITE 0x00000002 +#define IVTV_DMA_SUCCESS_READ 0x00000001 +#define IVTV_DMA_READ_ERR (IVTV_DMA_ERR_LIST | IVTV_DMA_ERR_READ) +#define IVTV_DMA_WRITE_ERR (IVTV_DMA_ERR_LIST | IVTV_DMA_ERR_WRITE) +#define IVTV_DMA_ERR (IVTV_DMA_ERR_LIST | IVTV_DMA_ERR_WRITE | IVTV_DMA_ERR_READ) + +/* DMA Registers */ +#define IVTV_REG_DMAXFER (0x0000) +#define 
IVTV_REG_DMASTATUS (0x0004) +#define IVTV_REG_DECDMAADDR (0x0008) +#define IVTV_REG_ENCDMAADDR (0x000c) +#define IVTV_REG_DMACONTROL (0x0010) +#define IVTV_REG_IRQSTATUS (0x0040) +#define IVTV_REG_IRQMASK (0x0048) + +/* Setup Registers */ +#define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8) +#define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC) +#define IVTV_REG_DEC_SDRAM_REFRESH (0x08F8) +#define IVTV_REG_DEC_SDRAM_PRECHARGE (0x08FC) +#define IVTV_REG_VDM (0x2800) +#define IVTV_REG_AO (0x2D00) +#define IVTV_REG_BYTEFLUSH (0x2D24) +#define IVTV_REG_SPU (0x9050) +#define IVTV_REG_HW_BLOCKS (0x9054) +#define IVTV_REG_VPU (0x9058) +#define IVTV_REG_APU (0xA064) + +#define IVTV_IRQ_ENC_START_CAP (0x1 << 31) +#define IVTV_IRQ_ENC_EOS (0x1 << 30) +#define IVTV_IRQ_ENC_VBI_CAP (0x1 << 29) +#define IVTV_IRQ_ENC_VIM_RST (0x1 << 28) +#define IVTV_IRQ_ENC_DMA_COMPLETE (0x1 << 27) +#define IVTV_IRQ_DEC_AUD_MODE_CHG (0x1 << 24) +#define IVTV_IRQ_DEC_DATA_REQ (0x1 << 22) +#define IVTV_IRQ_DEC_DMA_COMPLETE (0x1 << 20) +#define IVTV_IRQ_DEC_VBI_RE_INSERT (0x1 << 19) +#define IVTV_IRQ_DMA_ERR (0x1 << 18) +#define IVTV_IRQ_DMA_WRITE (0x1 << 17) +#define IVTV_IRQ_DMA_READ (0x1 << 16) +#define IVTV_IRQ_DEC_VSYNC (0x1 << 10) + +/* IRQ Masks */ +#define IVTV_IRQ_MASK_INIT (IVTV_IRQ_DMA_ERR|IVTV_IRQ_ENC_DMA_COMPLETE|IVTV_IRQ_DMA_READ) + +#define IVTV_IRQ_MASK_CAPTURE (IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_EOS) +#define IVTV_IRQ_MASK_DECODE (IVTV_IRQ_DEC_DATA_REQ|IVTV_IRQ_DEC_AUD_MODE_CHG) + +/* i2c stuff */ +#define I2C_CLIENTS_MAX 16 + +/* debugging */ + +#define IVTV_DBGFLG_WARN (1 << 0) +#define IVTV_DBGFLG_INFO (1 << 1) +#define IVTV_DBGFLG_API (1 << 2) +#define IVTV_DBGFLG_DMA (1 << 3) +#define IVTV_DBGFLG_IOCTL (1 << 4) +#define IVTV_DBGFLG_I2C (1 << 5) +#define IVTV_DBGFLG_IRQ (1 << 6) +#define IVTV_DBGFLG_DEC (1 << 7) +#define IVTV_DBGFLG_YUV (1 << 8) + +/* NOTE: extra space before comma in 'itv->num , ## args' is required for + gcc-2.95, otherwise it won't compile. */ +#define IVTV_DEBUG(x, type, fmt, args...) \ + do { \ + if ((x) & ivtv_debug) \ + printk(KERN_INFO "ivtv%d " type ": " fmt, itv->num , ## args); \ + } while (0) +#define IVTV_DEBUG_WARN(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_WARN, "warning", fmt , ## args) +#define IVTV_DEBUG_INFO(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_INFO, "info",fmt , ## args) +#define IVTV_DEBUG_API(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_API, "api", fmt , ## args) +#define IVTV_DEBUG_DMA(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_DMA, "dma", fmt , ## args) +#define IVTV_DEBUG_IOCTL(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_IOCTL, "ioctl", fmt , ## args) +#define IVTV_DEBUG_I2C(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_I2C, "i2c", fmt , ## args) +#define IVTV_DEBUG_IRQ(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_IRQ, "irq", fmt , ## args) +#define IVTV_DEBUG_DEC(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_DEC, "dec", fmt , ## args) +#define IVTV_DEBUG_YUV(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_YUV, "yuv", fmt , ## args) + +#define IVTV_FB_DEBUG(x, type, fmt, args...) \ + do { \ + if ((x) & ivtv_debug) \ + printk(KERN_INFO "ivtv%d-fb " type ": " fmt, itv->num , ## args); \ + } while (0) +#define IVTV_FB_DEBUG_WARN(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_WARN, "warning", fmt , ## args) +#define IVTV_FB_DEBUG_INFO(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_INFO, "info", fmt , ## args) +#define IVTV_FB_DEBUG_API(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_API, "api", fmt , ## args) +#define IVTV_FB_DEBUG_DMA(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_DMA, "dma", fmt , ## args) +#define IVTV_FB_DEBUG_IOCTL(fmt, args...) 
IVTV_FB_DEBUG(IVTV_DBGFLG_IOCTL, "ioctl", fmt , ## args) +#define IVTV_FB_DEBUG_I2C(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_I2C, "i2c", fmt , ## args) +#define IVTV_FB_DEBUG_IRQ(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_IRQ, "irq", fmt , ## args) +#define IVTV_FB_DEBUG_DEC(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_DEC, "dec", fmt , ## args) +#define IVTV_FB_DEBUG_YUV(fmt, args...) IVTV_FB_DEBUG(IVTV_DBGFLG_YUV, "yuv", fmt , ## args) + +/* Standard kernel messages */ +#define IVTV_ERR(fmt, args...) printk(KERN_ERR "ivtv%d: " fmt, itv->num , ## args) +#define IVTV_WARN(fmt, args...) printk(KERN_WARNING "ivtv%d: " fmt, itv->num , ## args) +#define IVTV_INFO(fmt, args...) printk(KERN_INFO "ivtv%d: " fmt, itv->num , ## args) +#define IVTV_FB_ERR(fmt, args...) printk(KERN_ERR "ivtv%d-fb: " fmt, itv->num , ## args) +#define IVTV_FB_INFO(fmt, args...) printk(KERN_INFO "ivtv%d-fb: " fmt, itv->num , ## args) + +/* Values for IVTV_API_DEC_PLAYBACK_SPEED mpeg_frame_type_mask parameter: */ +#define MPEG_FRAME_TYPE_IFRAME 1 +#define MPEG_FRAME_TYPE_IFRAME_PFRAME 3 +#define MPEG_FRAME_TYPE_ALL 7 + +/* output modes (cx23415 only) */ +#define OUT_NONE 0 +#define OUT_MPG 1 +#define OUT_YUV 2 +#define OUT_UDMA_YUV 3 +#define OUT_PASSTHROUGH 4 + +#define IVTV_MAX_PGM_INDEX (400) + +extern int ivtv_debug; + + +struct ivtv_options { + int megabytes[IVTV_MAX_STREAMS]; /* Size in megabytes of each stream */ + int cardtype; /* force card type on load */ + int tuner; /* set tuner on load */ + int radio; /* enable/disable radio */ + int newi2c; /* New I2C algorithm */ +}; + +#define IVTV_MBOX_DMA_START 6 +#define IVTV_MBOX_DMA_END 8 +#define IVTV_MBOX_DMA 9 +#define IVTV_MBOX_FIELD_DISPLAYED 8 + +/* ivtv-specific mailbox template */ +struct ivtv_mailbox { + u32 flags; + u32 cmd; + u32 retval; + u32 timeout; + u32 data[CX2341X_MBOX_MAX_DATA]; +}; + +struct ivtv_api_cache { + unsigned long last_jiffies; /* when last command was issued */ + u32 data[CX2341X_MBOX_MAX_DATA]; /* last sent api data */ +}; + +struct ivtv_mailbox_data { + volatile struct ivtv_mailbox __iomem *mbox; + /* Bits 0-2 are for the encoder mailboxes, 0-1 are for the decoder mailboxes. + If the bit is set, then the corresponding mailbox is in use by the driver. 
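+      (Presumably 'busy' is an unsigned long so that it can be manipulated
+      with the atomic test_and_set_bit()/clear_bit() helpers, the usual
+      kernel idiom for a small bitmask like this.)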
*/ + unsigned long busy; + u8 max_mbox; +}; + +/* per-buffer bit flags */ +#define IVTV_F_B_NEED_BUF_SWAP 0 /* this buffer should be byte swapped */ + +/* per-stream, s_flags */ +#define IVTV_F_S_DMA_PENDING 0 /* this stream has pending DMA */ +#define IVTV_F_S_DMA_HAS_VBI 1 /* the current DMA request also requests VBI data */ +#define IVTV_F_S_NEEDS_DATA 2 /* this decoding stream needs more data */ + +#define IVTV_F_S_CLAIMED 3 /* this stream is claimed */ +#define IVTV_F_S_STREAMING 4 /* the fw is decoding/encoding this stream */ +#define IVTV_F_S_INTERNAL_USE 5 /* this stream is used internally (sliced VBI processing) */ +#define IVTV_F_S_PASSTHROUGH 6 /* this stream is in passthrough mode */ +#define IVTV_F_S_STREAMOFF 7 /* signal end of stream EOS */ +#define IVTV_F_S_APPL_IO 8 /* this stream is used read/written by an application */ + +/* per-ivtv, i_flags */ +#define IVTV_F_I_DMA 0 /* DMA in progress */ +#define IVTV_F_I_UDMA 1 /* UDMA in progress */ +#define IVTV_F_I_UDMA_PENDING 2 /* UDMA pending */ + +#define IVTV_F_I_SPEED_CHANGE 3 /* A speed change is in progress */ +#define IVTV_F_I_EOS 4 /* End of encoder stream reached */ +#define IVTV_F_I_RADIO_USER 5 /* The radio tuner is selected */ +#define IVTV_F_I_DIG_RST 6 /* Reset digitizer */ +#define IVTV_F_I_DEC_YUV 7 /* YUV instead of MPG is being decoded */ +#define IVTV_F_I_ENC_VBI 8 /* VBI DMA */ +#define IVTV_F_I_UPDATE_CC 9 /* CC should be updated */ +#define IVTV_F_I_UPDATE_WSS 10 /* WSS should be updated */ +#define IVTV_F_I_UPDATE_VPS 11 /* VPS should be updated */ +#define IVTV_F_I_DECODING_YUV 12 /* this stream is YUV frame decoding */ +#define IVTV_F_I_ENC_PAUSED 13 /* the encoder is paused */ +#define IVTV_F_I_VALID_DEC_TIMINGS 14 /* last_dec_timing is valid */ + +/* Event notifications */ +#define IVTV_F_I_EV_DEC_STOPPED 28 /* decoder stopped event */ +#define IVTV_F_I_EV_VSYNC 29 /* VSYNC event */ +#define IVTV_F_I_EV_VSYNC_FIELD 30 /* VSYNC event field (0 = first, 1 = second field) */ +#define IVTV_F_I_EV_VSYNC_ENABLED 31 /* VSYNC event enabled */ + +/* Scatter-Gather array element, used in DMA transfers */ +struct ivtv_SG_element { + u32 src; + u32 dst; + u32 size; +}; + +struct ivtv_user_dma { + struct mutex lock; + int page_count; + struct page *map[IVTV_DMA_SG_OSD_ENT]; + + /* Base Dev SG Array for cx23415/6 */ + struct ivtv_SG_element SGarray[IVTV_DMA_SG_OSD_ENT]; + dma_addr_t SG_handle; + int SG_length; + + /* SG List of Buffers */ + struct scatterlist SGlist[IVTV_DMA_SG_OSD_ENT]; +}; + +struct ivtv_dma_page_info { + unsigned long uaddr; + unsigned long first; + unsigned long last; + unsigned int offset; + unsigned int tail; + int page_count; +}; + +struct ivtv_buffer { + struct list_head list; + dma_addr_t dma_handle; + unsigned long b_flags; + char *buf; + + u32 bytesused; + u32 readpos; +}; + +struct ivtv_queue { + struct list_head list; + u32 buffers; + u32 length; + u32 bytesused; +}; + +struct ivtv; /* forward reference */ + +struct ivtv_stream { + /* These first four fields are always set, even if the stream + is not actually created. 
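+      (That way error paths can still report the stream name and type;
+      v4l2dev == NULL is the "not created" marker, as noted below.)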
*/ + struct video_device *v4l2dev; /* NULL when stream not created */ + struct ivtv *itv; /* for ease of use */ + const char *name; /* name of the stream */ + int type; /* stream type */ + + u32 id; + spinlock_t qlock; /* locks access to the queues */ + unsigned long s_flags; /* status flags, see above */ + int dma; /* can be PCI_DMA_TODEVICE, + PCI_DMA_FROMDEVICE or + PCI_DMA_NONE */ + u32 dma_offset; + u32 dma_backup; + u64 dma_pts; + + int subtype; + wait_queue_head_t waitq; + u32 dma_last_offset; + + /* Buffer Stats */ + u32 buffers; + u32 buf_size; + u32 buffers_stolen; + + /* Buffer Queues */ + struct ivtv_queue q_free; /* free buffers */ + struct ivtv_queue q_full; /* full buffers */ + struct ivtv_queue q_io; /* waiting for I/O */ + struct ivtv_queue q_dma; /* waiting for DMA */ + struct ivtv_queue q_predma; /* waiting for DMA */ + + /* Base Dev SG Array for cx23415/6 */ + struct ivtv_SG_element *SGarray; + dma_addr_t SG_handle; + int SG_length; + + /* SG List of Buffers */ + struct scatterlist *SGlist; +}; + +struct ivtv_open_id { + u32 open_id; + int type; + struct ivtv *itv; +}; + +#define IVTV_YUV_UPDATE_HORIZONTAL 0x01 +#define IVTV_YUV_UPDATE_VERTICAL 0x02 + +struct yuv_frame_info +{ + u32 update; + int src_x; + int src_y; + unsigned int src_w; + unsigned int src_h; + int dst_x; + int dst_y; + unsigned int dst_w; + unsigned int dst_h; + int pan_x; + int pan_y; + u32 vis_w; + u32 vis_h; + u32 interlaced_y; + u32 interlaced_uv; + int tru_x; + u32 tru_w; + u32 tru_h; + u32 offset_y; +}; + +#define IVTV_YUV_MODE_INTERLACED 0x00 +#define IVTV_YUV_MODE_PROGRESSIVE 0x01 +#define IVTV_YUV_MODE_AUTO 0x02 +#define IVTV_YUV_MODE_MASK 0x03 + +#define IVTV_YUV_SYNC_EVEN 0x00 +#define IVTV_YUV_SYNC_ODD 0x04 +#define IVTV_YUV_SYNC_MASK 0x04 + +struct yuv_playback_info +{ + u32 reg_2834; + u32 reg_2838; + u32 reg_283c; + u32 reg_2840; + u32 reg_2844; + u32 reg_2848; + u32 reg_2854; + u32 reg_285c; + u32 reg_2864; + + u32 reg_2870; + u32 reg_2874; + u32 reg_2890; + u32 reg_2898; + u32 reg_289c; + + u32 reg_2918; + u32 reg_291c; + u32 reg_2920; + u32 reg_2924; + u32 reg_2928; + u32 reg_292c; + u32 reg_2930; + + u32 reg_2934; + + u32 reg_2938; + u32 reg_293c; + u32 reg_2940; + u32 reg_2944; + u32 reg_2948; + u32 reg_294c; + u32 reg_2950; + u32 reg_2954; + u32 reg_2958; + u32 reg_295c; + u32 reg_2960; + u32 reg_2964; + u32 reg_2968; + u32 reg_296c; + + u32 reg_2970; + + int v_filter_1; + int v_filter_2; + int h_filter; + + u32 osd_x_offset; + u32 osd_y_offset; + + u32 osd_x_pan; + u32 osd_y_pan; + + u32 osd_vis_w; + u32 osd_vis_h; + + int decode_height; + + int frame_interlaced; + int frame_interlaced_last; + + int lace_mode; + int lace_threshold; + int lace_threshold_last; + int lace_sync_field; + + atomic_t next_dma_frame; + atomic_t next_fill_frame; + + u32 yuv_forced_update; + int update_frame; + struct workqueue_struct *work_queues; + struct work_struct work_queue; + struct yuv_frame_info new_frame_info[4]; + struct yuv_frame_info old_frame_info; + struct yuv_frame_info old_frame_info_args; + + void *blanking_ptr; + dma_addr_t blanking_dmaptr; +}; + +#define IVTV_VBI_FRAMES 32 + +/* VBI data */ +struct vbi_info { + u32 dec_start; + u32 enc_start, enc_size; + int fpi; + u32 frame; + u32 dma_offset; + u8 cc_data_odd[256]; + u8 cc_data_even[256]; + int cc_pos; + u8 cc_no_update; + u8 vps[5]; + u8 vps_found; + int wss; + u8 wss_found; + u8 wss_no_update; + u32 raw_decoder_line_size; + u8 raw_decoder_sav_odd_field; + u8 raw_decoder_sav_even_field; + u32 sliced_decoder_line_size; + u8 
sliced_decoder_sav_odd_field; + u8 sliced_decoder_sav_even_field; + struct v4l2_format in; + /* convenience pointer to sliced struct in vbi_in union */ + struct v4l2_sliced_vbi_format *sliced_in; + u32 service_set_in; + u32 service_set_out; + int insert_mpeg; + + /* Buffer for the maximum of 2 * 18 * packet_size sliced VBI lines. + One for /dev/vbi0 and one for /dev/vbi8 */ + struct v4l2_sliced_vbi_data sliced_data[36]; + struct v4l2_sliced_vbi_data sliced_dec_data[36]; + + /* Buffer for VBI data inserted into MPEG stream. + The first byte is a dummy byte that's never used. + The next 16 bytes contain the MPEG header for the VBI data, + the remainder is the actual VBI data. + The max size accepted by the MPEG VBI reinsertion turns out + to be 1552 bytes, which happens to be 4 + (1 + 42) * (2 * 18) bytes, + where 4 is a four byte header, 42 is the max sliced VBI payload, 1 is + a single line header byte and 2 * 18 is the number of VBI lines per frame. + + However, it seems that the data must be 1K aligned, so we have to + pad the data until the 1 or 2 K boundary. + + This pointer array will allocate 2049 bytes to store each VBI frame. */ + u8 *sliced_mpeg_data[IVTV_VBI_FRAMES]; + u32 sliced_mpeg_size[IVTV_VBI_FRAMES]; + struct ivtv_buffer sliced_mpeg_buf; + u32 inserted_frame; + + struct workqueue_struct *work_queues; + struct work_struct work_queue; + u32 start[2], count; + u32 raw_size; + u32 sliced_size; +}; + +/* forward declaration of struct defined in ivtv-cards.h */ +struct ivtv_card; + +/* Struct to hold info about ivtv cards */ +struct ivtv { + int num; /* board number, -1 during init! */ + char name[8]; /* board name for printk and interrupts (e.g. 'ivtv0') */ + struct pci_dev *dev; /* PCI device */ + const struct ivtv_card *card; /* card information */ + const char *card_name; /* full name of the card */ + u8 has_cx23415; /* 1 if it is a cx23415 based card, 0 for cx23416 */ + u8 is_50hz; + u8 is_60hz; + u8 is_out_50hz; + u8 is_out_60hz; + u8 pvr150_workaround; /* 1 if the cx25840 needs to workaround a PVR150 bug */ + u8 nof_inputs; /* number of video inputs */ + u8 nof_audio_inputs; /* number of audio inputs */ + u32 v4l2_cap; /* V4L2 capabilities of card */ + u32 hw_flags; /* Hardware description of the board */ + + /* controlling Video decoder function */ + int (*video_dec_func)(struct ivtv *, unsigned int, void *); + + struct ivtv_options options; /* User options */ + int stream_buf_size[IVTV_MAX_STREAMS]; /* Stream buffer size */ + struct ivtv_stream streams[IVTV_MAX_STREAMS]; /* Stream data */ + int speed; + u8 speed_mute_audio; + unsigned long i_flags; /* global ivtv flags */ + atomic_t capturing; /* count number of active capture streams */ + atomic_t decoding; /* count number of active decoding streams */ + u32 irq_rr_idx; /* Round-robin stream index */ + int cur_dma_stream; /* index of stream doing DMA */ + u32 dma_data_req_offset; + u32 dma_data_req_size; + int output_mode; /* NONE, MPG, YUV, UDMA YUV, passthrough */ + spinlock_t lock; /* lock access to this struct */ + int search_pack_header; + + spinlock_t dma_reg_lock; /* lock access to DMA engine registers */ + + /* User based DMA for OSD */ + struct ivtv_user_dma udma; + + int open_id; /* incremented each time an open occurs, used as unique ID. + starts at 1, so 0 can be used as uninitialized value + in the stream->id. 
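+                           (ivtv_claim_stream() compares stream->id against
+                           the caller's open_id to decide whether this
+                           filehandle already owns the stream.)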
*/ + + u32 base_addr; + u32 irqmask; + struct timer_list dma_timer; /* Timer used to catch unfinished DMAs */ + + struct vbi_info vbi; + + struct ivtv_mailbox_data enc_mbox; + struct ivtv_mailbox_data dec_mbox; + struct ivtv_api_cache api_cache[256]; /* Cached API Commands */ + + u8 card_rev; + volatile void __iomem *enc_mem, *dec_mem, *reg_mem; + + u32 pgm_info_offset; + u32 pgm_info_num; + u32 pgm_info_write_idx; + u32 pgm_info_read_idx; + struct v4l2_enc_idx_entry pgm_info[IVTV_MAX_PGM_INDEX]; + + u64 mpg_data_received; + u64 vbi_data_inserted; + + wait_queue_head_t cap_w; + /* when the next decoder event arrives this queue is woken up */ + wait_queue_head_t event_waitq; + /* when the next decoder vsync arrives this queue is woken up */ + wait_queue_head_t vsync_waitq; + /* when the current DMA is finished this queue is woken up */ + wait_queue_head_t dma_waitq; + + /* OSD support */ + unsigned long osd_video_pbase; + int osd_global_alpha_state; /* 0=off : 1=on */ + int osd_local_alpha_state; /* 0=off : 1=on */ + int osd_color_key_state; /* 0=off : 1=on */ + u8 osd_global_alpha; /* Current global alpha */ + u32 osd_color_key; /* Current color key */ + u32 osd_pixelformat; /* Current pixel format */ + struct v4l2_rect osd_rect; /* Current OSD position and size */ + struct v4l2_rect main_rect; /* Current Main window position and size */ + + u32 last_dec_timing[3]; /* Store last retrieved pts/scr/frame values */ + + /* i2c */ + struct i2c_adapter i2c_adap; + struct i2c_algo_bit_data i2c_algo; + struct i2c_client i2c_client; + struct mutex i2c_bus_lock; + int i2c_state; + struct i2c_client *i2c_clients[I2C_CLIENTS_MAX]; + + /* v4l2 and User settings */ + + /* codec settings */ + struct cx2341x_mpeg_params params; + u32 audio_input; + u32 active_input; + u32 active_output; + v4l2_std_id std; + v4l2_std_id std_out; + v4l2_std_id tuner_std; /* The norm of the tuner (fixed) */ + u8 audio_stereo_mode; + u8 audio_bilingual_mode; + + /* dualwatch */ + unsigned long dualwatch_jiffies; + u16 dualwatch_stereo_mode; + + /* Digitizer type */ + int digitizer; /* 0x00EF = saa7114 0x00FO = saa7115 0x0106 = mic */ + + u32 lastVsyncFrame; + + struct yuv_playback_info yuv_info; + struct osd_info *osd_info; +}; + +/* Globals */ +extern struct ivtv *ivtv_cards[]; +extern int ivtv_cards_active; +extern int ivtv_first_minor; +extern spinlock_t ivtv_cards_lock; + +/*==============Prototypes==================*/ + +/* Hardware/IRQ */ +void ivtv_set_irq_mask(struct ivtv *itv, u32 mask); +void ivtv_clear_irq_mask(struct ivtv *itv, u32 mask); + +/* try to set output mode, return current mode. */ +int ivtv_set_output_mode(struct ivtv *itv, int mode); + +/* return current output stream based on current mode */ +struct ivtv_stream *ivtv_get_output_stream(struct ivtv *itv); + +/* Return non-zero if a signal is pending */ +int ivtv_sleep_timeout(int timeout, int intr); + +/* Wait on queue, returns -EINTR if interrupted */ +int ivtv_waitq(wait_queue_head_t *waitq); + +/* Read Hauppauge eeprom */ +struct tveeprom; /* forward reference */ +void ivtv_read_eeprom(struct ivtv *itv, struct tveeprom *tv); + +/* This is a PCI post thing, where if the pci register is not read, then + the write doesn't always take effect right away. By reading back the + register any pending PCI writes will be performed (in order), and so + you can be sure that the writes are guaranteed to be done. + + Rarely needed, only in some timing sensitive cases. 
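+   This is the usual "flush posted PCI writes" idiom: the readl() forces the
+   bridge to complete the preceding writel() before execution continues; see
+   e.g. the write_reg_sync(0x03, IVTV_REG_DMACONTROL) call in ivtv_probe().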
+ Apparently if this is not done some motherboards seem + to kill the firmware and get into the broken state until computer is + rebooted. */ +#define write_sync(val, reg) \ + do { writel(val, reg); readl(reg); } while (0) + +#define read_reg(reg) readl(itv->reg_mem + (reg)) +#define write_reg(val, reg) writel(val, itv->reg_mem + (reg)) +#define write_reg_sync(val, reg) \ + do { write_reg(val, reg); read_reg(reg); } while (0) + +#define read_enc(addr) readl(itv->enc_mem + (u32)(addr)) +#define write_enc(val, addr) writel(val, itv->enc_mem + (u32)(addr)) +#define write_enc_sync(val, addr) \ + do { write_enc(val, addr); read_enc(addr); } while (0) + +#define read_dec(addr) readl(itv->dec_mem + (u32)(addr)) +#define write_dec(val, addr) writel(val, itv->dec_mem + (u32)(addr)) +#define write_dec_sync(val, addr) \ + do { write_dec(val, addr); read_dec(addr); } while (0) + +#endif /* IVTV_DRIVER_H */ diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c new file mode 100644 index 000000000000..90e0f51e635c --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-fileops.c @@ -0,0 +1,918 @@ +/* + file operation functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-fileops.h" +#include "ivtv-i2c.h" +#include "ivtv-queue.h" +#include "ivtv-udma.h" +#include "ivtv-irq.h" +#include "ivtv-vbi.h" +#include "ivtv-mailbox.h" +#include "ivtv-audio.h" +#include "ivtv-streams.h" +#include "ivtv-yuv.h" +#include "ivtv-controls.h" +#include "ivtv-ioctl.h" + +/* This function tries to claim the stream for a specific file descriptor. + If no one else is using this stream then the stream is claimed and + associated VBI streams are also automatically claimed. + Possible error returns: -EBUSY if someone else has claimed + the stream or 0 on success. */ +int ivtv_claim_stream(struct ivtv_open_id *id, int type) +{ + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[type]; + struct ivtv_stream *s_vbi; + int vbi_type; + + if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { + /* someone already claimed this stream */ + if (s->id == id->open_id) { + /* yes, this file descriptor did. So that's OK. */ + return 0; + } + if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI || + type == IVTV_ENC_STREAM_TYPE_VBI)) { + /* VBI is handled already internally, now also assign + the file descriptor to this stream for external + reading of the stream. 
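+		   (The driver keeps consuming the VBI data internally for
+		   MPEG insertion; in addition, this filehandle may now read
+		   the same stream directly.)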
*/ + s->id = id->open_id; + IVTV_DEBUG_INFO("Start Read VBI\n"); + return 0; + } + /* someone else is using this stream already */ + IVTV_DEBUG_INFO("Stream %d is busy\n", type); + return -EBUSY; + } + s->id = id->open_id; + if (type == IVTV_DEC_STREAM_TYPE_VBI) { + /* Enable reinsertion interrupt */ + ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT); + } + + /* IVTV_DEC_STREAM_TYPE_MPG needs to claim IVTV_DEC_STREAM_TYPE_VBI, + IVTV_ENC_STREAM_TYPE_MPG needs to claim IVTV_ENC_STREAM_TYPE_VBI + (provided VBI insertion is on and sliced VBI is selected), for all + other streams we're done */ + if (type == IVTV_DEC_STREAM_TYPE_MPG) { + vbi_type = IVTV_DEC_STREAM_TYPE_VBI; + } else if (type == IVTV_ENC_STREAM_TYPE_MPG && + itv->vbi.insert_mpeg && itv->vbi.sliced_in->service_set) { + vbi_type = IVTV_ENC_STREAM_TYPE_VBI; + } else { + return 0; + } + s_vbi = &itv->streams[vbi_type]; + + if (!test_and_set_bit(IVTV_F_S_CLAIMED, &s_vbi->s_flags)) { + /* Enable reinsertion interrupt */ + if (vbi_type == IVTV_DEC_STREAM_TYPE_VBI) + ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT); + } + /* mark that it is used internally */ + set_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags); + return 0; +} + +/* This function releases a previously claimed stream. It will take into + account associated VBI streams. */ +void ivtv_release_stream(struct ivtv_stream *s) +{ + struct ivtv *itv = s->itv; + struct ivtv_stream *s_vbi; + + s->id = -1; + if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) && + test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { + /* this stream is still in use internally */ + return; + } + if (!test_and_clear_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { + IVTV_DEBUG_WARN("Release stream %s not in use!\n", s->name); + return; + } + + ivtv_flush_queues(s); + + /* disable reinsertion interrupt */ + if (s->type == IVTV_DEC_STREAM_TYPE_VBI) + ivtv_set_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT); + + /* IVTV_DEC_STREAM_TYPE_MPG needs to release IVTV_DEC_STREAM_TYPE_VBI, + IVTV_ENC_STREAM_TYPE_MPG needs to release IVTV_ENC_STREAM_TYPE_VBI, + for all other streams we're done */ + if (s->type == IVTV_DEC_STREAM_TYPE_MPG) + s_vbi = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI]; + else if (s->type == IVTV_ENC_STREAM_TYPE_MPG) + s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; + else + return; + + /* clear internal use flag */ + if (!test_and_clear_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags)) { + /* was already cleared */ + return; + } + if (s_vbi->id != -1) { + /* VBI stream still claimed by a file descriptor */ + return; + } + /* disable reinsertion interrupt */ + if (s_vbi->type == IVTV_DEC_STREAM_TYPE_VBI) + ivtv_set_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT); + clear_bit(IVTV_F_S_CLAIMED, &s_vbi->s_flags); + ivtv_flush_queues(s_vbi); +} + +static void ivtv_dualwatch(struct ivtv *itv) +{ + struct v4l2_tuner vt; + u16 new_bitmap; + u16 new_stereo_mode; + const u16 stereo_mask = 0x0300; + const u16 dual = 0x0200; + + new_stereo_mode = itv->params.audio_properties & stereo_mask; + memset(&vt, 0, sizeof(vt)); + ivtv_call_i2c_clients(itv, VIDIOC_G_TUNER, &vt); + if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 && (vt.rxsubchans & V4L2_TUNER_SUB_LANG2)) + new_stereo_mode = dual; + + if (new_stereo_mode == itv->dualwatch_stereo_mode) + return; + + new_bitmap = new_stereo_mode | (itv->params.audio_properties & ~stereo_mask); + + IVTV_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x. 
new audio_bitmask=0x%ux\n", + itv->dualwatch_stereo_mode, new_stereo_mode, new_bitmap); + + if (ivtv_vapi(itv, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, new_bitmap) == 0) { + itv->dualwatch_stereo_mode = new_stereo_mode; + return; + } + IVTV_DEBUG_INFO("dualwatch: changing stereo flag failed\n"); +} + +static void ivtv_update_pgm_info(struct ivtv *itv) +{ + u32 wr_idx = (read_enc(itv->pgm_info_offset) - itv->pgm_info_offset - 4) / 24; + int cnt; + int i = 0; + + if (wr_idx >= itv->pgm_info_num) { + IVTV_DEBUG_WARN("Invalid PGM index %d (>= %d)\n", wr_idx, itv->pgm_info_num); + return; + } + cnt = (wr_idx + itv->pgm_info_num - itv->pgm_info_write_idx) % itv->pgm_info_num; + while (i < cnt) { + int idx = (itv->pgm_info_write_idx + i) % itv->pgm_info_num; + struct v4l2_enc_idx_entry *e = itv->pgm_info + idx; + u32 addr = itv->pgm_info_offset + 4 + idx * 24; + const int mapping[] = { V4L2_ENC_IDX_FRAME_P, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_B, 0 }; + + e->offset = read_enc(addr + 4) + ((u64)read_enc(addr + 8) << 32); + if (e->offset > itv->mpg_data_received) { + break; + } + e->offset += itv->vbi_data_inserted; + e->length = read_enc(addr); + e->pts = read_enc(addr + 16) + ((u64)(read_enc(addr + 20) & 1) << 32); + e->flags = mapping[read_enc(addr + 12) & 3]; + i++; + } + itv->pgm_info_write_idx = (itv->pgm_info_write_idx + i) % itv->pgm_info_num; +} + +static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block, int *err) +{ + struct ivtv *itv = s->itv; + struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; + struct ivtv_buffer *buf; + DEFINE_WAIT(wait); + + *err = 0; + while (1) { + if (s->type == IVTV_ENC_STREAM_TYPE_MPG) { + /* Process pending program info updates and pending VBI data */ + ivtv_update_pgm_info(itv); + + if (jiffies - itv->dualwatch_jiffies > HZ) { + itv->dualwatch_jiffies = jiffies; + ivtv_dualwatch(itv); + } + + if (test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) && + !test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) { + while ((buf = ivtv_dequeue(s_vbi, &s_vbi->q_full))) { + /* byteswap and process VBI data */ + ivtv_process_vbi_data(itv, buf, s_vbi->dma_pts, s_vbi->type); + ivtv_enqueue(s_vbi, buf, &s_vbi->q_free); + } + } + buf = &itv->vbi.sliced_mpeg_buf; + if (buf->readpos != buf->bytesused) { + return buf; + } + } + + /* do we have leftover data? */ + buf = ivtv_dequeue(s, &s->q_io); + if (buf) + return buf; + + /* do we have new data? 
*/ + buf = ivtv_dequeue(s, &s->q_full); + if (buf) { + if (!test_and_clear_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags)) + return buf; + if (s->type == IVTV_ENC_STREAM_TYPE_MPG) + /* byteswap MPG data */ + ivtv_buf_swap(buf); + else if (s->type != IVTV_DEC_STREAM_TYPE_VBI) { + /* byteswap and process VBI data */ + ivtv_process_vbi_data(itv, buf, s->dma_pts, s->type); + } + return buf; + } + /* return if file was opened with O_NONBLOCK */ + if (non_block) { + *err = -EAGAIN; + return NULL; + } + + /* return if end of stream */ + if (s->type != IVTV_DEC_STREAM_TYPE_VBI && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) { + clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags); + IVTV_DEBUG_INFO("EOS %s\n", s->name); + return NULL; + } + + /* wait for more data to arrive */ + prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); + /* New buffers might have become available before we were added to the waitqueue */ + if (!s->q_full.buffers) + schedule(); + finish_wait(&s->waitq, &wait); + if (signal_pending(current)) { + /* return if a signal was received */ + IVTV_DEBUG_INFO("User stopped %s\n", s->name); + *err = -EINTR; + return NULL; + } + } +} + +static void ivtv_setup_sliced_vbi_buf(struct ivtv *itv) +{ + int idx = itv->vbi.inserted_frame % IVTV_VBI_FRAMES; + + itv->vbi.sliced_mpeg_buf.buf = itv->vbi.sliced_mpeg_data[idx]; + itv->vbi.sliced_mpeg_buf.bytesused = itv->vbi.sliced_mpeg_size[idx]; + itv->vbi.sliced_mpeg_buf.readpos = 0; +} + +static size_t ivtv_copy_buf_to_user(struct ivtv_stream *s, struct ivtv_buffer *buf, + char __user *ubuf, size_t ucount) +{ + struct ivtv *itv = s->itv; + size_t len = buf->bytesused - buf->readpos; + + if (len > ucount) len = ucount; + if (itv->vbi.insert_mpeg && s->type == IVTV_ENC_STREAM_TYPE_MPG && + itv->vbi.sliced_in->service_set && buf != &itv->vbi.sliced_mpeg_buf) { + const char *start = buf->buf + buf->readpos; + const char *p = start + 1; + const u8 *q; + u8 ch = itv->search_pack_header ? 
0xba : 0xe0; + int stuffing, i; + + while (start + len > p && (q = memchr(p, 0, start + len - p))) { + p = q + 1; + if ((char *)q + 15 >= buf->buf + buf->bytesused || + q[1] != 0 || q[2] != 1 || q[3] != ch) { + continue; + } + if (!itv->search_pack_header) { + if ((q[6] & 0xc0) != 0x80) + continue; + if (((q[7] & 0xc0) == 0x80 && (q[9] & 0xf0) == 0x20) || + ((q[7] & 0xc0) == 0xc0 && (q[9] & 0xf0) == 0x30)) { + ch = 0xba; + itv->search_pack_header = 1; + p = q + 9; + } + continue; + } + stuffing = q[13] & 7; + /* all stuffing bytes must be 0xff */ + for (i = 0; i < stuffing; i++) + if (q[14 + i] != 0xff) + break; + if (i == stuffing && (q[4] & 0xc4) == 0x44 && (q[12] & 3) == 3 && + q[14 + stuffing] == 0 && q[15 + stuffing] == 0 && + q[16 + stuffing] == 1) { + itv->search_pack_header = 0; + len = (char *)q - start; + ivtv_setup_sliced_vbi_buf(itv); + break; + } + } + } + if (copy_to_user(ubuf, (u8 *)buf->buf + buf->readpos, len)) { + IVTV_DEBUG_WARN("copy %zd bytes to user failed for %s\n", len, s->name); + return -EFAULT; + } + /*IVTV_INFO("copied %lld %d %d %d %d %d vbi %d\n", itv->mpg_data_received, len, ucount, + buf->readpos, buf->bytesused, buf->bytesused - buf->readpos - len, + buf == &itv->vbi.sliced_mpeg_buf); */ + buf->readpos += len; + if (s->type == IVTV_ENC_STREAM_TYPE_MPG && buf != &itv->vbi.sliced_mpeg_buf) + itv->mpg_data_received += len; + return len; +} + +static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_count, int non_block) +{ + struct ivtv *itv = s->itv; + size_t tot_written = 0; + int single_frame = 0; + + if (atomic_read(&itv->capturing) == 0 && s->id == -1) { + /* shouldn't happen */ + IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name); + return -EIO; + } + + /* Each VBI buffer is one frame, the v4l2 API says that for VBI the frames should + arrive one-by-one, so make sure we never output more than one VBI frame at a time */ + if (s->type == IVTV_DEC_STREAM_TYPE_VBI || + (s->type == IVTV_ENC_STREAM_TYPE_VBI && itv->vbi.sliced_in->service_set)) + single_frame = 1; + + for (;;) { + struct ivtv_buffer *buf; + int rc; + + buf = ivtv_get_buffer(s, non_block, &rc); + if (buf == NULL && rc == -EAGAIN && tot_written) + break; + if (buf == NULL) + return rc; + rc = ivtv_copy_buf_to_user(s, buf, ubuf + tot_written, tot_count - tot_written); + if (buf != &itv->vbi.sliced_mpeg_buf) { + ivtv_enqueue(s, buf, (buf->readpos == buf->bytesused) ? &s->q_free : &s->q_io); + } + else if (buf->readpos == buf->bytesused) { + int idx = itv->vbi.inserted_frame % IVTV_VBI_FRAMES; + itv->vbi.sliced_mpeg_size[idx] = 0; + itv->vbi.inserted_frame++; + itv->vbi_data_inserted += buf->bytesused; + } + if (rc < 0) + return rc; + tot_written += rc; + + if (tot_written == tot_count || single_frame) + break; + } + return tot_written; +} + +static ssize_t ivtv_read_pos(struct ivtv_stream *s, char __user *ubuf, size_t count, + loff_t *pos, int non_block) +{ + ssize_t rc = count ? ivtv_read(s, ubuf, count, non_block) : 0; + struct ivtv *itv = s->itv; + + IVTV_DEBUG_INFO("read %zd from %s, got %zd\n", count, s->name, rc); + if (rc > 0) + pos += rc; + return rc; +} + +int ivtv_start_capture(struct ivtv_open_id *id) +{ + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[id->type]; + struct ivtv_stream *s_vbi; + + if (s->type == IVTV_ENC_STREAM_TYPE_RAD || + s->type == IVTV_DEC_STREAM_TYPE_MPG || + s->type == IVTV_DEC_STREAM_TYPE_YUV || + s->type == IVTV_DEC_STREAM_TYPE_VOUT) { + /* you cannot read from these stream types. 
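+		   (Decoder streams are fed via write(), and the radio device
+		   node carries no data, so a read() on any of these is
+		   rejected up front.)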
*/ + return -EPERM; + } + + /* Try to claim this stream. */ + if (ivtv_claim_stream(id, s->type)) + return -EBUSY; + + /* This stream does not need to start capturing */ + if (s->type == IVTV_DEC_STREAM_TYPE_VBI) { + set_bit(IVTV_F_S_APPL_IO, &s->s_flags); + return 0; + } + + /* If capture is already in progress, then we also have to + do nothing extra. */ + if (test_bit(IVTV_F_S_STREAMOFF, &s->s_flags) || test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags)) { + set_bit(IVTV_F_S_APPL_IO, &s->s_flags); + return 0; + } + + /* Start VBI capture if required */ + s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; + if (s->type == IVTV_ENC_STREAM_TYPE_MPG && + test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) && + !test_and_set_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags)) { + /* Note: the IVTV_ENC_STREAM_TYPE_VBI is claimed + automatically when the MPG stream is claimed. + We only need to start the VBI capturing. */ + if (ivtv_start_v4l2_encode_stream(s_vbi)) { + IVTV_DEBUG_WARN("VBI capture start failed\n"); + + /* Failure, clean up and return an error */ + clear_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags); + clear_bit(IVTV_F_S_STREAMING, &s->s_flags); + /* also releases the associated VBI stream */ + ivtv_release_stream(s); + return -EIO; + } + IVTV_DEBUG_INFO("VBI insertion started\n"); + } + + /* Tell the card to start capturing */ + if (!ivtv_start_v4l2_encode_stream(s)) { + /* We're done */ + set_bit(IVTV_F_S_APPL_IO, &s->s_flags); + /* Resume a possibly paused encoder */ + if (test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags)) + ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1); + return 0; + } + + /* failure, clean up */ + IVTV_DEBUG_WARN("Failed to start capturing for stream %s\n", s->name); + + /* Note: the IVTV_ENC_STREAM_TYPE_VBI is released + automatically when the MPG stream is released. + We only need to stop the VBI capturing. 
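+	   (This mirrors the start path above, where only the VBI capture had
+	   to be started because the stream was claimed implicitly along with
+	   the MPG stream.)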
*/ + if (s->type == IVTV_ENC_STREAM_TYPE_MPG && + test_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags)) { + ivtv_stop_v4l2_encode_stream(s_vbi, 0); + clear_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags); + } + clear_bit(IVTV_F_S_STREAMING, &s->s_flags); + ivtv_release_stream(s); + return -EIO; +} + +ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_t * pos) +{ + struct ivtv_open_id *id = filp->private_data; + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[id->type]; + int rc; + + IVTV_DEBUG_IOCTL("read %zd bytes from %s\n", count, s->name); + + rc = ivtv_start_capture(id); + if (rc) + return rc; + return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK); +} + +int ivtv_start_decoding(struct ivtv_open_id *id, int speed) +{ + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[id->type]; + + if (atomic_read(&itv->decoding) == 0) { + if (ivtv_claim_stream(id, s->type)) { + /* someone else is using this stream already */ + IVTV_DEBUG_WARN("start decode, stream already claimed\n"); + return -EBUSY; + } + ivtv_start_v4l2_decode_stream(s, 0); + } + if (s->type == IVTV_DEC_STREAM_TYPE_MPG) + return ivtv_set_speed(itv, speed); + return 0; +} + +ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos) +{ + struct ivtv_open_id *id = filp->private_data; + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[id->type]; + struct ivtv_buffer *buf; + struct ivtv_queue q; + int bytes_written = 0; + int mode; + int rc; + DEFINE_WAIT(wait); + + IVTV_DEBUG_IOCTL("write %zd bytes to %s\n", count, s->name); + + if (s->type != IVTV_DEC_STREAM_TYPE_MPG && + s->type != IVTV_DEC_STREAM_TYPE_YUV && + s->type != IVTV_DEC_STREAM_TYPE_VOUT) + /* not decoder streams */ + return -EPERM; + + /* Try to claim this stream */ + if (ivtv_claim_stream(id, s->type)) + return -EBUSY; + + /* This stream does not need to start any decoding */ + if (s->type == IVTV_DEC_STREAM_TYPE_VOUT) { + set_bit(IVTV_F_S_APPL_IO, &s->s_flags); + return ivtv_write_vbi(itv, user_buf, count); + } + + mode = s->type == IVTV_DEC_STREAM_TYPE_MPG ? 
OUT_MPG : OUT_YUV; + + if (ivtv_set_output_mode(itv, mode) != mode) { + ivtv_release_stream(s); + return -EBUSY; + } + ivtv_queue_init(&q); + set_bit(IVTV_F_S_APPL_IO, &s->s_flags); + +retry: + for (;;) { + /* Gather buffers */ + while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_io))) + ivtv_enqueue(s, buf, &q); + while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_free))) { + ivtv_enqueue(s, buf, &q); + } + if (q.buffers) + break; + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); + /* New buffers might have become free before we were added to the waitqueue */ + if (!s->q_free.buffers) + schedule(); + finish_wait(&s->waitq, &wait); + if (signal_pending(current)) { + IVTV_DEBUG_INFO("User stopped %s\n", s->name); + return -EINTR; + } + } + + /* copy user data into buffers */ + while ((buf = ivtv_dequeue(s, &q))) { + /* Make sure we really got all the user data */ + rc = ivtv_buf_copy_from_user(s, buf, user_buf, count); + + if (rc < 0) { + ivtv_queue_move(s, &q, NULL, &s->q_free, 0); + return rc; + } + user_buf += rc; + count -= rc; + bytes_written += rc; + + if (buf->bytesused != s->buf_size) { + /* incomplete, leave in q_io for next time */ + ivtv_enqueue(s, buf, &s->q_io); + break; + } + /* Byteswap MPEG buffer */ + if (s->type == IVTV_DEC_STREAM_TYPE_MPG) + ivtv_buf_swap(buf); + ivtv_enqueue(s, buf, &s->q_full); + } + + /* Start decoder (returns 0 if already started) */ + rc = ivtv_start_decoding(id, itv->speed); + if (rc) { + IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name); + + /* failure, clean up */ + clear_bit(IVTV_F_S_STREAMING, &s->s_flags); + clear_bit(IVTV_F_S_APPL_IO, &s->s_flags); + return rc; + } + if (test_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags)) { + if (s->q_full.length >= itv->dma_data_req_size) { + int got_sig; + + prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); + while (!(got_sig = signal_pending(current)) && + test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) { + schedule(); + } + finish_wait(&itv->dma_waitq, &wait); + if (got_sig) { + IVTV_DEBUG_INFO("User interrupted %s\n", s->name); + return -EINTR; + } + + clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); + ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size); + ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 1); + } + } + /* more user data is available, wait until buffers become free + to transfer the rest. 
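+	   (Blocking writers loop back to 'retry'; with O_NONBLOCK the number
+	   of bytes written so far is returned instead.)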
*/ + if (count && !(filp->f_flags & O_NONBLOCK)) + goto retry; + IVTV_DEBUG_INFO("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused); + return bytes_written; +} + +unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait) +{ + struct ivtv_open_id *id = filp->private_data; + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[id->type]; + int res = 0; + + /* add stream's waitq to the poll list */ + poll_wait(filp, &s->waitq, wait); + + set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); + if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) || + test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags)) + res = POLLPRI; + + /* Allow write if buffers are available for writing */ + if (s->q_free.buffers) + res |= POLLOUT | POLLWRNORM; + return res; +} + +unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait) +{ + struct ivtv_open_id *id = filp->private_data; + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[id->type]; + int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags); + + /* Start a capture if there is none */ + if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) { + int rc = ivtv_start_capture(id); + + if (rc) { + IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n", + s->name, rc); + return POLLERR; + } + } + + /* add stream's waitq to the poll list */ + poll_wait(filp, &s->waitq, wait); + + if (eof || s->q_full.length) + return POLLIN | POLLRDNORM; + return 0; +} + +void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end) +{ + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[id->type]; + + IVTV_DEBUG_IOCTL("close() of %s\n", s->name); + + /* 'Unclaim' this stream */ + + /* Stop capturing */ + if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) { + struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; + + IVTV_DEBUG_INFO("close stopping capture\n"); + /* Special case: a running VBI capture for VBI insertion + in the mpeg stream. Need to stop that too. 
*/ + if (id->type == IVTV_ENC_STREAM_TYPE_MPG && + test_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags) && + !test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) { + IVTV_DEBUG_INFO("close stopping embedded VBI capture\n"); + ivtv_stop_v4l2_encode_stream(s_vbi, 0); + } + if ((id->type == IVTV_DEC_STREAM_TYPE_VBI || + id->type == IVTV_ENC_STREAM_TYPE_VBI) && + test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { + /* Also used internally, don't stop capturing */ + s->id = -1; + } + else { + ivtv_stop_v4l2_encode_stream(s, gop_end); + } + } + clear_bit(IVTV_F_S_APPL_IO, &s->s_flags); + clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags); + + ivtv_release_stream(s); +} + +void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts) +{ + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[id->type]; + + IVTV_DEBUG_IOCTL("close() of %s\n", s->name); + + /* Stop decoding */ + if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) { + IVTV_DEBUG_INFO("close stopping decode\n"); + + ivtv_stop_v4l2_decode_stream(s, flags, pts); + } + clear_bit(IVTV_F_S_APPL_IO, &s->s_flags); + clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags); + if (id->type == IVTV_DEC_STREAM_TYPE_YUV && test_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags)) { + /* Restore registers we've changed & clean up any mess we've made */ + ivtv_yuv_close(itv); + } + if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV) + itv->output_mode = OUT_NONE; + else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG) + itv->output_mode = OUT_NONE; + + itv->speed = 0; + ivtv_release_stream(s); +} + +int ivtv_v4l2_close(struct inode *inode, struct file *filp) +{ + struct ivtv_open_id *id = filp->private_data; + struct ivtv *itv = id->itv; + struct ivtv_stream *s = &itv->streams[id->type]; + + IVTV_DEBUG_IOCTL("close() of %s\n", s->name); + + /* Easy case first: this stream was never claimed by us */ + if (s->id != id->open_id) { + kfree(id); + return 0; + } + + /* 'Unclaim' this stream */ + + /* Stop radio */ + if (id->type == IVTV_ENC_STREAM_TYPE_RAD) { + /* Closing radio device, return to TV mode */ + ivtv_mute(itv); + /* Mark that the radio is no longer in use */ + clear_bit(IVTV_F_I_RADIO_USER, &itv->i_flags); + /* Switch tuner to TV */ + ivtv_call_i2c_clients(itv, VIDIOC_S_STD, &itv->std); + /* Select correct audio input (i.e. TV tuner or Line in) */ + ivtv_audio_set_io(itv); + /* Done! Unmute and continue. */ + ivtv_unmute(itv); + ivtv_release_stream(s); + } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { + ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0); + } else { + ivtv_stop_capture(id, 0); + } + kfree(id); + return 0; +} + +int ivtv_v4l2_open(struct inode *inode, struct file *filp) +{ + int x, y = 0; + struct ivtv_open_id *item; + struct ivtv *itv = NULL; + struct ivtv_stream *s = NULL; + int minor = MINOR(inode->i_rdev); + + /* Find which card this open was on */ + spin_lock(&ivtv_cards_lock); + for (x = 0; itv == NULL && x < ivtv_cards_active; x++) { + /* find out which stream this open was on */ + for (y = 0; y < IVTV_MAX_STREAMS; y++) { + s = &ivtv_cards[x]->streams[y]; + if (s->v4l2dev && s->v4l2dev->minor == minor) { + itv = ivtv_cards[x]; + break; + } + } + } + spin_unlock(&ivtv_cards_lock); + + if (itv == NULL) { + /* Couldn't find a device registered + on that minor, shouldn't happen! 
*/ + printk(KERN_WARNING "ivtv: no ivtv device found on minor %d\n", minor); + return -ENXIO; + } + + if (y == IVTV_DEC_STREAM_TYPE_MPG && + test_bit(IVTV_F_S_CLAIMED, &itv->streams[IVTV_DEC_STREAM_TYPE_YUV].s_flags)) + return -EBUSY; + + if (y == IVTV_DEC_STREAM_TYPE_YUV && + test_bit(IVTV_F_S_CLAIMED, &itv->streams[IVTV_DEC_STREAM_TYPE_MPG].s_flags)) + return -EBUSY; + + if (y == IVTV_DEC_STREAM_TYPE_YUV) { + if (read_reg(0x82c) == 0) { + IVTV_ERR("Tried to open YUV output device but need to send data to mpeg decoder before it can be used\n"); + /* return -ENODEV; */ + } + ivtv_udma_alloc(itv); + } + + /* Allocate memory */ + item = kmalloc(sizeof(struct ivtv_open_id), GFP_KERNEL); + if (NULL == item) { + IVTV_DEBUG_WARN("nomem on v4l2 open\n"); + return -ENOMEM; + } + item->itv = itv; + item->type = y; + + item->open_id = itv->open_id++; + filp->private_data = item; + + if (item->type == IVTV_ENC_STREAM_TYPE_RAD) { + /* Try to claim this stream */ + if (ivtv_claim_stream(item, item->type)) { + /* No, it's already in use */ + kfree(item); + return -EBUSY; + } + + /* We have the radio */ + ivtv_mute(itv); + /* Switch tuner to radio */ + ivtv_call_i2c_clients(itv, AUDC_SET_RADIO, NULL); + /* Mark that the radio is being used. */ + set_bit(IVTV_F_I_RADIO_USER, &itv->i_flags); + /* Select the correct audio input (i.e. radio tuner) */ + ivtv_audio_set_io(itv); + /* Done! Unmute and continue. */ + ivtv_unmute(itv); + } + + /* YUV or MPG Decoding Mode? */ + if (y == IVTV_DEC_STREAM_TYPE_MPG) + clear_bit(IVTV_F_I_DEC_YUV, &itv->i_flags); + else if (y == IVTV_DEC_STREAM_TYPE_YUV) + { + set_bit(IVTV_F_I_DEC_YUV, &itv->i_flags); + } + + return 0; +} + +void ivtv_mute(struct ivtv *itv) +{ + struct v4l2_control ctrl = { V4L2_CID_AUDIO_MUTE, 1 }; + + /* Mute sound to avoid pop */ + ivtv_control_ioctls(itv, VIDIOC_S_CTRL, &ctrl); + + if (atomic_read(&itv->capturing)) + ivtv_vapi(itv, CX2341X_ENC_MUTE_AUDIO, 1, 1); + + IVTV_DEBUG_INFO("Mute\n"); +} + +void ivtv_unmute(struct ivtv *itv) +{ + struct v4l2_control ctrl = { V4L2_CID_AUDIO_MUTE, 0 }; + + /* initialize or refresh input */ + if (atomic_read(&itv->capturing) == 0) + ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0); + + ivtv_sleep_timeout(HZ / 10, 0); + + if (atomic_read(&itv->capturing)) { + ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12); + ivtv_vapi(itv, CX2341X_ENC_MUTE_AUDIO, 1, 0); + } + + /* Unmute */ + ivtv_control_ioctls(itv, VIDIOC_S_CTRL, &ctrl); + IVTV_DEBUG_INFO("Unmute\n"); +} diff --git a/drivers/media/video/ivtv/ivtv-fileops.h b/drivers/media/video/ivtv/ivtv-fileops.h new file mode 100644 index 000000000000..1afa950209b8 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-fileops.h @@ -0,0 +1,45 @@ +/* + file operation functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* Testing/Debugging */ +int ivtv_v4l2_open(struct inode *inode, struct file *filp); +ssize_t ivtv_v4l2_read(struct file *filp, char __user *buf, size_t count, + loff_t * pos); +ssize_t ivtv_v4l2_write(struct file *filp, const char __user *buf, size_t count, + loff_t * pos); +int ivtv_v4l2_close(struct inode *inode, struct file *filp); +unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait); +unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait); +int ivtv_start_capture(struct ivtv_open_id *id); +void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end); +int ivtv_start_decoding(struct ivtv_open_id *id, int speed); +void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts); +void ivtv_mute(struct ivtv *itv); +void ivtv_unmute(struct ivtv *itv); + +/* Utilities */ + +/* Try to claim a stream for the filehandle. Return 0 on success, + -EBUSY if stream already claimed. Once a stream is claimed, it + remains claimed until the associated filehandle is closed. */ +int ivtv_claim_stream(struct ivtv_open_id *id, int type); + +/* Release a previously claimed stream. */ +void ivtv_release_stream(struct ivtv_stream *s); diff --git a/drivers/media/video/ivtv/ivtv-firmware.c b/drivers/media/video/ivtv/ivtv-firmware.c new file mode 100644 index 000000000000..d4c910b782af --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-firmware.c @@ -0,0 +1,272 @@ +/* + ivtv firmware functions. + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-mailbox.h" +#include "ivtv-firmware.h" +#include + +#define IVTV_MASK_SPU_ENABLE 0xFFFFFFFE +#define IVTV_MASK_VPU_ENABLE15 0xFFFFFFF6 +#define IVTV_MASK_VPU_ENABLE16 0xFFFFFFFB +#define IVTV_CMD_VDM_STOP 0x00000000 +#define IVTV_CMD_AO_STOP 0x00000005 +#define IVTV_CMD_APU_PING 0x00000000 +#define IVTV_CMD_VPU_STOP15 0xFFFFFFFE +#define IVTV_CMD_VPU_STOP16 0xFFFFFFEE +#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF +#define IVTV_CMD_SPU_STOP 0x00000001 +#define IVTV_CMD_SDRAM_PRECHARGE_INIT 0x0000001A +#define IVTV_CMD_SDRAM_REFRESH_INIT 0x80000640 +#define IVTV_SDRAM_SLEEPTIME (60 * HZ / 100) /* 600 ms */ + +#define IVTV_DECODE_INIT_MPEG_FILENAME "v4l-cx2341x-init.mpg" +#define IVTV_DECODE_INIT_MPEG_SIZE (152*1024) + +/* Encoder/decoder firmware sizes */ +#define IVTV_FW_ENC_SIZE (376836) +#define IVTV_FW_DEC_SIZE (256*1024) + +static int load_fw_direct(const char *fn, volatile u8 __iomem *mem, struct ivtv *itv, long size) +{ + const struct firmware *fw = NULL; + int retries = 3; + +retry: + if (retries && request_firmware(&fw, fn, &itv->dev->dev) == 0) { + int i; + volatile u32 __iomem *dst = (volatile u32 __iomem *)mem; + const u32 *src = (const u32 *)fw->data; + + /* temporarily allow 256 KB encoding firmwares as well for + compatibility with blackbird cards */ + if (fw->size != size && fw->size != 256 * 1024) { + /* Due to race conditions in firmware loading (esp. with udev <0.95) + the wrong file was sometimes loaded. So we check filesizes to + see if at least the right-sized file was loaded. If not, then we + retry. */ + IVTV_INFO("retry: file loaded was not %s (expected size %ld, got %zd)\n", fn, size, fw->size); + release_firmware(fw); + retries--; + goto retry; + } + for (i = 0; i < fw->size; i += 4) { + /* no need for endianness conversion on the ppc */ + __raw_writel(*src, dst); + dst++; + src++; + } + release_firmware(fw); + IVTV_INFO("loaded %s firmware (%zd bytes)\n", fn, fw->size); + return size; + } + IVTV_ERR("unable to open firmware %s (must be %ld bytes)\n", fn, size); + IVTV_ERR("did you put the firmware in the hotplug firmware directory?\n"); + return -ENOMEM; +} + +void ivtv_halt_firmware(struct ivtv *itv) +{ + IVTV_DEBUG_INFO("Preparing for firmware halt.\n"); + if (itv->has_cx23415 && itv->dec_mbox.mbox) + ivtv_vapi(itv, CX2341X_DEC_HALT_FW, 0); + if (itv->enc_mbox.mbox) + ivtv_vapi(itv, CX2341X_ENC_HALT_FW, 0); + + ivtv_sleep_timeout(HZ / 100, 0); + itv->enc_mbox.mbox = itv->dec_mbox.mbox = NULL; + + IVTV_DEBUG_INFO("Stopping VDM\n"); + write_reg(IVTV_CMD_VDM_STOP, IVTV_REG_VDM); + + IVTV_DEBUG_INFO("Stopping AO\n"); + write_reg(IVTV_CMD_AO_STOP, IVTV_REG_AO); + + IVTV_DEBUG_INFO("pinging (?) 
APU\n"); + write_reg(IVTV_CMD_APU_PING, IVTV_REG_APU); + + IVTV_DEBUG_INFO("Stopping VPU\n"); + if (!itv->has_cx23415) + write_reg(IVTV_CMD_VPU_STOP16, IVTV_REG_VPU); + else + write_reg(IVTV_CMD_VPU_STOP15, IVTV_REG_VPU); + + IVTV_DEBUG_INFO("Resetting Hw Blocks\n"); + write_reg(IVTV_CMD_HW_BLOCKS_RST, IVTV_REG_HW_BLOCKS); + + IVTV_DEBUG_INFO("Stopping SPU\n"); + write_reg(IVTV_CMD_SPU_STOP, IVTV_REG_SPU); + + ivtv_sleep_timeout(HZ / 100, 0); + + IVTV_DEBUG_INFO("init Encoder SDRAM pre-charge\n"); + write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_ENC_SDRAM_PRECHARGE); + + IVTV_DEBUG_INFO("init Encoder SDRAM refresh to 1us\n"); + write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_ENC_SDRAM_REFRESH); + + if (itv->has_cx23415) { + IVTV_DEBUG_INFO("init Decoder SDRAM pre-charge\n"); + write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_DEC_SDRAM_PRECHARGE); + + IVTV_DEBUG_INFO("init Decoder SDRAM refresh to 1us\n"); + write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_DEC_SDRAM_REFRESH); + } + + IVTV_DEBUG_INFO("Sleeping for %dms (600 recommended)\n", + (int)(IVTV_SDRAM_SLEEPTIME * 1000 / HZ)); + ivtv_sleep_timeout(IVTV_SDRAM_SLEEPTIME, 0); +} + +void ivtv_firmware_versions(struct ivtv *itv) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + + /* Encoder */ + ivtv_vapi_result(itv, data, CX2341X_ENC_GET_VERSION, 0); + IVTV_INFO("Encoder revision: 0x%08x\n", data[0]); + + if (data[0] != 0x02060039) + IVTV_WARN("Recommended firmware version is 0x02060039.\n"); + + if (itv->has_cx23415) { + /* Decoder */ + ivtv_vapi_result(itv, data, CX2341X_DEC_GET_VERSION, 0); + IVTV_INFO("Decoder revision: 0x%08x\n", data[0]); + } +} + +static int ivtv_firmware_copy(struct ivtv *itv) +{ + IVTV_DEBUG_INFO("Loading encoder image\n"); + if (load_fw_direct(CX2341X_FIRM_ENC_FILENAME, + itv->enc_mem, itv, IVTV_FW_ENC_SIZE) != IVTV_FW_ENC_SIZE) { + IVTV_DEBUG_WARN("failed loading encoder firmware\n"); + return -3; + } + if (!itv->has_cx23415) + return 0; + + IVTV_DEBUG_INFO("Loading decoder image\n"); + if (load_fw_direct(CX2341X_FIRM_DEC_FILENAME, + itv->dec_mem, itv, IVTV_FW_DEC_SIZE) != IVTV_FW_DEC_SIZE) { + IVTV_DEBUG_WARN("failed loading decoder firmware\n"); + return -1; + } + return 0; +} + +static volatile struct ivtv_mailbox __iomem *ivtv_search_mailbox(const volatile u8 __iomem *mem, u32 size) +{ + int i; + + /* mailbox is preceeded by a 16 byte 'magic cookie' starting at a 256-byte + address boundary */ + for (i = 0; i < size; i += 0x100) { + if (readl(mem + i) == 0x12345678 && + readl(mem + i + 4) == 0x34567812 && + readl(mem + i + 8) == 0x56781234 && + readl(mem + i + 12) == 0x78123456) { + return (volatile struct ivtv_mailbox __iomem *)(mem + i + 16); + } + } + return NULL; +} + +int ivtv_firmware_init(struct ivtv *itv) +{ + int err; + + ivtv_halt_firmware(itv); + + /* load firmware */ + err = ivtv_firmware_copy(itv); + if (err) { + IVTV_DEBUG_WARN("Error %d loading firmware\n", err); + return err; + } + + /* start firmware */ + write_reg(read_reg(IVTV_REG_SPU) & IVTV_MASK_SPU_ENABLE, IVTV_REG_SPU); + ivtv_sleep_timeout(HZ / 10, 0); + if (itv->has_cx23415) + write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE15, IVTV_REG_VPU); + else + write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE16, IVTV_REG_VPU); + ivtv_sleep_timeout(HZ / 10, 0); + + /* find mailboxes and ping firmware */ + itv->enc_mbox.mbox = ivtv_search_mailbox(itv->enc_mem, IVTV_ENCODER_SIZE); + if (itv->enc_mbox.mbox == NULL) + IVTV_ERR("Encoder mailbox not found\n"); + else if (ivtv_vapi(itv, CX2341X_ENC_PING_FW, 0)) { + IVTV_ERR("Encoder firmware 
dead!\n"); + itv->enc_mbox.mbox = NULL; + } + if (itv->enc_mbox.mbox == NULL) + return -ENODEV; + + if (!itv->has_cx23415) + return 0; + + itv->dec_mbox.mbox = ivtv_search_mailbox(itv->dec_mem, IVTV_DECODER_SIZE); + if (itv->dec_mbox.mbox == NULL) + IVTV_ERR("Decoder mailbox not found\n"); + else if (itv->has_cx23415 && ivtv_vapi(itv, CX2341X_DEC_PING_FW, 0)) { + IVTV_ERR("Decoder firmware dead!\n"); + itv->dec_mbox.mbox = NULL; + } + return itv->dec_mbox.mbox ? 0 : -ENODEV; +} + +void ivtv_init_mpeg_decoder(struct ivtv *itv) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + long readbytes; + volatile u8 __iomem *mem_offset; + + data[0] = 0; + data[1] = itv->params.width; /* YUV source width */ + data[2] = itv->params.height; + data[3] = itv->params.audio_properties; /* Audio settings to use, + bitmap. see docs. */ + if (ivtv_api(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, data)) { + IVTV_ERR("ivtv_init_mpeg_decoder failed to set decoder source\n"); + return; + } + + if (ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 1) != 0) { + IVTV_ERR("ivtv_init_mpeg_decoder failed to start playback\n"); + return; + } + ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data); + mem_offset = itv->dec_mem + data[1]; + + if ((readbytes = load_fw_direct(IVTV_DECODE_INIT_MPEG_FILENAME, + mem_offset, itv, IVTV_DECODE_INIT_MPEG_SIZE)) <= 0) { + IVTV_DEBUG_WARN("failed to read mpeg decoder initialisation file %s\n", + IVTV_DECODE_INIT_MPEG_FILENAME); + } else { + ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, readbytes, 0); + ivtv_sleep_timeout(HZ / 10, 0); + } + ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 4, 0, 0, 0, 1); +} diff --git a/drivers/media/video/ivtv/ivtv-firmware.h b/drivers/media/video/ivtv/ivtv-firmware.h new file mode 100644 index 000000000000..8b2ffe658905 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-firmware.h @@ -0,0 +1,25 @@ +/* + ivtv firmware functions. + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +int ivtv_firmware_init(struct ivtv *itv); +void ivtv_firmware_versions(struct ivtv *itv); +void ivtv_halt_firmware(struct ivtv *itv); +void ivtv_init_mpeg_decoder(struct ivtv *itv); diff --git a/drivers/media/video/ivtv/ivtv-gpio.c b/drivers/media/video/ivtv/ivtv-gpio.c new file mode 100644 index 000000000000..bc8f8ca2961f --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-gpio.c @@ -0,0 +1,307 @@ +/* + gpio functions. + Merging GPIO support into driver: + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-cards.h" +#include "ivtv-gpio.h" +#include + +/* + * GPIO assignment of Yuan MPG600/MPG160 + * + * bit 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 + * OUTPUT IN1 IN0 AM3 AM2 AM1 AM0 + * INPUT DM1 DM0 + * + * IN* : Input selection + * IN1 IN0 + * 1 1 N/A + * 1 0 Line + * 0 1 N/A + * 0 0 Tuner + * + * AM* : Audio Mode + * AM3 0: Normal 1: Mixed(Sub+Main channel) + * AM2 0: Subchannel 1: Main channel + * AM1 0: Stereo 1: Mono + * AM0 0: Normal 1: Mute + * + * DM* : Detected tuner audio Mode + * DM1 0: Stereo 1: Mono + * DM0 0: Multiplex 1: Normal + * + * GPIO Initial Settings + * MPG600 MPG160 + * DIR 0x3080 0x7080 + * OUTPUT 0x000C 0x400C + * + * Special thanks to Makoto Iguchi and Mr. Anonymous + * for analyzing GPIO of MPG160. + * + ***************************************************************************** + * + * GPIO assignment of Avermedia M179 (per information direct from AVerMedia) + * + * bit 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 + * OUTPUT IN0 AM0 IN1 AM1 AM2 IN2 BR0 BR1 + * INPUT + * + * IN* : Input selection + * IN0 IN1 IN2 + * * 1 * Mute + * 0 0 0 Line-In + * 1 0 0 TV Tuner Audio + * 0 0 1 FM Audio + * 1 0 1 Mute + * + * AM* : Audio Mode + * AM0 AM1 AM2 + * 0 0 0 TV Tuner Audio: L_OUT=(L+R)/2, R_OUT=SAP + * 0 0 1 TV Tuner Audio: L_OUT=R_OUT=SAP (SAP) + * 0 1 0 TV Tuner Audio: L_OUT=L, R_OUT=R (stereo) + * 0 1 1 TV Tuner Audio: mute + * 1 * * TV Tuner Audio: L_OUT=R_OUT=(L+R)/2 (mono) + * + * BR* : Audio Sample Rate (BR stands for bitrate for some reason) + * BR0 BR1 + * 0 0 32 kHz + * 0 1 44.1 kHz + * 1 0 48 kHz + * + * DM* : Detected tuner audio Mode + * Unknown currently + * + * Special thanks to AVerMedia Technologies, Inc. and Jiun-Kuei Jung at + * AVerMedia for providing the GPIO information used to add support + * for the M179 cards. 
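 *
 * As a rough illustration of how these tables are consumed: ivtv_gpio_init(),
 * defined below, programs the initial state with two register writes, output
 * data first and then direction,
 *
 *      write_reg(itv->card->gpio_init.initial_value, IVTV_REG_GPIO_OUT);
 *      write_reg(itv->card->gpio_init.direction, IVTV_REG_GPIO_DIR);
 *
 * which for a Yuan MPG600 works out to OUT = 0x000C and DIR = 0x3080 from the
 * table above.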
+ */ + +/********************* GPIO stuffs *********************/ + +/* GPIO registers */ +#define IVTV_REG_GPIO_IN 0x9008 +#define IVTV_REG_GPIO_OUT 0x900c +#define IVTV_REG_GPIO_DIR 0x9020 + +void ivtv_reset_ir_gpio(struct ivtv *itv) +{ + int curdir, curout; + + if (itv->card->type != IVTV_CARD_PVR_150) + return; + IVTV_DEBUG_INFO("Resetting PVR150 IR\n"); + curout = read_reg(IVTV_REG_GPIO_OUT); + curdir = read_reg(IVTV_REG_GPIO_DIR); + curdir |= 0x80; + write_reg(curdir, IVTV_REG_GPIO_DIR); + curout = (curout & ~0xF) | 1; + write_reg(curout, IVTV_REG_GPIO_OUT); + /* We could use something else for smaller time */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + curout |= 2; + write_reg(curout, IVTV_REG_GPIO_OUT); + curdir &= ~0x80; + write_reg(curdir, IVTV_REG_GPIO_DIR); +} + +#ifdef HAVE_XC3028 +int ivtv_reset_tuner_gpio(enum v4l2_tuner_type mode, void *priv, int ptr) +{ + int curdir, curout; + struct ivtv *itv = (struct ivtv *) priv; + + if (itv->card->type != IVTV_CARD_PG600V2 || itv->options.tuner != TUNER_XCEIVE_XC3028) + return -EINVAL; + IVTV_INFO("Resetting tuner.\n"); + curout = read_reg(IVTV_REG_GPIO_OUT); + curdir = read_reg(IVTV_REG_GPIO_DIR); + curdir |= (1 << 12); /* GPIO bit 12 */ + + curout &= ~(1 << 12); + write_reg(curout, IVTV_REG_GPIO_OUT); + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + + curout |= (1 << 12); + write_reg(curout, IVTV_REG_GPIO_OUT); + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + + return 0; +} +#endif + +void ivtv_gpio_init(struct ivtv *itv) +{ + if (itv->card->gpio_init.direction == 0) + return; + + IVTV_DEBUG_INFO("GPIO initial dir: %08x out: %08x\n", + read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT)); + + /* init output data then direction */ + write_reg(itv->card->gpio_init.initial_value, IVTV_REG_GPIO_OUT); + write_reg(itv->card->gpio_init.direction, IVTV_REG_GPIO_DIR); +} + +static struct v4l2_queryctrl gpio_ctrl_mute = { + .id = V4L2_CID_AUDIO_MUTE, + .type = V4L2_CTRL_TYPE_BOOLEAN, + .name = "Mute", + .minimum = 0, + .maximum = 1, + .step = 1, + .default_value = 1, + .flags = 0, +}; + +int ivtv_gpio(struct ivtv *itv, unsigned int command, void *arg) +{ + struct v4l2_tuner *tuner = arg; + struct v4l2_control *ctrl = arg; + struct v4l2_routing *route = arg; + u16 mask, data; + + switch (command) { + case VIDIOC_INT_AUDIO_CLOCK_FREQ: + mask = itv->card->gpio_audio_freq.mask; + switch (*(u32 *)arg) { + case 32000: + data = itv->card->gpio_audio_freq.f32000; + break; + case 44100: + data = itv->card->gpio_audio_freq.f44100; + break; + case 48000: + default: + data = itv->card->gpio_audio_freq.f48000; + break; + } + break; + + case VIDIOC_G_TUNER: + mask = itv->card->gpio_audio_detect.mask; + if (mask == 0 || (read_reg(IVTV_REG_GPIO_IN) & mask)) + tuner->rxsubchans = V4L2_TUNER_MODE_STEREO | + V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; + else + tuner->rxsubchans = V4L2_TUNER_SUB_MONO; + return 0; + + case VIDIOC_S_TUNER: + mask = itv->card->gpio_audio_mode.mask; + switch (tuner->audmode) { + case V4L2_TUNER_MODE_LANG1: + data = itv->card->gpio_audio_mode.lang1; + break; + case V4L2_TUNER_MODE_LANG2: + data = itv->card->gpio_audio_mode.lang2; + break; + case V4L2_TUNER_MODE_MONO: + data = itv->card->gpio_audio_mode.mono; + break; + case V4L2_TUNER_MODE_STEREO: + case V4L2_TUNER_MODE_LANG1_LANG2: + default: + data = itv->card->gpio_audio_mode.stereo; + break; + } + break; + + case AUDC_SET_RADIO: + mask = itv->card->gpio_audio_input.mask; + data = itv->card->gpio_audio_input.radio; + break; 
+ + case VIDIOC_S_STD: + mask = itv->card->gpio_audio_input.mask; + data = itv->card->gpio_audio_input.tuner; + break; + + case VIDIOC_INT_S_AUDIO_ROUTING: + if (route->input > 2) + return -EINVAL; + mask = itv->card->gpio_audio_input.mask; + switch (route->input) { + case 0: + data = itv->card->gpio_audio_input.tuner; + break; + case 1: + data = itv->card->gpio_audio_input.linein; + break; + case 2: + default: + data = itv->card->gpio_audio_input.radio; + break; + } + break; + + case VIDIOC_G_CTRL: + if (ctrl->id != V4L2_CID_AUDIO_MUTE) + return -EINVAL; + mask = itv->card->gpio_audio_mute.mask; + data = itv->card->gpio_audio_mute.mute; + ctrl->value = (read_reg(IVTV_REG_GPIO_OUT) & mask) == data; + return 0; + + case VIDIOC_S_CTRL: + if (ctrl->id != V4L2_CID_AUDIO_MUTE) + return -EINVAL; + mask = itv->card->gpio_audio_mute.mask; + data = ctrl->value ? itv->card->gpio_audio_mute.mute : 0; + break; + + case VIDIOC_QUERYCTRL: + { + struct v4l2_queryctrl *qc = arg; + + if (qc->id != V4L2_CID_AUDIO_MUTE) + return -EINVAL; + *qc = gpio_ctrl_mute; + return 0; + } + + case VIDIOC_LOG_STATUS: + IVTV_INFO("GPIO status: DIR=0x%04x OUT=0x%04x IN=0x%04x\n", + read_reg(IVTV_REG_GPIO_DIR), read_reg(IVTV_REG_GPIO_OUT), + read_reg(IVTV_REG_GPIO_IN)); + return 0; + + case VIDIOC_INT_S_VIDEO_ROUTING: + if (route->input > 2) /* 0:Tuner 1:Composite 2:S-Video */ + return -EINVAL; + mask = itv->card->gpio_video_input.mask; + if (route->input == 0) + data = itv->card->gpio_video_input.tuner; + else if (route->input == 1) + data = itv->card->gpio_video_input.composite; + else + data = itv->card->gpio_video_input.svideo; + break; + + default: + return -EINVAL; + } + if (mask) + write_reg((read_reg(IVTV_REG_GPIO_OUT) & ~mask) | (data & mask), IVTV_REG_GPIO_OUT); + return 0; +} diff --git a/drivers/media/video/ivtv/ivtv-gpio.h b/drivers/media/video/ivtv/ivtv-gpio.h new file mode 100644 index 000000000000..c301d2a39346 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-gpio.h @@ -0,0 +1,25 @@ +/* + gpio functions. + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* GPIO stuff */ +void ivtv_gpio_init(struct ivtv *itv); +void ivtv_reset_ir_gpio(struct ivtv *itv); +int ivtv_reset_tuner_gpio(enum v4l2_tuner_type mode, void *priv, int ptr); +int ivtv_gpio(struct ivtv *itv, unsigned int command, void *arg); diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c new file mode 100644 index 000000000000..17353415b0a3 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-i2c.c @@ -0,0 +1,750 @@ +/* + I2C functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + This file includes an i2c implementation that was reverse engineered + from the Hauppauge windows driver. Older ivtv versions used i2c-algo-bit, + which whilst fine under most circumstances, had trouble with the Zilog + CPU on the PVR-150 which handles IR functions (occasional inability to + communicate with the chip until it was reset) and also with the i2c + bus being completely unreachable when multiple PVR cards were present. + + The implementation is very similar to i2c-algo-bit, but there are enough + subtle differences that the two are hard to merge. The general strategy + employed by i2c-algo-bit is to use udelay() to implement the timing + when putting out bits on the scl/sda lines. The general strategy taken + here is to poll the lines for state changes (see ivtv_waitscl and + ivtv_waitsda). In addition there are small delays at various locations + which poll the SCL line 5 times (ivtv_scldelay). I would guess that + since this is memory mapped I/O that the length of those delays is tied + to the PCI bus clock. There is some extra code to do with recovery + and retries. Since it is not known what causes the actual i2c problems + in the first place, the only goal if one was to attempt to use + i2c-algo-bit would be to try to make it follow the same code path. + This would be a lot of work, and I'm also not convinced that it would + provide a generic benefit to i2c-algo-bit. Therefore consider this + an engineering solution -- not pretty, but it works. + + Some more general comments about what we are doing: + + The i2c bus is a 2 wire serial bus, with clock (SCL) and data (SDA) + lines. To communicate on the bus (as a master, we don't act as a slave), + we first initiate a start condition (ivtv_start). We then write the + address of the device that we want to communicate with, along with a flag + that indicates whether this is a read or a write. The slave then issues + an ACK signal (ivtv_ack), which tells us that it is ready for reading / + writing. 
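    A minimal sketch of one such read transaction at this level, put together
    from the helpers defined further down in this file (ivtv_start, ivtv_sendbyte,
    ivtv_readbyte and ivtv_stop), with error checking and the retry loop of
    ivtv_read() left out:

	ivtv_start(itv);
	ivtv_sendbyte(itv, (addr << 1) | 1);
	ivtv_readbyte(itv, &val, 1);
	ivtv_stop(itv);

    The address byte carries the read/write flag in bit 0, and the final byte of
    a read is NACKed, hence the 1 passed to ivtv_readbyte().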
We then proceed with reading or writing (ivtv_read/ivtv_write), + and finally issue a stop condition (ivtv_stop) to make the bus available + to other masters. + + There is an additional form of transaction where a write may be + immediately followed by a read. In this case, there is no intervening + stop condition. (Only the msp3400 chip uses this method of data transfer). + */ + +#include "ivtv-driver.h" +#include "ivtv-cards.h" +#include "ivtv-gpio.h" + +#include + +/* i2c implementation for cx23415/6 chip, ivtv project. + * Author: Kevin Thayer (nufan_wfk at yahoo.com) + */ +/* i2c stuff */ +#define IVTV_REG_I2C_SETSCL_OFFSET 0x7000 +#define IVTV_REG_I2C_SETSDA_OFFSET 0x7004 +#define IVTV_REG_I2C_GETSCL_OFFSET 0x7008 +#define IVTV_REG_I2C_GETSDA_OFFSET 0x700c + +#ifndef I2C_ADAP_CLASS_TV_ANALOG +#define I2C_ADAP_CLASS_TV_ANALOG I2C_CLASS_TV_ANALOG +#endif /* I2C_ADAP_CLASS_TV_ANALOG */ + +#define IVTV_CS53L32A_I2C_ADDR 0x11 +#define IVTV_CX25840_I2C_ADDR 0x44 +#define IVTV_SAA7115_I2C_ADDR 0x21 +#define IVTV_SAA7127_I2C_ADDR 0x44 +#define IVTV_SAA717x_I2C_ADDR 0x21 +#define IVTV_MSP3400_I2C_ADDR 0x40 +#define IVTV_HAUPPAUGE_I2C_ADDR 0x50 +#define IVTV_WM8739_I2C_ADDR 0x1a +#define IVTV_WM8775_I2C_ADDR 0x1b +#define IVTV_TEA5767_I2C_ADDR 0x60 +#define IVTV_UPD64031A_I2C_ADDR 0x12 +#define IVTV_UPD64083_I2C_ADDR 0x5c +#define IVTV_TDA985X_I2C_ADDR 0x5b + +/* This array should match the IVTV_HW_ defines */ +static const u8 hw_driverids[] = { + I2C_DRIVERID_CX25840, + I2C_DRIVERID_SAA711X, + I2C_DRIVERID_SAA7127, + I2C_DRIVERID_MSP3400, + I2C_DRIVERID_TUNER, + I2C_DRIVERID_WM8775, + I2C_DRIVERID_CS53L32A, + I2C_DRIVERID_TVEEPROM, + I2C_DRIVERID_SAA711X, + I2C_DRIVERID_TVAUDIO, + I2C_DRIVERID_UPD64031A, + I2C_DRIVERID_UPD64083, + I2C_DRIVERID_SAA717X, + I2C_DRIVERID_WM8739, + 0 /* IVTV_HW_GPIO dummy driver ID */ +}; + +/* This array should match the IVTV_HW_ defines */ +static const char * const hw_drivernames[] = { + "cx2584x", + "saa7115", + "saa7127", + "msp3400", + "tuner", + "wm8775", + "cs53l32a", + "tveeprom", + "saa7114", + "tvaudio", + "upd64031a", + "upd64083", + "saa717x", + "wm8739", + "gpio", +}; + +static int attach_inform(struct i2c_client *client) +{ + struct ivtv *itv = (struct ivtv *)i2c_get_adapdata(client->adapter); + int i; + + IVTV_DEBUG_I2C("i2c client attach\n"); + for (i = 0; i < I2C_CLIENTS_MAX; i++) { + if (itv->i2c_clients[i] == NULL) { + itv->i2c_clients[i] = client; + break; + } + } + if (i == I2C_CLIENTS_MAX) { + IVTV_ERR("insufficient room for new I2C client!\n"); + } + return 0; +} + +static int detach_inform(struct i2c_client *client) +{ + int i; + struct ivtv *itv = (struct ivtv *)i2c_get_adapdata(client->adapter); + + IVTV_DEBUG_I2C("i2c client detach\n"); + for (i = 0; i < I2C_CLIENTS_MAX; i++) { + if (itv->i2c_clients[i] == client) { + itv->i2c_clients[i] = NULL; + break; + } + } + IVTV_DEBUG_I2C("i2c detach [client=%s,%s]\n", + client->name, (i < I2C_CLIENTS_MAX) ? 
"ok" : "failed"); + + return 0; +} + +/* Set the serial clock line to the desired state */ +static void ivtv_setscl(struct ivtv *itv, int state) +{ + /* write them out */ + /* write bits are inverted */ + write_reg(~state, IVTV_REG_I2C_SETSCL_OFFSET); +} + +/* Set the serial data line to the desired state */ +static void ivtv_setsda(struct ivtv *itv, int state) +{ + /* write them out */ + /* write bits are inverted */ + write_reg(~state & 1, IVTV_REG_I2C_SETSDA_OFFSET); +} + +/* Read the serial clock line */ +static int ivtv_getscl(struct ivtv *itv) +{ + return read_reg(IVTV_REG_I2C_GETSCL_OFFSET) & 1; +} + +/* Read the serial data line */ +static int ivtv_getsda(struct ivtv *itv) +{ + return read_reg(IVTV_REG_I2C_GETSDA_OFFSET) & 1; +} + +/* Implement a short delay by polling the serial clock line */ +static void ivtv_scldelay(struct ivtv *itv) +{ + int i; + + for (i = 0; i < 5; ++i) + ivtv_getscl(itv); +} + +/* Wait for the serial clock line to become set to a specific value */ +static int ivtv_waitscl(struct ivtv *itv, int val) +{ + int i; + + ivtv_scldelay(itv); + for (i = 0; i < 1000; ++i) { + if (ivtv_getscl(itv) == val) + return 1; + } + return 0; +} + +/* Wait for the serial data line to become set to a specific value */ +static int ivtv_waitsda(struct ivtv *itv, int val) +{ + int i; + + ivtv_scldelay(itv); + for (i = 0; i < 1000; ++i) { + if (ivtv_getsda(itv) == val) + return 1; + } + return 0; +} + +/* Wait for the slave to issue an ACK */ +static int ivtv_ack(struct ivtv *itv) +{ + int ret = 0; + + if (ivtv_getscl(itv) == 1) { + IVTV_DEBUG_I2C("SCL was high starting an ack\n"); + ivtv_setscl(itv, 0); + if (!ivtv_waitscl(itv, 0)) { + IVTV_DEBUG_I2C("Could not set SCL low starting an ack\n"); + return -EREMOTEIO; + } + } + ivtv_setsda(itv, 1); + ivtv_scldelay(itv); + ivtv_setscl(itv, 1); + if (!ivtv_waitsda(itv, 0)) { + IVTV_DEBUG_I2C("Slave did not ack\n"); + ret = -EREMOTEIO; + } + ivtv_setscl(itv, 0); + if (!ivtv_waitscl(itv, 0)) { + IVTV_DEBUG_I2C("Failed to set SCL low after ACK\n"); + ret = -EREMOTEIO; + } + return ret; +} + +/* Write a single byte to the i2c bus and wait for the slave to ACK */ +static int ivtv_sendbyte(struct ivtv *itv, unsigned char byte) +{ + int i, bit; + + IVTV_DEBUG_I2C("write %x\n",byte); + for (i = 0; i < 8; ++i, byte<<=1) { + ivtv_setscl(itv, 0); + if (!ivtv_waitscl(itv, 0)) { + IVTV_DEBUG_I2C("Error setting SCL low\n"); + return -EREMOTEIO; + } + bit = (byte>>7)&1; + ivtv_setsda(itv, bit); + if (!ivtv_waitsda(itv, bit)) { + IVTV_DEBUG_I2C("Error setting SDA\n"); + return -EREMOTEIO; + } + ivtv_setscl(itv, 1); + if (!ivtv_waitscl(itv, 1)) { + IVTV_DEBUG_I2C("Slave not ready for bit\n"); + return -EREMOTEIO; + } + } + ivtv_setscl(itv, 0); + if (!ivtv_waitscl(itv, 0)) { + IVTV_DEBUG_I2C("Error setting SCL low\n"); + return -EREMOTEIO; + } + return ivtv_ack(itv); +} + +/* Read a byte from the i2c bus and send a NACK if applicable (i.e. 
for the + final byte) */ +static int ivtv_readbyte(struct ivtv *itv, unsigned char *byte, int nack) +{ + int i; + + *byte = 0; + + ivtv_setsda(itv, 1); + ivtv_scldelay(itv); + for (i = 0; i < 8; ++i) { + ivtv_setscl(itv, 0); + ivtv_scldelay(itv); + ivtv_setscl(itv, 1); + if (!ivtv_waitscl(itv, 1)) { + IVTV_DEBUG_I2C("Error setting SCL high\n"); + return -EREMOTEIO; + } + *byte = ((*byte)<<1)|ivtv_getsda(itv); + } + ivtv_setscl(itv, 0); + ivtv_scldelay(itv); + ivtv_setsda(itv, nack); + ivtv_scldelay(itv); + ivtv_setscl(itv, 1); + ivtv_scldelay(itv); + ivtv_setscl(itv, 0); + ivtv_scldelay(itv); + IVTV_DEBUG_I2C("read %x\n",*byte); + return 0; +} + +/* Issue a start condition on the i2c bus to alert slaves to prepare for + an address write */ +static int ivtv_start(struct ivtv *itv) +{ + int sda; + + sda = ivtv_getsda(itv); + if (sda != 1) { + IVTV_DEBUG_I2C("SDA was low at start\n"); + ivtv_setsda(itv, 1); + if (!ivtv_waitsda(itv, 1)) { + IVTV_DEBUG_I2C("SDA stuck low\n"); + return -EREMOTEIO; + } + } + if (ivtv_getscl(itv) != 1) { + ivtv_setscl(itv, 1); + if (!ivtv_waitscl(itv, 1)) { + IVTV_DEBUG_I2C("SCL stuck low at start\n"); + return -EREMOTEIO; + } + } + ivtv_setsda(itv, 0); + ivtv_scldelay(itv); + return 0; +} + +/* Issue a stop condition on the i2c bus to release it */ +static int ivtv_stop(struct ivtv *itv) +{ + int i; + + if (ivtv_getscl(itv) != 0) { + IVTV_DEBUG_I2C("SCL not low when stopping\n"); + ivtv_setscl(itv, 0); + if (!ivtv_waitscl(itv, 0)) { + IVTV_DEBUG_I2C("SCL could not be set low\n"); + } + } + ivtv_setsda(itv, 0); + ivtv_scldelay(itv); + ivtv_setscl(itv, 1); + if (!ivtv_waitscl(itv, 1)) { + IVTV_DEBUG_I2C("SCL could not be set high\n"); + return -EREMOTEIO; + } + ivtv_scldelay(itv); + ivtv_setsda(itv, 1); + if (!ivtv_waitsda(itv, 1)) { + IVTV_DEBUG_I2C("resetting I2C\n"); + for (i = 0; i < 16; ++i) { + ivtv_setscl(itv, 0); + ivtv_scldelay(itv); + ivtv_setscl(itv, 1); + ivtv_scldelay(itv); + ivtv_setsda(itv, 1); + } + ivtv_waitsda(itv, 1); + return -EREMOTEIO; + } + return 0; +} + +/* Write a message to the given i2c slave. do_stop may be 0 to prevent + issuing the i2c stop condition (when following with a read) */ +static int ivtv_write(struct ivtv *itv, unsigned char addr, unsigned char *data, u32 len, int do_stop) +{ + int retry, ret = -EREMOTEIO; + u32 i; + + for (retry = 0; ret != 0 && retry < 8; ++retry) { + ret = ivtv_start(itv); + + if (ret == 0) { + ret = ivtv_sendbyte(itv, addr<<1); + for (i = 0; ret == 0 && i < len; ++i) + ret = ivtv_sendbyte(itv, data[i]); + } + if (ret != 0 || do_stop) { + ivtv_stop(itv); + } + } + if (ret) + IVTV_DEBUG_I2C("i2c write to %x failed\n", addr); + return ret; +} + +/* Read data from the given i2c slave. A stop condition is always issued. */ +static int ivtv_read(struct ivtv *itv, unsigned char addr, unsigned char *data, u32 len) +{ + int retry, ret = -EREMOTEIO; + u32 i; + + for (retry = 0; ret != 0 && retry < 8; ++retry) { + ret = ivtv_start(itv); + if (ret == 0) + ret = ivtv_sendbyte(itv, (addr << 1) | 1); + for (i = 0; ret == 0 && i < len; ++i) { + ret = ivtv_readbyte(itv, &data[i], i == len - 1); + } + ivtv_stop(itv); + } + if (ret) + IVTV_DEBUG_I2C("i2c read from %x failed\n", addr); + return ret; +} + +/* Kernel i2c transfer implementation. Takes a number of messages to be read + or written. 
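    A caller-side sketch of such a combined write-then-read through the standard
    struct i2c_msg interface (the chip address and register number below are just
    placeholders):

	u8 reg = 0x02, val;
	struct i2c_msg msgs[] = {
		{ .addr = 0x44, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x44, .flags = I2C_M_RD, .len = 1, .buf = &val },
	};

	i2c_transfer(&itv->i2c_adap, msgs, 2);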
If a read follows a write, this will occur without an + intervening stop condition */ +static int ivtv_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) +{ + struct ivtv *itv = i2c_get_adapdata(i2c_adap); + int retval; + int i; + + mutex_lock(&itv->i2c_bus_lock); + for (i = retval = 0; retval == 0 && i < num; i++) { + if (msgs[i].flags & I2C_M_RD) + retval = ivtv_read(itv, msgs[i].addr, msgs[i].buf, msgs[i].len); + else { + /* if followed by a read, don't stop */ + int stop = !(i + 1 < num && msgs[i + 1].flags == I2C_M_RD); + + retval = ivtv_write(itv, msgs[i].addr, msgs[i].buf, msgs[i].len, stop); + } + } + mutex_unlock(&itv->i2c_bus_lock); + return retval ? retval : num; +} + +/* Kernel i2c capabilities */ +static u32 ivtv_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static struct i2c_algorithm ivtv_algo = { + .master_xfer = ivtv_xfer, + .functionality = ivtv_functionality, +}; + +/* template for our-bit banger */ +static struct i2c_adapter ivtv_i2c_adap_hw_template = { + .name = "ivtv i2c driver", + .id = I2C_HW_B_CX2341X, + .algo = &ivtv_algo, + .algo_data = NULL, /* filled from template */ + .client_register = attach_inform, + .client_unregister = detach_inform, + .owner = THIS_MODULE, +#ifdef I2C_ADAP_CLASS_TV_ANALOG + .class = I2C_ADAP_CLASS_TV_ANALOG, +#endif +}; + +static void ivtv_setscl_old(void *data, int state) +{ + struct ivtv *itv = (struct ivtv *)data; + + if (state) + itv->i2c_state |= 0x01; + else + itv->i2c_state &= ~0x01; + + /* write them out */ + /* write bits are inverted */ + write_reg(~itv->i2c_state, IVTV_REG_I2C_SETSCL_OFFSET); +} + +static void ivtv_setsda_old(void *data, int state) +{ + struct ivtv *itv = (struct ivtv *)data; + + if (state) + itv->i2c_state |= 0x01; + else + itv->i2c_state &= ~0x01; + + /* write them out */ + /* write bits are inverted */ + write_reg(~itv->i2c_state, IVTV_REG_I2C_SETSDA_OFFSET); +} + +static int ivtv_getscl_old(void *data) +{ + struct ivtv *itv = (struct ivtv *)data; + + return read_reg(IVTV_REG_I2C_GETSCL_OFFSET) & 1; +} + +static int ivtv_getsda_old(void *data) +{ + struct ivtv *itv = (struct ivtv *)data; + + return read_reg(IVTV_REG_I2C_GETSDA_OFFSET) & 1; +} + +/* template for i2c-bit-algo */ +static struct i2c_adapter ivtv_i2c_adap_template = { + .name = "ivtv i2c driver", + .id = I2C_HW_B_CX2341X, /* algo-bit is OR'd with this */ + .algo = NULL, /* set by i2c-algo-bit */ + .algo_data = NULL, /* filled from template */ + .client_register = attach_inform, + .client_unregister = detach_inform, + .owner = THIS_MODULE, +#ifdef I2C_ADAP_CLASS_TV_ANALOG + .class = I2C_ADAP_CLASS_TV_ANALOG, +#endif +}; + +static struct i2c_algo_bit_data ivtv_i2c_algo_template = { + NULL, /* ?? 
*/ + ivtv_setsda_old, /* setsda function */ + ivtv_setscl_old, /* " */ + ivtv_getsda_old, /* " */ + ivtv_getscl_old, /* " */ + 10, /* udelay */ + 200 /* timeout */ +}; + +static struct i2c_client ivtv_i2c_client_template = { + .name = "ivtv internal use only", +}; + +int ivtv_call_i2c_client(struct ivtv *itv, int addr, unsigned int cmd, void *arg) +{ + struct i2c_client *client; + int retval; + int i; + + IVTV_DEBUG_I2C("call_i2c_client addr=%02x\n", addr); + for (i = 0; i < I2C_CLIENTS_MAX; i++) { + client = itv->i2c_clients[i]; + if (client == NULL) { + continue; + } + if (client->driver->command == NULL) { + continue; + } + if (addr == client->addr) { + retval = client->driver->command(client, cmd, arg); + return retval; + } + } + IVTV_ERR("i2c addr 0x%02x not found for command 0x%x!\n", addr, cmd); + return -ENODEV; +} + +/* Find the i2c device based on the driver ID and return + its i2c address or -ENODEV if no matching device was found. */ +int ivtv_i2c_id_addr(struct ivtv *itv, u32 id) +{ + struct i2c_client *client; + int retval = -ENODEV; + int i; + + for (i = 0; i < I2C_CLIENTS_MAX; i++) { + client = itv->i2c_clients[i]; + if (client == NULL) + continue; + if (id == client->driver->id) { + retval = client->addr; + break; + } + } + return retval; +} + +/* Find the i2c device name matching the DRIVERID */ +static const char *ivtv_i2c_id_name(u32 id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hw_driverids); i++) + if (hw_driverids[i] == id) + return hw_drivernames[i]; + return "unknown device"; +} + +/* Find the i2c device name matching the IVTV_HW_ flag */ +static const char *ivtv_i2c_hw_name(u32 hw) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hw_driverids); i++) + if (1 << i == hw) + return hw_drivernames[i]; + return "unknown device"; +} + +/* Find the i2c device matching the IVTV_HW_ flag and return + its i2c address or -ENODEV if no matching device was found. */ +int ivtv_i2c_hw_addr(struct ivtv *itv, u32 hw) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hw_driverids); i++) + if (1 << i == hw) + return ivtv_i2c_id_addr(itv, hw_driverids[i]); + return -ENODEV; +} + +/* Calls i2c device based on IVTV_HW_ flag. If hw == 0, then do nothing. + If hw == IVTV_HW_GPIO then call the gpio handler. */ +int ivtv_i2c_hw(struct ivtv *itv, u32 hw, unsigned int cmd, void *arg) +{ + int addr; + + if (hw == IVTV_HW_GPIO) + return ivtv_gpio(itv, cmd, arg); + if (hw == 0) + return 0; + + addr = ivtv_i2c_hw_addr(itv, hw); + if (addr < 0) { + IVTV_ERR("i2c hardware 0x%08x (%s) not found for command 0x%x!\n", + hw, ivtv_i2c_hw_name(hw), cmd); + return addr; + } + return ivtv_call_i2c_client(itv, addr, cmd, arg); +} + +/* Calls i2c device based on I2C driver ID. 
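    For example, a call such as

	struct v4l2_control ctrl = { V4L2_CID_AUDIO_VOLUME, 58880 };

	ivtv_i2c_id(itv, I2C_DRIVERID_MSP3400, VIDIOC_S_CTRL, &ctrl);

    (the control id and value are only illustrative) resolves the msp3400
    client's bus address with ivtv_i2c_id_addr() and then forwards the command
    through ivtv_call_i2c_client().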
*/ +int ivtv_i2c_id(struct ivtv *itv, u32 id, unsigned int cmd, void *arg) +{ + int addr; + + addr = ivtv_i2c_id_addr(itv, id); + if (addr < 0) { + IVTV_ERR("i2c ID 0x%08x (%s) not found for command 0x%x!\n", + id, ivtv_i2c_id_name(id), cmd); + return addr; + } + return ivtv_call_i2c_client(itv, addr, cmd, arg); +} + +int ivtv_cx25840(struct ivtv *itv, unsigned int cmd, void *arg) +{ + return ivtv_call_i2c_client(itv, IVTV_CX25840_I2C_ADDR, cmd, arg); +} + +int ivtv_saa7115(struct ivtv *itv, unsigned int cmd, void *arg) +{ + return ivtv_call_i2c_client(itv, IVTV_SAA7115_I2C_ADDR, cmd, arg); +} + +int ivtv_saa7127(struct ivtv *itv, unsigned int cmd, void *arg) +{ + return ivtv_call_i2c_client(itv, IVTV_SAA7127_I2C_ADDR, cmd, arg); +} + +int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg) +{ + return ivtv_call_i2c_client(itv, IVTV_SAA717x_I2C_ADDR, cmd, arg); +} + +int ivtv_msp34xx(struct ivtv *itv, unsigned int cmd, void *arg) +{ + return ivtv_call_i2c_client(itv, IVTV_MSP3400_I2C_ADDR, cmd, arg); +} + +int ivtv_upd64031a(struct ivtv *itv, unsigned int cmd, void *arg) +{ + return ivtv_call_i2c_client(itv, IVTV_UPD64031A_I2C_ADDR, cmd, arg); +} + +int ivtv_upd64083(struct ivtv *itv, unsigned int cmd, void *arg) +{ + return ivtv_call_i2c_client(itv, IVTV_UPD64083_I2C_ADDR, cmd, arg); +} + +/* broadcast cmd for all I2C clients and for the gpio subsystem */ +void ivtv_call_i2c_clients(struct ivtv *itv, unsigned int cmd, void *arg) +{ + if (itv->i2c_adap.algo == NULL) { + IVTV_ERR("adapter is not set"); + return; + } + i2c_clients_command(&itv->i2c_adap, cmd, arg); + if (itv->hw_flags & IVTV_HW_GPIO) + ivtv_gpio(itv, cmd, arg); +} + +/* init + register i2c algo-bit adapter */ +int __devinit init_ivtv_i2c(struct ivtv *itv) +{ + IVTV_DEBUG_I2C("i2c init\n"); + + if (itv->options.newi2c > 0) { + memcpy(&itv->i2c_adap, &ivtv_i2c_adap_hw_template, + sizeof(struct i2c_adapter)); + } else { + memcpy(&itv->i2c_adap, &ivtv_i2c_adap_template, + sizeof(struct i2c_adapter)); + memcpy(&itv->i2c_algo, &ivtv_i2c_algo_template, + sizeof(struct i2c_algo_bit_data)); + itv->i2c_algo.data = itv; + itv->i2c_adap.algo_data = &itv->i2c_algo; + } + + sprintf(itv->i2c_adap.name + strlen(itv->i2c_adap.name), " #%d", + itv->num); + i2c_set_adapdata(&itv->i2c_adap, itv); + + memcpy(&itv->i2c_client, &ivtv_i2c_client_template, + sizeof(struct i2c_client)); + itv->i2c_client.adapter = &itv->i2c_adap; + itv->i2c_adap.dev.parent = &itv->dev->dev; + + IVTV_DEBUG_I2C("setting scl and sda to 1\n"); + ivtv_setscl(itv, 1); + ivtv_setsda(itv, 1); + + if (itv->options.newi2c > 0) + return i2c_add_adapter(&itv->i2c_adap); + else + return i2c_bit_add_bus(&itv->i2c_adap); +} + +void __devexit exit_ivtv_i2c(struct ivtv *itv) +{ + IVTV_DEBUG_I2C("i2c exit\n"); + + i2c_del_adapter(&itv->i2c_adap); +} diff --git a/drivers/media/video/ivtv/ivtv-i2c.h b/drivers/media/video/ivtv/ivtv-i2c.h new file mode 100644 index 000000000000..136dd684f4b5 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-i2c.h @@ -0,0 +1,38 @@ +/* + I2C functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +int ivtv_cx25840(struct ivtv *itv, unsigned int cmd, void *arg); +int ivtv_saa7115(struct ivtv *itv, unsigned int cmd, void *arg); +int ivtv_saa7127(struct ivtv *itv, unsigned int cmd, void *arg); +int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg); +int ivtv_msp34xx(struct ivtv *itv, unsigned int cmd, void *arg); +int ivtv_upd64031a(struct ivtv *itv, unsigned int cmd, void *arg); +int ivtv_upd64083(struct ivtv *itv, unsigned int cmd, void *arg); + +int ivtv_i2c_id_addr(struct ivtv *itv, u32 id); +int ivtv_i2c_hw_addr(struct ivtv *itv, u32 hw); +int ivtv_i2c_hw(struct ivtv *itv, u32 hw, unsigned int cmd, void *arg); +int ivtv_i2c_id(struct ivtv *itv, u32 id, unsigned int cmd, void *arg); +int ivtv_call_i2c_client(struct ivtv *itv, int addr, unsigned int cmd, void *arg); +void ivtv_call_i2c_clients(struct ivtv *itv, unsigned int cmd, void *arg); + +/* init + register i2c algo-bit adapter */ +int __devinit init_ivtv_i2c(struct ivtv *itv); +void __devexit exit_ivtv_i2c(struct ivtv *itv); diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c new file mode 100644 index 000000000000..448e8dd5b42f --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-ioctl.c @@ -0,0 +1,1555 @@ +/* + ioctl system call + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-version.h" +#include "ivtv-mailbox.h" +#include "ivtv-i2c.h" +#include "ivtv-queue.h" +#include "ivtv-fileops.h" +#include "ivtv-vbi.h" +#include "ivtv-audio.h" +#include "ivtv-video.h" +#include "ivtv-streams.h" +#include "ivtv-yuv.h" +#include "ivtv-ioctl.h" +#include "ivtv-gpio.h" +#include "ivtv-controls.h" +#include "ivtv-cards.h" +#include +#include +#include +#include +#include + +u16 service2vbi(int type) +{ + switch (type) { + case V4L2_SLICED_TELETEXT_B: + return IVTV_SLICED_TYPE_TELETEXT_B; + case V4L2_SLICED_CAPTION_525: + return IVTV_SLICED_TYPE_CAPTION_525; + case V4L2_SLICED_WSS_625: + return IVTV_SLICED_TYPE_WSS_625; + case V4L2_SLICED_VPS: + return IVTV_SLICED_TYPE_VPS; + default: + return 0; + } +} + +static int valid_service_line(int field, int line, int is_pal) +{ + return (is_pal && line >= 6 && (line != 23 || field == 0)) || + (!is_pal && line >= 10 && line < 22); +} + +static u16 select_service_from_set(int field, int line, u16 set, int is_pal) +{ + u16 valid_set = (is_pal ? 
V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525); + int i; + + set = set & valid_set; + if (set == 0 || !valid_service_line(field, line, is_pal)) { + return 0; + } + if (!is_pal) { + if (line == 21 && (set & V4L2_SLICED_CAPTION_525)) + return V4L2_SLICED_CAPTION_525; + } + else { + if (line == 16 && field == 0 && (set & V4L2_SLICED_VPS)) + return V4L2_SLICED_VPS; + if (line == 23 && field == 0 && (set & V4L2_SLICED_WSS_625)) + return V4L2_SLICED_WSS_625; + if (line == 23) + return 0; + } + for (i = 0; i < 32; i++) { + if ((1 << i) & set) + return 1 << i; + } + return 0; +} + +void expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal) +{ + u16 set = fmt->service_set; + int f, l; + + fmt->service_set = 0; + for (f = 0; f < 2; f++) { + for (l = 0; l < 24; l++) { + fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal); + } + } +} + +static int check_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal) +{ + int f, l; + u16 set = 0; + + for (f = 0; f < 2; f++) { + for (l = 0; l < 24; l++) { + fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal); + set |= fmt->service_lines[f][l]; + } + } + return set != 0; +} + +u16 get_service_set(struct v4l2_sliced_vbi_format *fmt) +{ + int f, l; + u16 set = 0; + + for (f = 0; f < 2; f++) { + for (l = 0; l < 24; l++) { + set |= fmt->service_lines[f][l]; + } + } + return set; +} + +static const struct { + v4l2_std_id std; + char *name; +} enum_stds[] = { + { V4L2_STD_PAL_BG | V4L2_STD_PAL_H, "PAL-BGH" }, + { V4L2_STD_PAL_DK, "PAL-DK" }, + { V4L2_STD_PAL_I, "PAL-I" }, + { V4L2_STD_PAL_M, "PAL-M" }, + { V4L2_STD_PAL_N, "PAL-N" }, + { V4L2_STD_PAL_Nc, "PAL-Nc" }, + { V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, "SECAM-BGH" }, + { V4L2_STD_SECAM_DK, "SECAM-DK" }, + { V4L2_STD_SECAM_L, "SECAM-L" }, + { V4L2_STD_SECAM_LC, "SECAM-L'" }, + { V4L2_STD_NTSC_M, "NTSC-M" }, + { V4L2_STD_NTSC_M_JP, "NTSC-J" }, + { V4L2_STD_NTSC_M_KR, "NTSC-K" }, +}; + +static const struct v4l2_standard ivtv_std_60hz = +{ + .frameperiod = {.numerator = 1001, .denominator = 30000}, + .framelines = 525, +}; + +static const struct v4l2_standard ivtv_std_50hz = +{ + .frameperiod = {.numerator = 1, .denominator = 25}, + .framelines = 625, +}; + +void ivtv_set_osd_alpha(struct ivtv *itv) +{ + ivtv_vapi(itv, CX2341X_OSD_SET_GLOBAL_ALPHA, 3, + itv->osd_global_alpha_state, itv->osd_global_alpha, !itv->osd_local_alpha_state); + ivtv_vapi(itv, CX2341X_OSD_SET_CHROMA_KEY, 2, itv->osd_color_key_state, itv->osd_color_key); +} + +int ivtv_set_speed(struct ivtv *itv, int speed) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + struct ivtv_stream *s; + int single_step = (speed == 1 || speed == -1); + DEFINE_WAIT(wait); + + if (speed == 0) speed = 1000; + + /* No change? */ + if (speed == itv->speed && !single_step) + return 0; + + s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; + + if (single_step && (speed < 0) == (itv->speed < 0)) { + /* Single step video and no need to change direction */ + ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0); + itv->speed = speed; + return 0; + } + if (single_step) + /* Need to change direction */ + speed = speed < 0 ? -1000 : 1000; + + data[0] = (speed > 1000 || speed < -1000) ? 0x80000000 : 0; + data[0] |= (speed > 1000 || speed < -1500) ? 0x40000000 : 0; + data[1] = (speed < 0); + data[2] = speed < 0 ? 3 : 7; + data[3] = itv->params.video_b_frames; + data[4] = (speed == 1500 || speed == 500) ? 
itv->speed_mute_audio : 0; + data[5] = 0; + data[6] = 0; + + if (speed == 1500 || speed == -1500) data[0] |= 1; + else if (speed == 2000 || speed == -2000) data[0] |= 2; + else if (speed > -1000 && speed < 0) data[0] |= (-1000 / speed); + else if (speed < 1000 && speed > 0) data[0] |= (1000 / speed); + + /* If not decoding, just change speed setting */ + if (atomic_read(&itv->decoding) > 0) { + int got_sig = 0; + + /* Stop all DMA and decoding activity */ + ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0); + + /* Wait for any DMA to finish */ + prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); + while (itv->i_flags & IVTV_F_I_DMA) { + got_sig = signal_pending(current); + if (got_sig) + break; + got_sig = 0; + schedule(); + } + finish_wait(&itv->dma_waitq, &wait); + if (got_sig) + return -EINTR; + + /* Change Speed safely */ + ivtv_api(itv, CX2341X_DEC_SET_PLAYBACK_SPEED, 7, data); + IVTV_DEBUG_INFO("Setting Speed to 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + data[0], data[1], data[2], data[3], data[4], data[5], data[6]); + } + if (single_step) { + speed = (speed < 0) ? -1 : 1; + ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0); + } + itv->speed = speed; + return 0; +} + +static int ivtv_validate_speed(int cur_speed, int new_speed) +{ + int fact = new_speed < 0 ? -1 : 1; + int s; + + if (new_speed < 0) new_speed = -new_speed; + if (cur_speed < 0) cur_speed = -cur_speed; + + if (cur_speed <= new_speed) { + if (new_speed > 1500) return fact * 2000; + if (new_speed > 1000) return fact * 1500; + } + else { + if (new_speed >= 2000) return fact * 2000; + if (new_speed >= 1500) return fact * 1500; + if (new_speed >= 1000) return fact * 1000; + } + if (new_speed == 0) return 1000; + if (new_speed == 1 || new_speed == 1000) return fact * new_speed; + + s = new_speed; + new_speed = 1000 / new_speed; + if (1000 / cur_speed == new_speed) + new_speed += (cur_speed < s) ? -1 : 1; + if (new_speed > 60) return 1000 / (fact * 60); + return 1000 / (fact * new_speed); +} + +static int ivtv_video_command(struct ivtv *itv, struct ivtv_open_id *id, + struct video_command *vc, int try) +{ + struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; + + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + + switch (vc->cmd) { + case VIDEO_CMD_PLAY: { + vc->play.speed = ivtv_validate_speed(itv->speed, vc->play.speed); + if (vc->play.speed < 0) + vc->play.format = VIDEO_PLAY_FMT_GOP; + if (try) break; + + if (ivtv_set_output_mode(itv, OUT_MPG) != OUT_MPG) + return -EBUSY; + return ivtv_start_decoding(id, vc->play.speed); + } + + case VIDEO_CMD_STOP: + if (vc->flags & VIDEO_CMD_STOP_IMMEDIATELY) + vc->stop.pts = 0; + if (try) break; + if (atomic_read(&itv->decoding) == 0) + return 0; + if (itv->output_mode != OUT_MPG) + return -EBUSY; + + itv->output_mode = OUT_NONE; + return ivtv_stop_v4l2_decode_stream(s, vc->flags, vc->stop.pts); + + case VIDEO_CMD_FREEZE: + if (try) break; + if (itv->output_mode != OUT_MPG) + return -EBUSY; + if (atomic_read(&itv->decoding) > 0) { + ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, + (vc->flags & VIDEO_CMD_FREEZE_TO_BLACK) ? 
1 : 0); + } + break; + + case VIDEO_CMD_CONTINUE: + if (try) break; + if (itv->output_mode != OUT_MPG) + return -EBUSY; + if (atomic_read(&itv->decoding) > 0) { + ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 0); + } + break; + + default: + return -EINVAL; + } + return 0; +} + +static int ivtv_itvc(struct ivtv *itv, unsigned int cmd, void *arg) +{ + struct v4l2_register *regs = arg; + unsigned long flags; + volatile u8 __iomem *reg_start; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (regs->reg >= IVTV_REG_OFFSET && regs->reg < IVTV_REG_OFFSET + IVTV_REG_SIZE) + reg_start = itv->reg_mem - IVTV_REG_OFFSET; + else if (itv->has_cx23415 && regs->reg >= IVTV_DECODER_OFFSET && + regs->reg < IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE) + reg_start = itv->dec_mem - IVTV_DECODER_OFFSET; + else if (regs->reg >= 0 && regs->reg < IVTV_ENCODER_SIZE) + reg_start = itv->enc_mem; + else + return -EINVAL; + + spin_lock_irqsave(&ivtv_cards_lock, flags); + if (cmd == VIDIOC_DBG_G_REGISTER) { + regs->val = readl(regs->reg + reg_start); + } else { + writel(regs->val, regs->reg + reg_start); + } + spin_unlock_irqrestore(&ivtv_cards_lock, flags); + return 0; +} + +static int ivtv_get_fmt(struct ivtv *itv, int streamtype, struct v4l2_format *fmt) +{ + switch (fmt->type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + fmt->fmt.pix.left = itv->main_rect.left; + fmt->fmt.pix.top = itv->main_rect.top; + fmt->fmt.pix.width = itv->main_rect.width; + fmt->fmt.pix.height = itv->main_rect.height; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + fmt->fmt.pix.field = V4L2_FIELD_INTERLACED; + if (itv->output_mode == OUT_UDMA_YUV) { + switch (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) { + case IVTV_YUV_MODE_INTERLACED: + fmt->fmt.pix.field = (itv->yuv_info.lace_mode & IVTV_YUV_SYNC_MASK) ? 
+ V4L2_FIELD_INTERLACED_BT : V4L2_FIELD_INTERLACED_TB; + break; + case IVTV_YUV_MODE_PROGRESSIVE: + fmt->fmt.pix.field = V4L2_FIELD_NONE; + break; + default: + fmt->fmt.pix.field = V4L2_FIELD_ANY; + break; + } + fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_HM12; + /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */ + fmt->fmt.pix.sizeimage = + fmt->fmt.pix.height * fmt->fmt.pix.width + + fmt->fmt.pix.height * (fmt->fmt.pix.width / 2); + } + else if (itv->output_mode == OUT_YUV || + streamtype == IVTV_ENC_STREAM_TYPE_YUV || + streamtype == IVTV_DEC_STREAM_TYPE_YUV) { + fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_HM12; + /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */ + fmt->fmt.pix.sizeimage = + fmt->fmt.pix.height * fmt->fmt.pix.width + + fmt->fmt.pix.height * (fmt->fmt.pix.width / 2); + } else { + fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; + fmt->fmt.pix.sizeimage = 128 * 1024; + } + break; + + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + fmt->fmt.pix.left = 0; + fmt->fmt.pix.top = 0; + fmt->fmt.pix.width = itv->params.width; + fmt->fmt.pix.height = itv->params.height; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + fmt->fmt.pix.field = V4L2_FIELD_INTERLACED; + if (streamtype == IVTV_ENC_STREAM_TYPE_YUV || + streamtype == IVTV_DEC_STREAM_TYPE_YUV) { + fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_HM12; + /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */ + fmt->fmt.pix.sizeimage = + fmt->fmt.pix.height * fmt->fmt.pix.width + + fmt->fmt.pix.height * (fmt->fmt.pix.width / 2); + } else { + fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; + fmt->fmt.pix.sizeimage = 128 * 1024; + } + break; + + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + fmt->fmt.win.chromakey = itv->osd_color_key; + fmt->fmt.win.global_alpha = itv->osd_global_alpha; + break; + + case V4L2_BUF_TYPE_VBI_CAPTURE: + fmt->fmt.vbi.sampling_rate = 27000000; + fmt->fmt.vbi.offset = 248; + fmt->fmt.vbi.samples_per_line = itv->vbi.raw_decoder_line_size - 4; + fmt->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY; + fmt->fmt.vbi.start[0] = itv->vbi.start[0]; + fmt->fmt.vbi.start[1] = itv->vbi.start[1]; + fmt->fmt.vbi.count[0] = fmt->fmt.vbi.count[1] = itv->vbi.count; + break; + + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + { + struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; + + if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT)) + return -EINVAL; + vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; + memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved)); + memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines)); + if (itv->is_60hz) { + vbifmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525; + vbifmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525; + } else { + vbifmt->service_lines[0][23] = V4L2_SLICED_WSS_625; + vbifmt->service_lines[0][16] = V4L2_SLICED_VPS; + } + vbifmt->service_set = get_service_set(vbifmt); + break; + } + + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + { + struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; + + vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; + memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved)); + memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines)); + + if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) { + vbifmt->service_set = itv->is_50hz ? 
V4L2_SLICED_VBI_625 : + V4L2_SLICED_VBI_525; + expand_service_set(vbifmt, itv->is_50hz); + break; + } + + itv->video_dec_func(itv, VIDIOC_G_FMT, fmt); + vbifmt->service_set = get_service_set(vbifmt); + break; + } + case V4L2_BUF_TYPE_VBI_OUTPUT: + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + default: + return -EINVAL; + } + return 0; +} + +static int ivtv_try_or_set_fmt(struct ivtv *itv, int streamtype, + struct v4l2_format *fmt, int set_fmt) +{ + struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; + u16 set; + + if (fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { + struct v4l2_rect r; + int field; + + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + field = fmt->fmt.pix.field; + r.top = fmt->fmt.pix.top; + r.left = fmt->fmt.pix.left; + r.width = fmt->fmt.pix.width; + r.height = fmt->fmt.pix.height; + ivtv_get_fmt(itv, streamtype, fmt); + if (itv->output_mode != OUT_UDMA_YUV) { + /* TODO: would setting the rect also be valid for this mode? */ + fmt->fmt.pix.top = r.top; + fmt->fmt.pix.left = r.left; + fmt->fmt.pix.width = r.width; + fmt->fmt.pix.height = r.height; + } + if (itv->output_mode == OUT_UDMA_YUV) { + /* TODO: add checks for validity */ + fmt->fmt.pix.field = field; + } + if (set_fmt) { + if (itv->output_mode == OUT_UDMA_YUV) { + switch (field) { + case V4L2_FIELD_NONE: + itv->yuv_info.lace_mode = IVTV_YUV_MODE_PROGRESSIVE; + break; + case V4L2_FIELD_ANY: + itv->yuv_info.lace_mode = IVTV_YUV_MODE_AUTO; + break; + case V4L2_FIELD_INTERLACED_BT: + itv->yuv_info.lace_mode = + IVTV_YUV_MODE_INTERLACED|IVTV_YUV_SYNC_ODD; + break; + case V4L2_FIELD_INTERLACED_TB: + default: + itv->yuv_info.lace_mode = IVTV_YUV_MODE_INTERLACED; + break; + } + itv->yuv_info.lace_sync_field = (itv->yuv_info.lace_mode & IVTV_YUV_SYNC_MASK) == IVTV_YUV_SYNC_EVEN ? 0 : 1; + + /* Force update of yuv registers */ + itv->yuv_info.yuv_forced_update = 1; + return 0; + } + if (!ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4, + r.width, r.height, r.left, r.top)) + itv->main_rect = r; + else + return -EINVAL; + } + return 0; + } + + if (fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY) { + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + if (set_fmt) { + itv->osd_color_key = fmt->fmt.win.chromakey; + itv->osd_global_alpha = fmt->fmt.win.global_alpha; + ivtv_set_osd_alpha(itv); + } + return 0; + } + + /* set window size */ + if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { + int w = fmt->fmt.pix.width; + int h = fmt->fmt.pix.height; + + if (w > 720) w = 720; + else if (w < 1) w = 1; + if (h > (itv->is_50hz ? 576 : 480)) h = (itv->is_50hz ? 576 : 480); + else if (h < 2) h = 2; + ivtv_get_fmt(itv, streamtype, fmt); + fmt->fmt.pix.width = w; + fmt->fmt.pix.height = h; + + if (!set_fmt || (itv->params.width == w && itv->params.height == h)) + return 0; + if (atomic_read(&itv->capturing) > 0) + return -EBUSY; + + itv->params.width = w; + itv->params.height = h; + if (w != 720 || h != (itv->is_50hz ? 
576 : 480)) + itv->params.video_temporal_filter = 0; + else + itv->params.video_temporal_filter = 8; + itv->video_dec_func(itv, VIDIOC_S_FMT, fmt); + return ivtv_get_fmt(itv, streamtype, fmt); + } + + /* set raw VBI format */ + if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) { + if (set_fmt && streamtype == IVTV_ENC_STREAM_TYPE_VBI && + itv->vbi.sliced_in->service_set && + atomic_read(&itv->capturing) > 0) { + return -EBUSY; + } + if (set_fmt) { + itv->vbi.sliced_in->service_set = 0; + itv->video_dec_func(itv, VIDIOC_S_FMT, &itv->vbi.in); + } + return ivtv_get_fmt(itv, streamtype, fmt); + } + + /* set sliced VBI output + In principle the user could request that only certain + VBI types are output and that the others are ignored. + I.e., suppress CC in the even fields or only output + WSS and no VPS. Currently though there is no choice. */ + if (fmt->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) + return ivtv_get_fmt(itv, streamtype, fmt); + + /* any else but sliced VBI capture is an error */ + if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) + return -EINVAL; + + if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) + return ivtv_get_fmt(itv, streamtype, fmt); + + /* set sliced VBI capture format */ + vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; + memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved)); + + if (vbifmt->service_set) + expand_service_set(vbifmt, itv->is_50hz); + set = check_service_set(vbifmt, itv->is_50hz); + vbifmt->service_set = get_service_set(vbifmt); + + if (!set_fmt) + return 0; + if (set == 0) + return -EINVAL; + if (atomic_read(&itv->capturing) > 0 && itv->vbi.sliced_in->service_set == 0) { + return -EBUSY; + } + itv->video_dec_func(itv, VIDIOC_S_FMT, fmt); + memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in)); + return 0; +} + +static int ivtv_internal_ioctls(struct file *filp, unsigned int cmd, void *arg) +{ + struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data; + struct ivtv *itv = id->itv; + struct v4l2_register *reg = arg; + + switch (cmd) { + /* ioctls to allow direct access to the encoder registers for testing */ + case VIDIOC_DBG_G_REGISTER: + IVTV_DEBUG_IOCTL("VIDIOC_DBG_G_REGISTER\n"); + if (v4l2_chip_match_host(reg->match_type, reg->match_chip)) + return ivtv_itvc(itv, cmd, arg); + if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER) + return ivtv_i2c_id(itv, reg->match_chip, cmd, arg); + return ivtv_call_i2c_client(itv, reg->match_chip, cmd, arg); + + case VIDIOC_DBG_S_REGISTER: + IVTV_DEBUG_IOCTL("VIDIOC_DBG_S_REGISTER\n"); + if (v4l2_chip_match_host(reg->match_type, reg->match_chip)) + return ivtv_itvc(itv, cmd, arg); + if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER) + return ivtv_i2c_id(itv, reg->match_chip, cmd, arg); + return ivtv_call_i2c_client(itv, reg->match_chip, cmd, arg); + + case VIDIOC_G_CHIP_IDENT: { + struct v4l2_chip_ident *chip = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_G_CHIP_IDENT\n"); + chip->ident = V4L2_IDENT_NONE; + chip->revision = 0; + if (reg->match_type == V4L2_CHIP_MATCH_HOST) { + if (v4l2_chip_match_host(reg->match_type, reg->match_chip)) { + struct v4l2_chip_ident *chip = arg; + + chip->ident = itv->has_cx23415 ? 
V4L2_IDENT_CX23415 : V4L2_IDENT_CX23416; + } + return 0; + } + if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER) + return ivtv_i2c_id(itv, reg->match_chip, cmd, arg); + if (reg->match_type == V4L2_CHIP_MATCH_I2C_ADDR) + return ivtv_call_i2c_client(itv, reg->match_chip, cmd, arg); + return -EINVAL; + } + + case VIDIOC_INT_S_AUDIO_ROUTING: { + struct v4l2_routing *route = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_INT_S_AUDIO_ROUTING\n"); + ivtv_audio_set_route(itv, route); + break; + } + + case VIDIOC_INT_RESET: + IVTV_DEBUG_IOCTL("VIDIOC_INT_RESET\n"); + ivtv_reset_ir_gpio(itv); + break; + + default: + return -EINVAL; + } + return 0; +} + +int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void *arg) +{ + struct ivtv_open_id *id = NULL; + + if (filp) id = (struct ivtv_open_id *)filp->private_data; + + switch (cmd) { + case VIDIOC_QUERYCAP:{ + struct v4l2_capability *vcap = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_QUERYCAP\n"); + + memset(vcap, 0, sizeof(*vcap)); + strcpy(vcap->driver, IVTV_DRIVER_NAME); /* driver name */ + strcpy(vcap->card, itv->card_name); /* card type */ + strcpy(vcap->bus_info, pci_name(itv->dev)); /* bus info... */ + vcap->version = IVTV_DRIVER_VERSION; /* version */ + vcap->capabilities = itv->v4l2_cap; /* capabilities */ + + /* reserved.. must set to 0! */ + vcap->reserved[0] = vcap->reserved[1] = + vcap->reserved[2] = vcap->reserved[3] = 0; + break; + } + + case VIDIOC_ENUMAUDIO:{ + struct v4l2_audio *vin = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_ENUMAUDIO\n"); + + return ivtv_get_audio_input(itv, vin->index, vin); + } + + case VIDIOC_G_AUDIO:{ + struct v4l2_audio *vin = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_G_AUDIO\n"); + vin->index = itv->audio_input; + return ivtv_get_audio_input(itv, vin->index, vin); + } + + case VIDIOC_S_AUDIO:{ + struct v4l2_audio *vout = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_S_AUDIO\n"); + + if (vout->index >= itv->nof_audio_inputs) + return -EINVAL; + itv->audio_input = vout->index; + ivtv_audio_set_io(itv); + break; + } + + case VIDIOC_ENUMAUDOUT:{ + struct v4l2_audioout *vin = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_ENUMAUDOUT\n"); + + /* set it to defaults from our table */ + return ivtv_get_audio_output(itv, vin->index, vin); + } + + case VIDIOC_G_AUDOUT:{ + struct v4l2_audioout *vin = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_G_AUDOUT\n"); + vin->index = 0; + return ivtv_get_audio_output(itv, vin->index, vin); + } + + case VIDIOC_S_AUDOUT:{ + struct v4l2_audioout *vout = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_S_AUDOUT\n"); + + return ivtv_get_audio_output(itv, vout->index, vout); + } + + case VIDIOC_ENUMINPUT:{ + struct v4l2_input *vin = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_ENUMINPUT\n"); + + /* set it to defaults from our table */ + return ivtv_get_input(itv, vin->index, vin); + } + + case VIDIOC_ENUMOUTPUT:{ + struct v4l2_output *vout = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_ENUMOUTPUT\n"); + + return ivtv_get_output(itv, vout->index, vout); + } + + case VIDIOC_TRY_FMT: + case VIDIOC_S_FMT: { + struct v4l2_format *fmt = arg; + + if (cmd == VIDIOC_S_FMT) { + IVTV_DEBUG_IOCTL("VIDIOC_S_FMT\n"); + } else { + IVTV_DEBUG_IOCTL("VIDIOC_TRY_FMT\n"); + } + return ivtv_try_or_set_fmt(itv, id->type, fmt, cmd == VIDIOC_S_FMT); + } + + case VIDIOC_G_FMT: { + struct v4l2_format *fmt = arg; + int type = fmt->type; + + IVTV_DEBUG_IOCTL("VIDIOC_G_FMT\n"); + memset(fmt, 0, sizeof(*fmt)); + fmt->type = type; + return ivtv_get_fmt(itv, id->type, fmt); + } + + case VIDIOC_S_CROP: { + struct v4l2_crop *crop = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_S_CROP\n"); + if (crop->type != 
V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + return itv->video_dec_func(itv, VIDIOC_S_CROP, arg); + } + + case VIDIOC_G_CROP: { + struct v4l2_crop *crop = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_G_CROP\n"); + if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + return itv->video_dec_func(itv, VIDIOC_G_CROP, arg); + } + + case VIDIOC_ENUM_FMT: { + static struct v4l2_fmtdesc formats[] = { + { 0, 0, 0, + "HM12 (YUV 4:1:1)", V4L2_PIX_FMT_HM12, + { 0, 0, 0, 0 } + }, + { 1, 0, V4L2_FMT_FLAG_COMPRESSED, + "MPEG", V4L2_PIX_FMT_MPEG, + { 0, 0, 0, 0 } + } + }; + struct v4l2_fmtdesc *fmt = arg; + enum v4l2_buf_type type = fmt->type; + + switch (type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + break; + default: + return -EINVAL; + } + if (fmt->index > 1) + return -EINVAL; + *fmt = formats[fmt->index]; + fmt->type = type; + return 0; + } + + case VIDIOC_G_INPUT:{ + IVTV_DEBUG_IOCTL("VIDIOC_G_INPUT\n"); + + *(int *)arg = itv->active_input; + break; + } + + case VIDIOC_S_INPUT:{ + int inp = *(int *)arg; + + IVTV_DEBUG_IOCTL("VIDIOC_S_INPUT\n"); + + if (inp < 0 || inp >= itv->nof_inputs) + return -EINVAL; + + if (inp == itv->active_input) { + IVTV_DEBUG_INFO("Input unchanged\n"); + break; + } + IVTV_DEBUG_INFO("Changing input from %d to %d\n", + itv->active_input, inp); + + itv->active_input = inp; + /* Set the audio input to whatever is appropriate for the + input type. */ + itv->audio_input = itv->card->video_inputs[inp].audio_index; + + /* prevent others from messing with the streams until + we're finished changing inputs. */ + ivtv_mute(itv); + ivtv_video_set_io(itv); + ivtv_audio_set_io(itv); + ivtv_unmute(itv); + break; + } + + case VIDIOC_G_OUTPUT:{ + IVTV_DEBUG_IOCTL("VIDIOC_G_OUTPUT\n"); + + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + *(int *)arg = itv->active_output; + break; + } + + case VIDIOC_S_OUTPUT:{ + int outp = *(int *)arg; + struct v4l2_routing route; + + IVTV_DEBUG_IOCTL("VIDIOC_S_OUTPUT\n"); + + if (outp >= itv->card->nof_outputs) + return -EINVAL; + + if (outp == itv->active_output) { + IVTV_DEBUG_INFO("Output unchanged\n"); + break; + } + IVTV_DEBUG_INFO("Changing output from %d to %d\n", + itv->active_output, outp); + + itv->active_output = outp; + route.input = SAA7127_INPUT_TYPE_NORMAL; + route.output = itv->card->video_outputs[outp].video_output; + ivtv_saa7127(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); + break; + } + + case VIDIOC_G_FREQUENCY:{ + struct v4l2_frequency *vf = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_G_FREQUENCY\n"); + + if (vf->tuner != 0) + return -EINVAL; + ivtv_call_i2c_clients(itv, cmd, arg); + break; + } + + case VIDIOC_S_FREQUENCY:{ + struct v4l2_frequency vf = *(struct v4l2_frequency *)arg; + + IVTV_DEBUG_IOCTL("VIDIOC_S_FREQUENCY\n"); + + if (vf.tuner != 0) + return -EINVAL; + + ivtv_mute(itv); + IVTV_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf.frequency); + ivtv_call_i2c_clients(itv, cmd, &vf); + ivtv_unmute(itv); + break; + } + + case VIDIOC_ENUMSTD:{ + struct v4l2_standard *vs = arg; + int idx = vs->index; + + IVTV_DEBUG_IOCTL("VIDIOC_ENUMSTD\n"); + + if (idx < 0 || idx >= ARRAY_SIZE(enum_stds)) + return -EINVAL; + + *vs = (enum_stds[idx].std & V4L2_STD_525_60) ? 
+ ivtv_std_60hz : ivtv_std_50hz; + vs->index = idx; + vs->id = enum_stds[idx].std; + strcpy(vs->name, enum_stds[idx].name); + break; + } + + case VIDIOC_G_STD:{ + IVTV_DEBUG_IOCTL("VIDIOC_G_STD\n"); + *(v4l2_std_id *) arg = itv->std; + break; + } + + case VIDIOC_S_STD: { + v4l2_std_id std = *(v4l2_std_id *) arg; + + IVTV_DEBUG_IOCTL("VIDIOC_S_STD\n"); + + if ((std & V4L2_STD_ALL) == 0) + return -EINVAL; + + if (std == itv->std) + break; + + if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) || + atomic_read(&itv->capturing) > 0 || + atomic_read(&itv->decoding) > 0) { + /* Switching standard would turn off the radio or mess + with already running streams, prevent that by + returning EBUSY. */ + return -EBUSY; + } + + itv->std = std; + itv->is_60hz = (std & V4L2_STD_525_60) ? 1 : 0; + itv->params.is_50hz = itv->is_50hz = !itv->is_60hz; + itv->params.width = 720; + itv->params.height = itv->is_50hz ? 576 : 480; + itv->vbi.count = itv->is_50hz ? 18 : 12; + itv->vbi.start[0] = itv->is_50hz ? 6 : 10; + itv->vbi.start[1] = itv->is_50hz ? 318 : 273; + if (itv->hw_flags & IVTV_HW_CX25840) { + itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284; + } + IVTV_DEBUG_INFO("Switching standard to %llx.\n", itv->std); + + /* Tuner */ + ivtv_call_i2c_clients(itv, VIDIOC_S_STD, &itv->std); + + if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { + /* set display standard */ + itv->std_out = std; + itv->is_out_60hz = itv->is_60hz; + itv->is_out_50hz = itv->is_50hz; + ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std_out); + ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz); + itv->main_rect.left = itv->main_rect.top = 0; + itv->main_rect.width = 720; + itv->main_rect.height = itv->params.height; + ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4, + 720, itv->main_rect.height, 0, 0); + } + break; + } + + case VIDIOC_S_TUNER: { /* Setting tuner can only set audio mode */ + struct v4l2_tuner *vt = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_S_TUNER\n"); + + if (vt->index != 0) + return -EINVAL; + + ivtv_call_i2c_clients(itv, VIDIOC_S_TUNER, vt); + break; + } + + case VIDIOC_G_TUNER: { + struct v4l2_tuner *vt = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_G_TUNER\n"); + + if (vt->index != 0) + return -EINVAL; + + memset(vt, 0, sizeof(*vt)); + ivtv_call_i2c_clients(itv, VIDIOC_G_TUNER, vt); + + if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) { + strcpy(vt->name, "ivtv Radio Tuner"); + vt->type = V4L2_TUNER_RADIO; + } else { + strcpy(vt->name, "ivtv TV Tuner"); + vt->type = V4L2_TUNER_ANALOG_TV; + } + break; + } + + case VIDIOC_G_SLICED_VBI_CAP: { + struct v4l2_sliced_vbi_cap *cap = arg; + int set = itv->is_50hz ? 
V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525; + int f, l; + enum v4l2_buf_type type = cap->type; + + IVTV_DEBUG_IOCTL("VIDIOC_G_SLICED_VBI_CAP\n"); + memset(cap, 0, sizeof(*cap)); + cap->type = type; + if (type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) { + for (f = 0; f < 2; f++) { + for (l = 0; l < 24; l++) { + if (valid_service_line(f, l, itv->is_50hz)) { + cap->service_lines[f][l] = set; + } + } + } + return 0; + } + if (type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) { + if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT)) + return -EINVAL; + if (itv->is_60hz) { + cap->service_lines[0][21] = V4L2_SLICED_CAPTION_525; + cap->service_lines[1][21] = V4L2_SLICED_CAPTION_525; + } else { + cap->service_lines[0][23] = V4L2_SLICED_WSS_625; + cap->service_lines[0][16] = V4L2_SLICED_VPS; + } + return 0; + } + return -EINVAL; + } + + case VIDIOC_LOG_STATUS: + { + int has_output = itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT; + struct v4l2_input vidin; + struct v4l2_audio audin; + int i; + + IVTV_INFO("================= START STATUS CARD #%d =================\n", itv->num); + if (itv->hw_flags & IVTV_HW_TVEEPROM) { + struct tveeprom tv; + + ivtv_read_eeprom(itv, &tv); + } + ivtv_call_i2c_clients(itv, VIDIOC_LOG_STATUS, NULL); + ivtv_get_input(itv, itv->active_input, &vidin); + ivtv_get_audio_input(itv, itv->audio_input, &audin); + IVTV_INFO("Video Input: %s\n", vidin.name); + IVTV_INFO("Audio Input: %s\n", audin.name); + if (has_output) { + struct v4l2_output vidout; + struct v4l2_audioout audout; + int mode = itv->output_mode; + static const char * const output_modes[] = { + "None", + "MPEG Streaming", + "YUV Streaming", + "YUV Frames", + "Passthrough", + }; + + ivtv_get_output(itv, itv->active_output, &vidout); + ivtv_get_audio_output(itv, 0, &audout); + IVTV_INFO("Video Output: %s\n", vidout.name); + IVTV_INFO("Audio Output: %s\n", audout.name); + if (mode < 0 || mode > OUT_PASSTHROUGH) + mode = OUT_NONE; + IVTV_INFO("Output Mode: %s\n", output_modes[mode]); + } + IVTV_INFO("Tuner: %s\n", + test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? 
"Radio" : "TV"); + cx2341x_log_status(&itv->params, itv->name); + IVTV_INFO("Status flags: 0x%08lx\n", itv->i_flags); + for (i = 0; i < IVTV_MAX_STREAMS; i++) { + struct ivtv_stream *s = &itv->streams[i]; + + if (s->v4l2dev == NULL || s->buffers == 0) + continue; + IVTV_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", s->name, s->s_flags, + (s->buffers - s->q_free.buffers) * 100 / s->buffers, + (s->buffers * s->buf_size) / 1024, s->buffers); + } + IVTV_INFO("Read MPEG/VBI: %lld/%lld bytes\n", itv->mpg_data_received, itv->vbi_data_inserted); + IVTV_INFO("================== END STATUS CARD #%d ==================\n", itv->num); + break; + } + + default: + return -EINVAL; + } + return 0; +} + +static int ivtv_ivtv_ioctls(struct file *filp, unsigned int cmd, void *arg) +{ + struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data; + struct ivtv *itv = id->itv; + int nonblocking = filp->f_flags & O_NONBLOCK; + struct ivtv_stream *s = &itv->streams[id->type]; + + switch (cmd) { + case IVTV_IOC_DMA_FRAME: { + struct ivtv_dma_frame *args = arg; + + IVTV_DEBUG_IOCTL("IVTV_IOC_DMA_FRAME\n"); + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + if (args->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + if (itv->output_mode == OUT_UDMA_YUV && args->y_source == NULL) + return 0; + if (ivtv_claim_stream(id, id->type)) { + return -EBUSY; + } + if (ivtv_set_output_mode(itv, OUT_UDMA_YUV) != OUT_UDMA_YUV) { + ivtv_release_stream(s); + return -EBUSY; + } + if (args->y_source == NULL) + return 0; + return ivtv_yuv_prep_frame(itv, args); + } + + case VIDEO_GET_PTS: { + u32 data[CX2341X_MBOX_MAX_DATA]; + u64 *pts = arg; + + IVTV_DEBUG_IOCTL("VIDEO_GET_PTS\n"); + if (s->type < IVTV_DEC_STREAM_TYPE_MPG) { + *pts = s->dma_pts; + break; + } + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + + if (test_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags)) { + *pts = (u64) ((u64)itv->last_dec_timing[2] << 32) | + (u64)itv->last_dec_timing[1]; + break; + } + *pts = 0; + if (atomic_read(&itv->decoding)) { + if (ivtv_api(itv, CX2341X_DEC_GET_TIMING_INFO, 5, data)) { + IVTV_DEBUG_WARN("GET_TIMING: couldn't read clock\n"); + return -EIO; + } + memcpy(itv->last_dec_timing, data, sizeof(itv->last_dec_timing)); + set_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags); + *pts = (u64) ((u64) data[2] << 32) | (u64) data[1]; + /*timing->scr = (u64) (((u64) data[4] << 32) | (u64) (data[3]));*/ + } + break; + } + + case VIDEO_GET_FRAME_COUNT: { + u32 data[CX2341X_MBOX_MAX_DATA]; + u64 *frame = arg; + + IVTV_DEBUG_IOCTL("VIDEO_GET_FRAME_COUNT\n"); + if (s->type < IVTV_DEC_STREAM_TYPE_MPG) { + *frame = 0; + break; + } + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + + if (test_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags)) { + *frame = itv->last_dec_timing[0]; + break; + } + *frame = 0; + if (atomic_read(&itv->decoding)) { + if (ivtv_api(itv, CX2341X_DEC_GET_TIMING_INFO, 5, data)) { + IVTV_DEBUG_WARN("GET_TIMING: couldn't read clock\n"); + return -EIO; + } + memcpy(itv->last_dec_timing, data, sizeof(itv->last_dec_timing)); + set_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags); + *frame = data[0]; + } + break; + } + + case VIDEO_PLAY: { + struct video_command vc; + + IVTV_DEBUG_IOCTL("VIDEO_PLAY\n"); + memset(&vc, 0, sizeof(vc)); + vc.cmd = VIDEO_CMD_PLAY; + return ivtv_video_command(itv, id, &vc, 0); + } + + case VIDEO_STOP: { + struct video_command vc; + + IVTV_DEBUG_IOCTL("VIDEO_STOP\n"); + memset(&vc, 0, sizeof(vc)); + vc.cmd = VIDEO_CMD_STOP; + 
vc.flags = VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY; + return ivtv_video_command(itv, id, &vc, 0); + } + + case VIDEO_FREEZE: { + struct video_command vc; + + IVTV_DEBUG_IOCTL("VIDEO_FREEZE\n"); + memset(&vc, 0, sizeof(vc)); + vc.cmd = VIDEO_CMD_FREEZE; + return ivtv_video_command(itv, id, &vc, 0); + } + + case VIDEO_CONTINUE: { + struct video_command vc; + + IVTV_DEBUG_IOCTL("VIDEO_CONTINUE\n"); + memset(&vc, 0, sizeof(vc)); + vc.cmd = VIDEO_CMD_CONTINUE; + return ivtv_video_command(itv, id, &vc, 0); + } + + case VIDEO_COMMAND: + case VIDEO_TRY_COMMAND: { + struct video_command *vc = arg; + int try = (cmd == VIDEO_TRY_COMMAND); + + if (try) + IVTV_DEBUG_IOCTL("VIDEO_TRY_COMMAND\n"); + else + IVTV_DEBUG_IOCTL("VIDEO_COMMAND\n"); + return ivtv_video_command(itv, id, vc, try); + } + + case VIDEO_GET_EVENT: { + struct video_event *ev = arg; + DEFINE_WAIT(wait); + + IVTV_DEBUG_IOCTL("VIDEO_GET_EVENT\n"); + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + memset(ev, 0, sizeof(*ev)); + set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); + + while (1) { + if (test_and_clear_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags)) + ev->type = VIDEO_EVENT_DECODER_STOPPED; + else if (test_and_clear_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) { + ev->type = VIDEO_EVENT_VSYNC; + ev->timestamp = test_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags) ? + 1 : 0; + clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); + } + if (ev->type) + return 0; + if (nonblocking) + return -EAGAIN; + /* wait for event */ + prepare_to_wait(&itv->event_waitq, &wait, TASK_INTERRUPTIBLE); + if ((itv->i_flags & (IVTV_F_I_EV_DEC_STOPPED|IVTV_F_I_EV_VSYNC)) == 0) + schedule(); + finish_wait(&itv->event_waitq, &wait); + if (signal_pending(current)) { + /* return if a signal was received */ + IVTV_DEBUG_INFO("User stopped wait for event\n"); + return -EINTR; + } + } + break; + } + + case VIDIOC_G_ENC_INDEX: { + struct v4l2_enc_idx *idx = arg; + int i; + + IVTV_DEBUG_IOCTL("VIDIOC_G_ENC_INDEX\n"); + idx->entries = (itv->pgm_info_write_idx + IVTV_MAX_PGM_INDEX - itv->pgm_info_read_idx) % + IVTV_MAX_PGM_INDEX; + if (idx->entries > V4L2_ENC_IDX_ENTRIES) + idx->entries = V4L2_ENC_IDX_ENTRIES; + for (i = 0; i < idx->entries; i++) { + idx->entry[i] = itv->pgm_info[(itv->pgm_info_read_idx + i) % IVTV_MAX_PGM_INDEX]; + } + itv->pgm_info_read_idx = (itv->pgm_info_read_idx + idx->entries) % IVTV_MAX_PGM_INDEX; + break; + } + + case VIDIOC_ENCODER_CMD: + case VIDIOC_TRY_ENCODER_CMD: { + struct v4l2_encoder_cmd *enc = arg; + int try = cmd == VIDIOC_TRY_ENCODER_CMD; + + if (try) + IVTV_DEBUG_IOCTL("VIDIOC_TRY_ENCODER_CMD\n"); + else + IVTV_DEBUG_IOCTL("VIDIOC_ENCODER_CMD\n"); + switch (enc->cmd) { + case V4L2_ENC_CMD_START: + return ivtv_start_capture(id); + + case V4L2_ENC_CMD_STOP: + ivtv_stop_capture(id, enc->flags & V4L2_ENC_CMD_STOP_AT_GOP_END); + return 0; + + case V4L2_ENC_CMD_PAUSE: + if (!atomic_read(&itv->capturing)) + return -EPERM; + if (test_and_set_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags)) + return 0; + ivtv_mute(itv); + ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 0); + break; + + case V4L2_ENC_CMD_RESUME: + if (!atomic_read(&itv->capturing)) + return -EPERM; + if (!test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags)) + return 0; + ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1); + ivtv_unmute(itv); + break; + } + break; + } + + case VIDIOC_G_FBUF: { + struct v4l2_framebuffer *fb = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_G_FBUF\n"); + memset(fb, 0, sizeof(*fb)); + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + 
break; + fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY | + V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_GLOBAL_ALPHA; + fb->fmt.pixelformat = itv->osd_pixelformat; + fb->fmt.width = itv->osd_rect.width; + fb->fmt.height = itv->osd_rect.height; + fb->fmt.left = itv->osd_rect.left; + fb->fmt.top = itv->osd_rect.top; + fb->base = (void *)itv->osd_video_pbase; + if (itv->osd_global_alpha_state) + fb->flags |= V4L2_FBUF_FLAG_GLOBAL_ALPHA; + if (itv->osd_local_alpha_state) + fb->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA; + if (itv->osd_color_key_state) + fb->flags |= V4L2_FBUF_FLAG_CHROMAKEY; + break; + } + + case VIDIOC_S_FBUF: { + struct v4l2_framebuffer *fb = arg; + + IVTV_DEBUG_IOCTL("VIDIOC_S_FBUF\n"); + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + break; + itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0; + itv->osd_local_alpha_state = (fb->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) != 0; + itv->osd_color_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0; + break; + } + + default: + return -EINVAL; + } + return 0; +} + +static int ivtv_v4l2_do_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, void *arg) +{ + struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data; + struct ivtv *itv = id->itv; + + IVTV_DEBUG_IOCTL("v4l2 ioctl 0x%08x\n", cmd); + + switch (cmd) { + case VIDIOC_DBG_G_REGISTER: + case VIDIOC_DBG_S_REGISTER: + case VIDIOC_G_CHIP_IDENT: + case VIDIOC_INT_S_AUDIO_ROUTING: + case VIDIOC_INT_RESET: + return ivtv_internal_ioctls(filp, cmd, arg); + + case VIDIOC_QUERYCAP: + case VIDIOC_ENUMINPUT: + case VIDIOC_G_INPUT: + case VIDIOC_S_INPUT: + case VIDIOC_ENUMOUTPUT: + case VIDIOC_G_OUTPUT: + case VIDIOC_S_OUTPUT: + case VIDIOC_G_FMT: + case VIDIOC_S_FMT: + case VIDIOC_TRY_FMT: + case VIDIOC_ENUM_FMT: + case VIDIOC_G_CROP: + case VIDIOC_S_CROP: + case VIDIOC_G_FREQUENCY: + case VIDIOC_S_FREQUENCY: + case VIDIOC_ENUMSTD: + case VIDIOC_G_STD: + case VIDIOC_S_STD: + case VIDIOC_S_TUNER: + case VIDIOC_G_TUNER: + case VIDIOC_ENUMAUDIO: + case VIDIOC_S_AUDIO: + case VIDIOC_G_AUDIO: + case VIDIOC_ENUMAUDOUT: + case VIDIOC_S_AUDOUT: + case VIDIOC_G_AUDOUT: + case VIDIOC_G_SLICED_VBI_CAP: + case VIDIOC_LOG_STATUS: + return ivtv_v4l2_ioctls(itv, filp, cmd, arg); + + case VIDIOC_QUERYMENU: + case VIDIOC_QUERYCTRL: + case VIDIOC_S_CTRL: + case VIDIOC_G_CTRL: + case VIDIOC_S_EXT_CTRLS: + case VIDIOC_G_EXT_CTRLS: + case VIDIOC_TRY_EXT_CTRLS: + return ivtv_control_ioctls(itv, cmd, arg); + + case IVTV_IOC_DMA_FRAME: + case VIDEO_GET_PTS: + case VIDEO_GET_FRAME_COUNT: + case VIDEO_GET_EVENT: + case VIDEO_PLAY: + case VIDEO_STOP: + case VIDEO_FREEZE: + case VIDEO_CONTINUE: + case VIDEO_COMMAND: + case VIDEO_TRY_COMMAND: + case VIDIOC_G_ENC_INDEX: + case VIDIOC_ENCODER_CMD: + case VIDIOC_TRY_ENCODER_CMD: + case VIDIOC_G_FBUF: + case VIDIOC_S_FBUF: + return ivtv_ivtv_ioctls(filp, cmd, arg); + + case 0x00005401: /* Handle isatty() calls */ + return -EINVAL; + default: + return v4l_compat_translate_ioctl(inode, filp, cmd, arg, + ivtv_v4l2_do_ioctl); + } + return 0; +} + +int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data; + struct ivtv *itv = id->itv; + + /* Filter dvb ioctls that cannot be handled by video_usercopy */ + switch (cmd) { + case VIDEO_SELECT_SOURCE: + IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n"); + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return -EINVAL; + return 
ivtv_passthrough_mode(itv, arg == VIDEO_SOURCE_DEMUX); + + case AUDIO_SET_MUTE: + IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n"); + itv->speed_mute_audio = arg; + return 0; + + case AUDIO_CHANNEL_SELECT: + IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n"); + if (arg > AUDIO_STEREO_SWAPPED) + return -EINVAL; + itv->audio_stereo_mode = arg; + ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode); + return 0; + + case AUDIO_BILINGUAL_CHANNEL_SELECT: + IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n"); + if (arg > AUDIO_STEREO_SWAPPED) + return -EINVAL; + itv->audio_bilingual_mode = arg; + ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode); + return 0; + + default: + break; + } + return video_usercopy(inode, filp, cmd, arg, ivtv_v4l2_do_ioctl); +} diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h new file mode 100644 index 000000000000..cbccf7a9f65c --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-ioctl.h @@ -0,0 +1,28 @@ +/* + ioctl system call + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +u16 service2vbi(int type); +void expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal); +u16 get_service_set(struct v4l2_sliced_vbi_format *fmt); +int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg); +int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void *arg); +void ivtv_set_osd_alpha(struct ivtv *itv); +int ivtv_set_speed(struct ivtv *itv, int speed); diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c new file mode 100644 index 000000000000..0656e18b7c7e --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-irq.c @@ -0,0 +1,818 @@ +/* interrupt handling + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-firmware.h" +#include "ivtv-fileops.h" +#include "ivtv-queue.h" +#include "ivtv-udma.h" +#include "ivtv-irq.h" +#include "ivtv-ioctl.h" +#include "ivtv-mailbox.h" +#include "ivtv-vbi.h" + +#define DMA_MAGIC_COOKIE 0x000001fe + +#define SLICED_VBI_PIO 1 + +static void ivtv_dma_dec_start(struct ivtv_stream *s); + +static const int ivtv_stream_map[] = { + IVTV_ENC_STREAM_TYPE_MPG, + IVTV_ENC_STREAM_TYPE_YUV, + IVTV_ENC_STREAM_TYPE_PCM, + IVTV_ENC_STREAM_TYPE_VBI, +}; + +static inline int ivtv_use_pio(struct ivtv_stream *s) +{ + struct ivtv *itv = s->itv; + + return s->dma == PCI_DMA_NONE || + (SLICED_VBI_PIO && s->type == IVTV_ENC_STREAM_TYPE_VBI && itv->vbi.sliced_in->service_set); +} + +/* Determine the required DMA size, setup enough buffers in the predma queue and + actually copy the data from the card to the buffers in case a PIO transfer is + required for this stream. + */ +static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA]) +{ + struct ivtv *itv = s->itv; + struct ivtv_buffer *buf; + struct list_head *p; + u32 bytes_needed = 0; + u32 offset, size; + u32 UVoffset = 0, UVsize = 0; + int skip_bufs = s->q_predma.buffers; + int idx = s->SG_length; + int rc; + + /* sanity checks */ + if (s->v4l2dev == NULL) { + IVTV_DEBUG_WARN("Stream %s not started\n", s->name); + return -1; + } + if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { + IVTV_DEBUG_WARN("Stream %s not open\n", s->name); + return -1; + } + + /* determine offset, size and PTS for the various streams */ + switch (s->type) { + case IVTV_ENC_STREAM_TYPE_MPG: + offset = data[1]; + size = data[2]; + s->dma_pts = 0; + break; + + case IVTV_ENC_STREAM_TYPE_YUV: + offset = data[1]; + size = data[2]; + UVoffset = data[3]; + UVsize = data[4]; + s->dma_pts = ((u64) data[5] << 32) | data[6]; + break; + + case IVTV_ENC_STREAM_TYPE_PCM: + offset = data[1] + 12; + size = data[2] - 12; + s->dma_pts = read_dec(offset - 8) | + ((u64)(read_dec(offset - 12)) << 32); + if (itv->has_cx23415) + offset += IVTV_DECODER_OFFSET; + break; + + case IVTV_ENC_STREAM_TYPE_VBI: + size = itv->vbi.enc_size * itv->vbi.fpi; + offset = read_enc(itv->vbi.enc_start - 4) + 12; + if (offset == 12) { + IVTV_DEBUG_INFO("VBI offset == 0\n"); + return -1; + } + s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32); + break; + + case IVTV_DEC_STREAM_TYPE_VBI: + size = read_dec(itv->vbi.dec_start + 4) + 8; + offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start; + s->dma_pts = 0; + offset += IVTV_DECODER_OFFSET; + break; + default: + /* shouldn't happen */ + return -1; + } + + /* if this is the start of the DMA then fill in the magic cookie */ + if (s->SG_length == 0) { + if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || + s->type == IVTV_DEC_STREAM_TYPE_VBI)) { + s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET); + write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET); + } + else { + s->dma_backup = read_enc(offset); + write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset); + } + s->dma_offset = offset; + } + + bytes_needed = size; + if (s->type == IVTV_ENC_STREAM_TYPE_YUV) { + /* The size for the Y samples needs to be rounded upwards to a + multiple of the buf_size. The UV samples then start in the + next buffer. 
*/ + bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size); + bytes_needed += UVsize; + } + + IVTV_DEBUG_DMA("%s %s: 0x%08x bytes at 0x%08x\n", + ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset); + + rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed); + if (rc < 0) { /* Insufficient buffers */ + IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n", + bytes_needed, s->name); + return -1; + } + if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) { + IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name); + IVTV_WARN("Cause: the application is not reading fast enough.\n"); + } + s->buffers_stolen = rc; + + /* got the buffers, now fill in SGarray (DMA) or copy the data from the card + to the buffers (PIO). */ + buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list); + memset(buf->buf, 0, 128); + list_for_each(p, &s->q_predma.list) { + struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list); + + if (skip_bufs-- > 0) + continue; + if (!ivtv_use_pio(s)) { + s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle); + s->SGarray[idx].src = cpu_to_le32(offset); + s->SGarray[idx].size = cpu_to_le32(s->buf_size); + } + buf->bytesused = (size < s->buf_size) ? size : s->buf_size; + + /* If PIO, then copy the data from the card to the buffer */ + if (s->type == IVTV_DEC_STREAM_TYPE_VBI) { + memcpy_fromio(buf->buf, itv->dec_mem + offset - IVTV_DECODER_OFFSET, buf->bytesused); + } + else if (ivtv_use_pio(s)) { + memcpy_fromio(buf->buf, itv->enc_mem + offset, buf->bytesused); + } + + s->q_predma.bytesused += buf->bytesused; + size -= buf->bytesused; + offset += s->buf_size; + + /* Sync SG buffers */ + ivtv_buf_sync_for_device(s, buf); + + if (size == 0) { /* YUV */ + /* process the UV section */ + offset = UVoffset; + size = UVsize; + } + idx++; + } + s->SG_length = idx; + return 0; +} + +static void dma_post(struct ivtv_stream *s) +{ + struct ivtv *itv = s->itv; + struct ivtv_buffer *buf = NULL; + struct list_head *p; + u32 offset; + u32 *u32buf; + int x = 0; + + if (ivtv_use_pio(s)) { + if (s->q_predma.bytesused) + ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); + s->SG_length = 0; + } + IVTV_DEBUG_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? 
"PIO" : "DMA", + s->name, s->dma_offset); + list_for_each(p, &s->q_dma.list) { + buf = list_entry(p, struct ivtv_buffer, list); + u32buf = (u32 *)buf->buf; + + /* Sync Buffer */ + ivtv_buf_sync_for_cpu(s, buf); + + if (x == 0) { + offset = s->dma_last_offset; + if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) + { + for (offset = 0; offset < 64; offset++) { + if (u32buf[offset] == DMA_MAGIC_COOKIE) { + break; + } + } + offset *= 4; + if (offset == 256) { + IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name); + offset = s->dma_last_offset; + } + if (s->dma_last_offset != offset) + IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset); + s->dma_last_offset = offset; + } + if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM || + s->type == IVTV_DEC_STREAM_TYPE_VBI)) { + write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET); + } + else { + write_enc_sync(0, s->dma_offset); + } + if (offset) { + buf->bytesused -= offset; + memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset); + } + *u32buf = cpu_to_le32(s->dma_backup); + } + x++; + /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */ + if (s->type == IVTV_ENC_STREAM_TYPE_MPG || + s->type == IVTV_ENC_STREAM_TYPE_VBI) + set_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags); + } + if (buf) + buf->bytesused += s->dma_last_offset; + if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) { + /* Parse and Groom VBI Data */ + s->q_dma.bytesused -= buf->bytesused; + ivtv_process_vbi_data(itv, buf, 0, s->type); + s->q_dma.bytesused += buf->bytesused; + if (s->id == -1) { + ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0); + return; + } + } + ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused); + if (s->id != -1) + wake_up(&s->waitq); +} + +void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock) +{ + struct ivtv *itv = s->itv; + struct ivtv_buffer *buf; + struct list_head *p; + u32 y_size = itv->params.height * itv->params.width; + u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET; + int y_done = 0; + int bytes_written = 0; + unsigned long flags = 0; + int idx = 0; + + IVTV_DEBUG_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset); + buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list); + list_for_each(p, &s->q_predma.list) { + struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list); + + /* YUV UV Offset from Y Buffer */ + if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) { + offset = uv_offset; + y_done = 1; + } + s->SGarray[idx].src = cpu_to_le32(buf->dma_handle); + s->SGarray[idx].dst = cpu_to_le32(offset); + s->SGarray[idx].size = cpu_to_le32(buf->bytesused); + + offset += buf->bytesused; + bytes_written += buf->bytesused; + + /* Sync SG buffers */ + ivtv_buf_sync_for_device(s, buf); + idx++; + } + s->SG_length = idx; + + /* Mark last buffer size for Interrupt flag */ + s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000); + + /* Sync Hardware SG List of buffers */ + ivtv_stream_sync_for_device(s); + if (lock) + spin_lock_irqsave(&itv->dma_reg_lock, flags); + if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) { + ivtv_dma_dec_start(s); + } + else { + set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags); + } + if (lock) + spin_unlock_irqrestore(&itv->dma_reg_lock, flags); +} + +/* start the encoder DMA */ +static void ivtv_dma_enc_start(struct ivtv_stream *s) +{ + struct ivtv *itv = s->itv; + struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; + int i; + + 
if (s->q_predma.bytesused) + ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); + IVTV_DEBUG_DMA("start DMA for %s\n", s->name); + s->SGarray[s->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256); + + /* If this is an MPEG stream, and VBI data is also pending, then append the + VBI DMA to the MPEG DMA and transfer both sets of data at once. + + VBI DMA is a second class citizen compared to MPEG and mixing them together + will confuse the firmware (the end of a VBI DMA is seen as the end of a + MPEG DMA, thus effectively dropping an MPEG frame). So instead we make + sure we only use the MPEG DMA to transfer the VBI DMA if both are in + use. This way no conflicts occur. */ + clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags); + if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length && + s->SG_length + s_vbi->SG_length <= s->buffers) { + ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused); + s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s->SG_length - 1].size) + 256); + for (i = 0; i < s_vbi->SG_length; i++) { + s->SGarray[s->SG_length++] = s_vbi->SGarray[i]; + } + itv->vbi.dma_offset = s_vbi->dma_offset; + s_vbi->SG_length = 0; + set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags); + IVTV_DEBUG_DMA("include DMA for %s\n", s->name); + } + + /* Mark last buffer size for Interrupt flag */ + s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000); + + /* Sync Hardware SG List of buffers */ + ivtv_stream_sync_for_device(s); + write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR); + write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER); + set_bit(IVTV_F_I_DMA, &itv->i_flags); + itv->cur_dma_stream = s->type; + itv->dma_timer.expires = jiffies + HZ / 10; + add_timer(&itv->dma_timer); +} + +static void ivtv_dma_dec_start(struct ivtv_stream *s) +{ + struct ivtv *itv = s->itv; + + if (s->q_predma.bytesused) + ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); + IVTV_DEBUG_DMA("start DMA for %s\n", s->name); + /* put SG Handle into register 0x0c */ + write_reg(s->SG_handle, IVTV_REG_DECDMAADDR); + write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER); + set_bit(IVTV_F_I_DMA, &itv->i_flags); + itv->cur_dma_stream = s->type; + itv->dma_timer.expires = jiffies + HZ / 10; + add_timer(&itv->dma_timer); +} + +static void ivtv_irq_dma_read(struct ivtv *itv) +{ + struct ivtv_stream *s = NULL; + struct ivtv_buffer *buf; + int hw_stream_type; + + IVTV_DEBUG_IRQ("DEC DMA READ\n"); + del_timer(&itv->dma_timer); + if (read_reg(IVTV_REG_DMASTATUS) & 0x14) { + IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS)); + write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); + } + if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { + if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) { + s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; + hw_stream_type = 2; + } + else { + s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; + hw_stream_type = 0; + } + IVTV_DEBUG_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused); + + ivtv_stream_sync_for_cpu(s); + + /* For some reason must kick the firmware, like PIO mode, + I think this tells the firmware we are done and the size + of the xfer so it can calculate what we need next. 
+ I think we can do this part ourselves but would have to + fully calculate xfer info ourselves and not use interrupts + */ + ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused, + hw_stream_type); + + /* Free last DMA call */ + while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) { + ivtv_buf_sync_for_cpu(s, buf); + ivtv_enqueue(s, buf, &s->q_free); + } + wake_up(&s->waitq); + } + clear_bit(IVTV_F_I_UDMA, &itv->i_flags); + clear_bit(IVTV_F_I_DMA, &itv->i_flags); + itv->cur_dma_stream = -1; + wake_up(&itv->dma_waitq); +} + +static void ivtv_irq_enc_dma_complete(struct ivtv *itv) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + struct ivtv_stream *s; + + del_timer(&itv->dma_timer); + ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data); + IVTV_DEBUG_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]); + if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags)) + data[1] = 3; + else if (data[1] > 2) + return; + s = &itv->streams[ivtv_stream_map[data[1]]]; + if (data[0] & 0x18) { + IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]); + write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); + ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]); + } + s->SG_length = 0; + clear_bit(IVTV_F_I_DMA, &itv->i_flags); + itv->cur_dma_stream = -1; + dma_post(s); + ivtv_stream_sync_for_cpu(s); + if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) { + u32 tmp; + + s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; + tmp = s->dma_offset; + s->dma_offset = itv->vbi.dma_offset; + dma_post(s); + s->dma_offset = tmp; + } + wake_up(&itv->dma_waitq); +} + +static void ivtv_irq_dma_err(struct ivtv *itv) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + + del_timer(&itv->dma_timer); + ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data); + IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], + read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); + if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && + itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { + struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; + + /* retry */ + write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); + if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) + ivtv_dma_dec_start(s); + else + ivtv_dma_enc_start(s); + return; + } + clear_bit(IVTV_F_I_UDMA, &itv->i_flags); + clear_bit(IVTV_F_I_DMA, &itv->i_flags); + itv->cur_dma_stream = -1; + wake_up(&itv->dma_waitq); +} + +static void ivtv_irq_enc_start_cap(struct ivtv *itv) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + struct ivtv_stream *s; + + /* Get DMA destination and size arguments from card */ + ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data); + IVTV_DEBUG_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]); + + if (data[0] > 2 || data[1] == 0 || data[2] == 0) { + IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n", + data[0], data[1], data[2]); + return; + } + clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags); + s = &itv->streams[ivtv_stream_map[data[0]]]; + if (!stream_enc_dma_append(s, data)) { + if (ivtv_use_pio(s)) { + dma_post(s); + ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[0]); + } + else { + set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags); + } + } +} + +static void ivtv_irq_enc_vbi_cap(struct ivtv *itv) +{ + struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG]; + u32 data[CX2341X_MBOX_MAX_DATA]; + struct ivtv_stream *s; + + IVTV_DEBUG_IRQ("ENC START VBI CAP\n"); + s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; + + if (ivtv_use_pio(s)) { + if (stream_enc_dma_append(s, data)) + return; + if 
(s->q_predma.bytesused) + ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused); + s->SG_length = 0; + dma_post(s); + return; + } + /* If more than two VBI buffers are pending, then + clear the old ones and start with this new one. + This can happen during transition stages when MPEG capturing is + started, but the first interrupts haven't arrived yet. During + that period VBI requests can accumulate without being able to + DMA the data. Since at most four VBI DMA buffers are available, + we just drop the old requests when there are already three + requests queued. */ + if (s->SG_length > 2) { + struct list_head *p; + list_for_each(p, &s->q_predma.list) { + struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list); + ivtv_buf_sync_for_cpu(s, buf); + } + ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0); + s->SG_length = 0; + } + /* if we can append the data, and the MPEG stream isn't capturing, + then start a DMA request for just the VBI data. */ + if (!stream_enc_dma_append(s, data) && + !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) { + set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags); + set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags); + } +} + +static void ivtv_irq_dev_vbi_reinsert(struct ivtv *itv) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI]; + + IVTV_DEBUG_IRQ("DEC VBI REINSERT\n"); + if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) && + !stream_enc_dma_append(s, data)) { + dma_post(s); + } +} + +static void ivtv_irq_dec_data_req(struct ivtv *itv) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + struct ivtv_stream *s; + + /* YUV or MPG */ + ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data); + + if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) { + itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2; + itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0]; + s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; + } + else { + itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2]; + itv->dma_data_req_offset = data[1]; + s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; + } + IVTV_DEBUG_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused, + itv->dma_data_req_offset, itv->dma_data_req_size); + if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) { + set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); + } + else { + clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); + ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size); + ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0); + } +} + +static void ivtv_irq_vsync(struct ivtv *itv) +{ + /* The vsync interrupt is unusual in that it won't clear until + * the end of the first line for the current field, at which + * point it clears itself. This can result in repeated vsync + * interrupts, or a missed vsync. Read some of the registers + * to determine the line being displayed and ensure we handle + * one vsync per frame. 
+ */ + unsigned int frame = read_reg(0x28c0) & 1; + int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame); + + if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n"); + + if (((frame ^ itv->yuv_info.lace_sync_field) == 0 && ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.lace_sync_field)) || + (frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) { + int next_dma_frame = last_dma_frame; + + if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) { + write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c); + write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830); + write_reg(yuv_offset[next_dma_frame] >> 4, 0x834); + write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838); + next_dma_frame = (next_dma_frame + 1) & 0x3; + atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame); + } + } + if (frame != (itv->lastVsyncFrame & 1)) { + struct ivtv_stream *s = ivtv_get_output_stream(itv); + + itv->lastVsyncFrame += 1; + if (frame == 0) { + clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags); + clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags); + } + else { + set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags); + } + if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) { + set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags); + wake_up(&itv->event_waitq); + } + wake_up(&itv->vsync_waitq); + if (s) + wake_up(&s->waitq); + + /* Send VBI to saa7127 */ + if (frame) + vbi_schedule_work(itv); + + /* Check if we need to update the yuv registers */ + if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) { + if (!itv->yuv_info.new_frame_info[last_dma_frame].update) + last_dma_frame = (last_dma_frame - 1) & 3; + + if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) { + itv->yuv_info.update_frame = last_dma_frame; + itv->yuv_info.new_frame_info[last_dma_frame].update = 0; + itv->yuv_info.yuv_forced_update = 0; + queue_work(itv->yuv_info.work_queues, &itv->yuv_info.work_queue); + } + } + } +} + +#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ) + +irqreturn_t ivtv_irq_handler(int irq, void *dev_id) +{ + struct ivtv *itv = (struct ivtv *)dev_id; + u32 combo; + u32 stat; + int i; + u8 vsync_force = 0; + + spin_lock(&itv->dma_reg_lock); + /* get contents of irq status register */ + stat = read_reg(IVTV_REG_IRQSTATUS); + + combo = ~itv->irqmask & stat; + + /* Clear out IRQ */ + if (combo) write_reg(combo, IVTV_REG_IRQSTATUS); + + if (0 == combo) { + /* The vsync interrupt is unusual and clears itself. If we + * took too long, we may have missed it. 
Do some checks + */ + if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { + /* vsync is enabled, see if we're in a new field */ + if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) { + /* New field, looks like we missed it */ + IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16); + vsync_force = 1; + } + } + + if (!vsync_force) { + /* No Vsync expected, wasn't for us */ + spin_unlock(&itv->dma_reg_lock); + return IRQ_NONE; + } + } + + /* Exclude interrupts noted below from the output, otherwise the log is flooded with + these messages */ + if (combo & ~0xff6d0400) + IVTV_DEBUG_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo); + + if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) { + IVTV_DEBUG_IRQ("DEC DMA COMPLETE\n"); + } + + if (combo & IVTV_IRQ_DMA_READ) { + ivtv_irq_dma_read(itv); + } + + if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) { + ivtv_irq_enc_dma_complete(itv); + } + + if (combo & IVTV_IRQ_DMA_ERR) { + ivtv_irq_dma_err(itv); + } + + if (combo & IVTV_IRQ_ENC_START_CAP) { + ivtv_irq_enc_start_cap(itv); + } + + if (combo & IVTV_IRQ_ENC_VBI_CAP) { + ivtv_irq_enc_vbi_cap(itv); + } + + if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) { + ivtv_irq_dev_vbi_reinsert(itv); + } + + if (combo & IVTV_IRQ_ENC_EOS) { + IVTV_DEBUG_IRQ("ENC EOS\n"); + set_bit(IVTV_F_I_EOS, &itv->i_flags); + wake_up(&itv->cap_w); + } + + if (combo & IVTV_IRQ_DEC_DATA_REQ) { + ivtv_irq_dec_data_req(itv); + } + + /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */ + if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) { + ivtv_irq_vsync(itv); + } + + if (combo & IVTV_IRQ_ENC_VIM_RST) { + IVTV_DEBUG_IRQ("VIM RST\n"); + /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */ + } + + if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) { + IVTV_DEBUG_INFO("Stereo mode changed\n"); + } + + if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) { + for (i = 0; i < IVTV_MAX_STREAMS; i++) { + int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS; + struct ivtv_stream *s = &itv->streams[idx]; + + if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) + continue; + if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) + ivtv_dma_dec_start(s); + else + ivtv_dma_enc_start(s); + break; + } + if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) { + ivtv_udma_start(itv); + } + } + + spin_unlock(&itv->dma_reg_lock); + + /* If we've just handled a 'forced' vsync, it's safest to say it + * wasn't ours. Another device may have triggered it at just + * the right time. + */ + return vsync_force ? 
IRQ_NONE : IRQ_HANDLED; +} + +void ivtv_unfinished_dma(unsigned long arg) +{ + struct ivtv *itv = (struct ivtv *)arg; + + if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) + return; + IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); + + write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); + clear_bit(IVTV_F_I_UDMA, &itv->i_flags); + clear_bit(IVTV_F_I_DMA, &itv->i_flags); + itv->cur_dma_stream = -1; + wake_up(&itv->dma_waitq); +} diff --git a/drivers/media/video/ivtv/ivtv-irq.h b/drivers/media/video/ivtv/ivtv-irq.h new file mode 100644 index 000000000000..ed96205e87a2 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-irq.h @@ -0,0 +1,24 @@ +/* + interrupt handling + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +irqreturn_t ivtv_irq_handler(int irq, void *dev_id); +void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock); +void ivtv_unfinished_dma(unsigned long arg); diff --git a/drivers/media/video/ivtv/ivtv-mailbox.c b/drivers/media/video/ivtv/ivtv-mailbox.c new file mode 100644 index 000000000000..6ae42a3b03cc --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-mailbox.c @@ -0,0 +1,360 @@ +/* + mailbox functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include + +#include "ivtv-driver.h" +#include "ivtv-mailbox.h" + +/* Firmware mailbox flags*/ +#define IVTV_MBOX_FIRMWARE_DONE 0x00000004 +#define IVTV_MBOX_DRIVER_DONE 0x00000002 +#define IVTV_MBOX_DRIVER_BUSY 0x00000001 +#define IVTV_MBOX_FREE 0x00000000 + +/* Firmware mailbox standard timeout */ +#define IVTV_API_STD_TIMEOUT 0x02000000 + +#define API_CACHE (1 << 0) /* Allow the command to be stored in the cache */ +#define API_RESULT (1 << 1) /* Allow 1 second for this cmd to end */ +#define API_FAST_RESULT (3 << 1) /* Allow 0.1 second for this cmd to end */ +#define API_DMA (1 << 3) /* DMA mailbox, has special handling */ +#define API_NO_WAIT_MB (1 << 4) /* Command may not wait for a free mailbox */ +#define API_NO_WAIT_RES (1 << 5) /* Command may not wait for the result */ + +struct ivtv_api_info { + int flags; /* Flags, see above */ + const char *name; /* The name of the command */ +}; + +#define API_ENTRY(x, f) [x] = { (f), #x } + +static const struct ivtv_api_info api_info[256] = { + /* MPEG encoder API */ + API_ENTRY(CX2341X_ENC_PING_FW, API_FAST_RESULT), + API_ENTRY(CX2341X_ENC_START_CAPTURE, API_RESULT), + API_ENTRY(CX2341X_ENC_STOP_CAPTURE, API_RESULT), + API_ENTRY(CX2341X_ENC_SET_AUDIO_ID, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_VIDEO_ID, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_PCR_ID, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_FRAME_RATE, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_FRAME_SIZE, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_BIT_RATE, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_GOP_PROPERTIES, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_ASPECT_RATIO, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_MODE, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_PROPS, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_CORING_LEVELS, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_VBI_LINE, API_RESULT), + API_ENTRY(CX2341X_ENC_SET_STREAM_TYPE, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_OUTPUT_PORT, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_AUDIO_PROPERTIES, API_CACHE), + API_ENTRY(CX2341X_ENC_HALT_FW, API_FAST_RESULT), + API_ENTRY(CX2341X_ENC_GET_VERSION, API_FAST_RESULT), + API_ENTRY(CX2341X_ENC_SET_GOP_CLOSURE, API_CACHE), + API_ENTRY(CX2341X_ENC_GET_SEQ_END, API_RESULT), + API_ENTRY(CX2341X_ENC_SET_PGM_INDEX_INFO, API_FAST_RESULT), + API_ENTRY(CX2341X_ENC_SET_VBI_CONFIG, API_RESULT), + API_ENTRY(CX2341X_ENC_SET_DMA_BLOCK_SIZE, API_CACHE), + API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_10, API_FAST_RESULT), + API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_9, API_FAST_RESULT), + API_ENTRY(CX2341X_ENC_SCHED_DMA_TO_HOST, API_DMA), + API_ENTRY(CX2341X_ENC_INITIALIZE_INPUT, API_RESULT), + API_ENTRY(CX2341X_ENC_SET_FRAME_DROP_RATE, API_CACHE), + API_ENTRY(CX2341X_ENC_PAUSE_ENCODER, API_RESULT), + API_ENTRY(CX2341X_ENC_REFRESH_INPUT, API_NO_WAIT_MB), + API_ENTRY(CX2341X_ENC_SET_COPYRIGHT, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_EVENT_NOTIFICATION, API_RESULT), + API_ENTRY(CX2341X_ENC_SET_NUM_VSYNC_LINES, API_CACHE), + API_ENTRY(CX2341X_ENC_SET_PLACEHOLDER, API_CACHE), + API_ENTRY(CX2341X_ENC_MUTE_VIDEO, API_RESULT), + API_ENTRY(CX2341X_ENC_MUTE_AUDIO, API_RESULT), + API_ENTRY(CX2341X_ENC_SET_VERT_CROP_LINE, API_FAST_RESULT), + API_ENTRY(CX2341X_ENC_MISC, API_FAST_RESULT), + /* Obsolete PULLDOWN API command */ + API_ENTRY(0xb1, API_CACHE), + + /* MPEG 
decoder API */ + API_ENTRY(CX2341X_DEC_PING_FW, API_FAST_RESULT), + API_ENTRY(CX2341X_DEC_START_PLAYBACK, API_RESULT), + API_ENTRY(CX2341X_DEC_STOP_PLAYBACK, API_RESULT), + API_ENTRY(CX2341X_DEC_SET_PLAYBACK_SPEED, API_RESULT), + API_ENTRY(CX2341X_DEC_STEP_VIDEO, API_RESULT), + API_ENTRY(CX2341X_DEC_SET_DMA_BLOCK_SIZE, API_CACHE), + API_ENTRY(CX2341X_DEC_GET_XFER_INFO, API_FAST_RESULT), + API_ENTRY(CX2341X_DEC_GET_DMA_STATUS, API_FAST_RESULT), + API_ENTRY(CX2341X_DEC_SCHED_DMA_FROM_HOST, API_DMA), + API_ENTRY(CX2341X_DEC_PAUSE_PLAYBACK, API_RESULT), + API_ENTRY(CX2341X_DEC_HALT_FW, API_FAST_RESULT), + API_ENTRY(CX2341X_DEC_SET_STANDARD, API_CACHE), + API_ENTRY(CX2341X_DEC_GET_VERSION, API_FAST_RESULT), + API_ENTRY(CX2341X_DEC_SET_STREAM_INPUT, API_CACHE), + API_ENTRY(CX2341X_DEC_GET_TIMING_INFO, API_RESULT /*| API_NO_WAIT_RES*/), + API_ENTRY(CX2341X_DEC_SET_AUDIO_MODE, API_CACHE), + API_ENTRY(CX2341X_DEC_SET_EVENT_NOTIFICATION, API_RESULT), + API_ENTRY(CX2341X_DEC_SET_DISPLAY_BUFFERS, API_CACHE), + API_ENTRY(CX2341X_DEC_EXTRACT_VBI, API_RESULT), + API_ENTRY(CX2341X_DEC_SET_DECODER_SOURCE, API_FAST_RESULT), + API_ENTRY(CX2341X_DEC_SET_PREBUFFERING, API_CACHE), + + /* OSD API */ + API_ENTRY(CX2341X_OSD_GET_FRAMEBUFFER, API_FAST_RESULT), + API_ENTRY(CX2341X_OSD_GET_PIXEL_FORMAT, API_FAST_RESULT), + API_ENTRY(CX2341X_OSD_SET_PIXEL_FORMAT, API_CACHE), + API_ENTRY(CX2341X_OSD_GET_STATE, API_FAST_RESULT), + API_ENTRY(CX2341X_OSD_SET_STATE, API_CACHE), + API_ENTRY(CX2341X_OSD_GET_OSD_COORDS, API_FAST_RESULT), + API_ENTRY(CX2341X_OSD_SET_OSD_COORDS, API_CACHE), + API_ENTRY(CX2341X_OSD_GET_SCREEN_COORDS, API_FAST_RESULT), + API_ENTRY(CX2341X_OSD_SET_SCREEN_COORDS, API_CACHE), + API_ENTRY(CX2341X_OSD_GET_GLOBAL_ALPHA, API_FAST_RESULT), + API_ENTRY(CX2341X_OSD_SET_GLOBAL_ALPHA, API_CACHE), + API_ENTRY(CX2341X_OSD_SET_BLEND_COORDS, API_CACHE), + API_ENTRY(CX2341X_OSD_GET_FLICKER_STATE, API_FAST_RESULT), + API_ENTRY(CX2341X_OSD_SET_FLICKER_STATE, API_CACHE), + API_ENTRY(CX2341X_OSD_BLT_COPY, API_RESULT), + API_ENTRY(CX2341X_OSD_BLT_FILL, API_RESULT), + API_ENTRY(CX2341X_OSD_BLT_TEXT, API_RESULT), + API_ENTRY(CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, API_CACHE), + API_ENTRY(CX2341X_OSD_SET_CHROMA_KEY, API_CACHE), + API_ENTRY(CX2341X_OSD_GET_ALPHA_CONTENT_INDEX, API_FAST_RESULT), + API_ENTRY(CX2341X_OSD_SET_ALPHA_CONTENT_INDEX, API_CACHE) +}; + +static int try_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int mb) +{ + u32 flags = readl(&mbdata->mbox[mb].flags); + int is_free = flags == IVTV_MBOX_FREE || (flags & IVTV_MBOX_FIRMWARE_DONE); + + /* if the mailbox is free, then try to claim it */ + if (is_free && !test_and_set_bit(mb, &mbdata->busy)) { + write_sync(IVTV_MBOX_DRIVER_BUSY, &mbdata->mbox[mb].flags); + return 1; + } + return 0; +} + +/* Try to find a free mailbox. Note mailbox 0 is reserved for DMA and so is not + attempted here. */ +static int get_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int flags) +{ + unsigned long then = jiffies; + int i, mb; + int max_mbox = mbdata->max_mbox; + int retries = 100; + + /* All slow commands use the same mailbox, serializing them and also + leaving the other mailbox free for simple fast commands. 
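+ Note: API_RESULT is (1 << 1) and API_FAST_RESULT is (3 << 1), so the
+ test below matches exactly those commands that want a result but are
+ not marked fast, i.e. the slow ones, and forces them into mailbox 1.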
*/ + if ((flags & API_FAST_RESULT) == API_RESULT) + max_mbox = 1; + + /* find free non-DMA mailbox */ + for (i = 0; i < retries; i++) { + for (mb = 1; mb <= max_mbox; mb++) + if (try_mailbox(itv, mbdata, mb)) + return mb; + + /* Sleep before a retry, if not atomic */ + if (!(flags & API_NO_WAIT_MB)) { + if (jiffies - then > retries * HZ / 100) + break; + ivtv_sleep_timeout(HZ / 100, 0); + } + } + return -ENODEV; +} + +static void write_mailbox(volatile struct ivtv_mailbox __iomem *mbox, int cmd, int args, u32 data[]) +{ + int i; + + write_sync(cmd, &mbox->cmd); + write_sync(IVTV_API_STD_TIMEOUT, &mbox->timeout); + + for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++) + write_sync(data[i], &mbox->data[i]); + + write_sync(IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_DRIVER_BUSY, &mbox->flags); +} + +static void clear_all_mailboxes(struct ivtv *itv, struct ivtv_mailbox_data *mbdata) +{ + int i; + + for (i = 0; i <= mbdata->max_mbox; i++) { + IVTV_DEBUG_WARN("Clearing mailbox %d: cmd 0x%08x flags 0x%08x\n", + i, readl(&mbdata->mbox[i].cmd), readl(&mbdata->mbox[i].flags)); + write_sync(0, &mbdata->mbox[i].flags); + clear_bit(i, &mbdata->busy); + } +} + +static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[]) +{ + struct ivtv_mailbox_data *mbdata = (cmd >= 128) ? &itv->enc_mbox : &itv->dec_mbox; + volatile struct ivtv_mailbox __iomem *mbox; + int api_timeout = HZ; + int flags, mb, i; + unsigned long then; + + /* sanity checks */ + if (NULL == mbdata) { + IVTV_ERR("No mailbox allocated\n"); + return -ENODEV; + } + if (args < 0 || args > CX2341X_MBOX_MAX_DATA || + cmd < 0 || cmd > 255 || api_info[cmd].name == NULL) { + IVTV_ERR("Invalid API call: cmd = 0x%02x, args = %d\n", cmd, args); + return -EINVAL; + } + + IVTV_DEBUG_API("API Call: %s\n", api_info[cmd].name); + + /* clear possibly uninitialized part of data array */ + for (i = args; i < CX2341X_MBOX_MAX_DATA; i++) + data[i] = 0; + + /* If this command was issued within the last 30 minutes and with identical + data, then just return 0 as there is no need to issue this command again. + Just an optimization to prevent unnecessary use of mailboxes. 
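+ (HZ * 1800 jiffies is the 30 minutes mentioned above. Only commands
+ flagged API_CACHE ever have their data stored, see the memcpy further
+ down, so this shortcut can only trigger for cacheable commands.)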
*/ + if (itv->api_cache[cmd].last_jiffies && + jiffies - itv->api_cache[cmd].last_jiffies < HZ * 1800 && + !memcmp(data, itv->api_cache[cmd].data, sizeof(itv->api_cache[cmd].data))) { + itv->api_cache[cmd].last_jiffies = jiffies; + return 0; + } + + flags = api_info[cmd].flags; + + if (flags & API_DMA) { + for (i = 0; i < 100; i++) { + mb = i % (mbdata->max_mbox + 1); + if (try_mailbox(itv, mbdata, mb)) { + write_mailbox(&mbdata->mbox[mb], cmd, args, data); + clear_bit(mb, &mbdata->busy); + return 0; + } + IVTV_DEBUG_WARN("%s: mailbox %d not free %08x\n", + api_info[cmd].name, mb, readl(&mbdata->mbox[mb].flags)); + } + IVTV_WARN("Could not find free DMA mailbox for %s\n", api_info[cmd].name); + clear_all_mailboxes(itv, mbdata); + return -EBUSY; + } + + if ((flags & API_FAST_RESULT) == API_FAST_RESULT) + api_timeout = HZ / 10; + + mb = get_mailbox(itv, mbdata, flags); + if (mb < 0) { + IVTV_DEBUG_WARN("No free mailbox found (%s)\n", api_info[cmd].name); + clear_all_mailboxes(itv, mbdata); + return -EBUSY; + } + mbox = &mbdata->mbox[mb]; + write_mailbox(mbox, cmd, args, data); + if (flags & API_CACHE) { + memcpy(itv->api_cache[cmd].data, data, sizeof(itv->api_cache[cmd].data)); + itv->api_cache[cmd].last_jiffies = jiffies; + } + if ((flags & API_RESULT) == 0) { + clear_bit(mb, &mbdata->busy); + return 0; + } + + /* Get results */ + then = jiffies; + + while (!(readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE)) { + if (jiffies - then > api_timeout) { + IVTV_DEBUG_WARN("Could not get result (%s)\n", api_info[cmd].name); + /* reset the mailbox, but it is likely too late already */ + write_sync(0, &mbox->flags); + clear_bit(mb, &mbdata->busy); + return -EIO; + } + if (flags & API_NO_WAIT_RES) + mdelay(1); + else + ivtv_sleep_timeout(HZ / 100, 0); + } + if (jiffies - then > HZ / 10) + IVTV_DEBUG_WARN("%s took %lu jiffies (%d per HZ)\n", + api_info[cmd].name, jiffies - then, HZ); + + for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++) + data[i] = readl(&mbox->data[i]); + write_sync(0, &mbox->flags); + clear_bit(mb, &mbdata->busy); + return 0; +} + +int ivtv_api(struct ivtv *itv, int cmd, int args, u32 data[]) +{ + int res = ivtv_api_call(itv, cmd, args, data); + + /* Allow a single retry, probably already too late though. + If there is no free mailbox then that is usually an indication + of a more serious problem. */ + return (res == -EBUSY) ? ivtv_api_call(itv, cmd, args, data) : res; +} + +int ivtv_api_func(void *priv, int cmd, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA]) +{ + return ivtv_api(priv, cmd, in, data); +} + +int ivtv_vapi_result(struct ivtv *itv, u32 data[CX2341X_MBOX_MAX_DATA], int cmd, int args, ...) +{ + va_list ap; + int i; + + va_start(ap, args); + for (i = 0; i < args; i++) { + data[i] = va_arg(ap, u32); + } + va_end(ap); + return ivtv_api(itv, cmd, args, data); +} + +int ivtv_vapi(struct ivtv *itv, int cmd, int args, ...) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + va_list ap; + int i; + + va_start(ap, args); + for (i = 0; i < args; i++) { + data[i] = va_arg(ap, u32); + } + va_end(ap); + return ivtv_api(itv, cmd, args, data); +} + +/* This one is for stuff that can't sleep.. irq handlers, etc.. 
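+ It only reads the mailbox data words with readl() and never waits or
+ touches the busy bits, so it is safe to call in atomic context.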
*/ +void ivtv_api_get_data(struct ivtv_mailbox_data *mbdata, int mb, u32 data[]) +{ + int i; + + for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++) + data[i] = readl(&mbdata->mbox[mb].data[i]); +} diff --git a/drivers/media/video/ivtv/ivtv-mailbox.h b/drivers/media/video/ivtv/ivtv-mailbox.h new file mode 100644 index 000000000000..79b8aec14370 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-mailbox.h @@ -0,0 +1,25 @@ +/* + mailbox functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +void ivtv_api_get_data(struct ivtv_mailbox_data *mbox, int mb, u32 data[]); +int ivtv_api(struct ivtv *itv, int cmd, int args, u32 data[]); +int ivtv_vapi_result(struct ivtv *itv, u32 data[CX2341X_MBOX_MAX_DATA], int cmd, int args, ...); +int ivtv_vapi(struct ivtv *itv, int cmd, int args, ...); +int ivtv_api_func(void *priv, int cmd, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA]); diff --git a/drivers/media/video/ivtv/ivtv-queue.c b/drivers/media/video/ivtv/ivtv-queue.c new file mode 100644 index 000000000000..ccfcef1ad91a --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-queue.c @@ -0,0 +1,262 @@ +/* + buffer queues. + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-streams.h" +#include "ivtv-queue.h" +#include "ivtv-mailbox.h" + +int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes) +{ + if (s->buf_size - buf->bytesused < copybytes) + copybytes = s->buf_size - buf->bytesused; + if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) { + return -EFAULT; + } + buf->bytesused += copybytes; + return copybytes; +} + +void ivtv_buf_swap(struct ivtv_buffer *buf) +{ + int i; + + for (i = 0; i < buf->bytesused; i += 4) + swab32s((u32 *)(buf->buf + i)); +} + +void ivtv_queue_init(struct ivtv_queue *q) +{ + INIT_LIST_HEAD(&q->list); + q->buffers = 0; + q->length = 0; + q->bytesused = 0; +} + +void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q) +{ + unsigned long flags = 0; + + /* clear the buffer if it is going to be enqueued to the free queue */ + if (q == &s->q_free) { + buf->bytesused = 0; + buf->readpos = 0; + buf->b_flags = 0; + } + spin_lock_irqsave(&s->qlock, flags); + list_add_tail(&buf->list, &q->list); + q->buffers++; + q->length += s->buf_size; + q->bytesused += buf->bytesused - buf->readpos; + spin_unlock_irqrestore(&s->qlock, flags); +} + +struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q) +{ + struct ivtv_buffer *buf = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&s->qlock, flags); + if (!list_empty(&q->list)) { + buf = list_entry(q->list.next, struct ivtv_buffer, list); + list_del_init(q->list.next); + q->buffers--; + q->length -= s->buf_size; + q->bytesused -= buf->bytesused - buf->readpos; + } + spin_unlock_irqrestore(&s->qlock, flags); + return buf; +} + +static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from, + struct ivtv_queue *to, int clear, int full) +{ + struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list); + + list_move_tail(from->list.next, &to->list); + from->buffers--; + from->length -= s->buf_size; + from->bytesused -= buf->bytesused - buf->readpos; + /* special handling for q_free */ + if (clear) + buf->bytesused = buf->readpos = buf->b_flags = 0; + else if (full) { + /* special handling for stolen buffers, assume + all bytes are used. */ + buf->bytesused = s->buf_size; + buf->readpos = buf->b_flags = 0; + } + to->buffers++; + to->length += s->buf_size; + to->bytesused += buf->bytesused - buf->readpos; +} + +/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'. + If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'. + If 'steal' != NULL, then buffers may also taken from that queue if + needed. + + The buffer is automatically cleared if it goes to the free queue. It is + also cleared if buffers need to be taken from the 'steal' queue and + the 'from' queue is the free queue. + + When 'from' is q_free, then needed_bytes is compared to the total + available buffer length, otherwise needed_bytes is compared to the + bytesused value. For the 'steal' queue the total available buffer + length is always used. + + -ENOMEM is returned if the buffers could not be obtained, 0 if all + buffers where obtained from the 'from' list and if non-zero then + the number of stolen buffers is returned. 
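+ For example, ivtv_flush_queues() below uses the 'move all' form
+ (needed_bytes == 0) to return every buffer to the free queue:
+	ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);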
*/ +int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal, + struct ivtv_queue *to, int needed_bytes) +{ + unsigned long flags; + int rc = 0; + int from_free = from == &s->q_free; + int to_free = to == &s->q_free; + int bytes_available; + + spin_lock_irqsave(&s->qlock, flags); + if (needed_bytes == 0) { + from_free = 1; + needed_bytes = from->length; + } + + bytes_available = from_free ? from->length : from->bytesused; + bytes_available += steal ? steal->length : 0; + + if (bytes_available < needed_bytes) { + spin_unlock_irqrestore(&s->qlock, flags); + return -ENOMEM; + } + if (from_free) { + u32 old_length = to->length; + + while (to->length - old_length < needed_bytes) { + if (list_empty(&from->list)) + from = steal; + if (from == steal) + rc++; /* keep track of 'stolen' buffers */ + ivtv_queue_move_buf(s, from, to, 1, 0); + } + } + else { + u32 old_bytesused = to->bytesused; + + while (to->bytesused - old_bytesused < needed_bytes) { + if (list_empty(&from->list)) + from = steal; + if (from == steal) + rc++; /* keep track of 'stolen' buffers */ + ivtv_queue_move_buf(s, from, to, to_free, rc); + } + } + spin_unlock_irqrestore(&s->qlock, flags); + return rc; +} + +void ivtv_flush_queues(struct ivtv_stream *s) +{ + ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0); + ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0); + ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0); + ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0); +} + +int ivtv_stream_alloc(struct ivtv_stream *s) +{ + struct ivtv *itv = s->itv; + int SGsize = sizeof(struct ivtv_SG_element) * s->buffers; + int i; + + if (s->buffers == 0) + return 0; + + IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n", + s->dma != PCI_DMA_NONE ? "DMA " : "", + s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024); + + /* Allocate DMA SG Arrays */ + if (s->dma != PCI_DMA_NONE) { + s->SGarray = (struct ivtv_SG_element *)kzalloc(SGsize, GFP_KERNEL); + if (s->SGarray == NULL) { + IVTV_ERR("Could not allocate SGarray for %s stream\n", s->name); + return -ENOMEM; + } + s->SG_length = 0; + s->SG_handle = pci_map_single(itv->dev, s->SGarray, SGsize, s->dma); + ivtv_stream_sync_for_cpu(s); + } + + /* allocate stream buffers. Initially all buffers are in q_free. 
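+ Each buffer is allocated and DMA-mapped with 256 extra bytes
+ (s->buf_size + 256); ivtv_stream_free() below unmaps with the same
+ padded size. If any allocation fails, everything allocated so far
+ is released again via ivtv_stream_free().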
*/ + for (i = 0; i < s->buffers; i++) { + struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer), GFP_KERNEL); + + if (buf == NULL) + break; + buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL); + if (buf->buf == NULL) { + kfree(buf); + break; + } + INIT_LIST_HEAD(&buf->list); + if (s->dma != PCI_DMA_NONE) { + buf->dma_handle = pci_map_single(s->itv->dev, + buf->buf, s->buf_size + 256, s->dma); + ivtv_buf_sync_for_cpu(s, buf); + } + ivtv_enqueue(s, buf, &s->q_free); + } + if (i == s->buffers) + return 0; + IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name); + ivtv_stream_free(s); + return -ENOMEM; +} + +void ivtv_stream_free(struct ivtv_stream *s) +{ + struct ivtv_buffer *buf; + + /* move all buffers to q_free */ + ivtv_flush_queues(s); + + /* empty q_free */ + while ((buf = ivtv_dequeue(s, &s->q_free))) { + if (s->dma != PCI_DMA_NONE) + pci_unmap_single(s->itv->dev, buf->dma_handle, + s->buf_size + 256, s->dma); + kfree(buf->buf); + kfree(buf); + } + + /* Free SG Array/Lists */ + if (s->SGarray != NULL) { + if (s->SG_handle != IVTV_DMA_UNMAPPED) { + pci_unmap_single(s->itv->dev, s->SG_handle, + sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE); + s->SG_handle = IVTV_DMA_UNMAPPED; + } + s->SGarray = NULL; + s->SG_length = 0; + } +} diff --git a/drivers/media/video/ivtv/ivtv-queue.h b/drivers/media/video/ivtv/ivtv-queue.h new file mode 100644 index 000000000000..903edd4b4381 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-queue.h @@ -0,0 +1,64 @@ +/* + buffer queues. + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define IVTV_DMA_UNMAPPED ((u32) -1) + +/* ivtv_buffer utility functions */ +static inline void ivtv_buf_sync_for_cpu(struct ivtv_stream *s, struct ivtv_buffer *buf) +{ + if (s->dma != PCI_DMA_NONE) + pci_dma_sync_single_for_cpu(s->itv->dev, buf->dma_handle, + s->buf_size + 256, s->dma); +} + +static inline void ivtv_buf_sync_for_device(struct ivtv_stream *s, struct ivtv_buffer *buf) +{ + if (s->dma != PCI_DMA_NONE) + pci_dma_sync_single_for_device(s->itv->dev, buf->dma_handle, + s->buf_size + 256, s->dma); +} + +int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes); +void ivtv_buf_swap(struct ivtv_buffer *buf); + +/* ivtv_queue utility functions */ +void ivtv_queue_init(struct ivtv_queue *q); +void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q); +struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q); +int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal, + struct ivtv_queue *to, int needed_bytes); +void ivtv_flush_queues(struct ivtv_stream *s); + +/* ivtv_stream utility functions */ +int ivtv_stream_alloc(struct ivtv_stream *s); +void ivtv_stream_free(struct ivtv_stream *s); + +static inline void ivtv_stream_sync_for_cpu(struct ivtv_stream *s) +{ + pci_dma_sync_single_for_cpu(s->itv->dev, s->SG_handle, + sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE); +} + +static inline void ivtv_stream_sync_for_device(struct ivtv_stream *s) +{ + pci_dma_sync_single_for_device(s->itv->dev, s->SG_handle, + sizeof(struct ivtv_SG_element) * s->buffers, PCI_DMA_TODEVICE); +} diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c new file mode 100644 index 000000000000..73a1c933d8f7 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-streams.c @@ -0,0 +1,977 @@ +/* + init/start/stop/exit stream functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* License: GPL + * Author: Kevin Thayer + * + * This file will hold API related functions, both internal (firmware api) + * and external (v4l2, etc) + * + * ----- + * MPG600/MPG160 support by T.Adachi + * and Takeru KOMORIYA + * + * AVerMedia M179 GPIO info by Chris Pinkham + * using information provided by Jiun-Kuei Jung @ AVerMedia. 
+ */ + +#include "ivtv-driver.h" +#include "ivtv-fileops.h" +#include "ivtv-i2c.h" +#include "ivtv-queue.h" +#include "ivtv-mailbox.h" +#include "ivtv-audio.h" +#include "ivtv-video.h" +#include "ivtv-vbi.h" +#include "ivtv-ioctl.h" +#include "ivtv-irq.h" +#include "ivtv-streams.h" +#include "ivtv-cards.h" + +static struct file_operations ivtv_v4l2_enc_fops = { + .owner = THIS_MODULE, + .read = ivtv_v4l2_read, + .write = ivtv_v4l2_write, + .open = ivtv_v4l2_open, + .ioctl = ivtv_v4l2_ioctl, + .release = ivtv_v4l2_close, + .poll = ivtv_v4l2_enc_poll, +}; + +static struct file_operations ivtv_v4l2_dec_fops = { + .owner = THIS_MODULE, + .read = ivtv_v4l2_read, + .write = ivtv_v4l2_write, + .open = ivtv_v4l2_open, + .ioctl = ivtv_v4l2_ioctl, + .release = ivtv_v4l2_close, + .poll = ivtv_v4l2_dec_poll, +}; + +struct { + const char *name; + int vfl_type; + int minor_offset; + int dma, pio; + enum v4l2_buf_type buf_type; + struct file_operations *fops; +} ivtv_stream_info[] = { + { /* IVTV_ENC_STREAM_TYPE_MPG */ + "encoder MPEG", + VFL_TYPE_GRABBER, 0, + PCI_DMA_FROMDEVICE, 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, + &ivtv_v4l2_enc_fops + }, + { /* IVTV_ENC_STREAM_TYPE_YUV */ + "encoder YUV", + VFL_TYPE_GRABBER, IVTV_V4L2_ENC_YUV_OFFSET, + PCI_DMA_FROMDEVICE, 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, + &ivtv_v4l2_enc_fops + }, + { /* IVTV_ENC_STREAM_TYPE_VBI */ + "encoder VBI", + VFL_TYPE_VBI, 0, + PCI_DMA_FROMDEVICE, 0, V4L2_BUF_TYPE_VBI_CAPTURE, + &ivtv_v4l2_enc_fops + }, + { /* IVTV_ENC_STREAM_TYPE_PCM */ + "encoder PCM audio", + VFL_TYPE_GRABBER, IVTV_V4L2_ENC_PCM_OFFSET, + PCI_DMA_FROMDEVICE, 0, V4L2_BUF_TYPE_PRIVATE, + &ivtv_v4l2_enc_fops + }, + { /* IVTV_ENC_STREAM_TYPE_RAD */ + "encoder radio", + VFL_TYPE_RADIO, 0, + PCI_DMA_NONE, 1, V4L2_BUF_TYPE_PRIVATE, + &ivtv_v4l2_enc_fops + }, + { /* IVTV_DEC_STREAM_TYPE_MPG */ + "decoder MPEG", + VFL_TYPE_GRABBER, IVTV_V4L2_DEC_MPG_OFFSET, + PCI_DMA_TODEVICE, 0, V4L2_BUF_TYPE_VIDEO_OUTPUT, + &ivtv_v4l2_dec_fops + }, + { /* IVTV_DEC_STREAM_TYPE_VBI */ + "decoder VBI", + VFL_TYPE_VBI, IVTV_V4L2_DEC_VBI_OFFSET, + PCI_DMA_NONE, 1, V4L2_BUF_TYPE_VBI_CAPTURE, + &ivtv_v4l2_enc_fops + }, + { /* IVTV_DEC_STREAM_TYPE_VOUT */ + "decoder VOUT", + VFL_TYPE_VBI, IVTV_V4L2_DEC_VOUT_OFFSET, + PCI_DMA_NONE, 1, V4L2_BUF_TYPE_VBI_OUTPUT, + &ivtv_v4l2_dec_fops + }, + { /* IVTV_DEC_STREAM_TYPE_YUV */ + "decoder YUV", + VFL_TYPE_GRABBER, IVTV_V4L2_DEC_YUV_OFFSET, + PCI_DMA_TODEVICE, 0, V4L2_BUF_TYPE_VIDEO_OUTPUT, + &ivtv_v4l2_dec_fops + } +}; + +static void ivtv_stream_init(struct ivtv *itv, int type) +{ + struct ivtv_stream *s = &itv->streams[type]; + struct video_device *dev = s->v4l2dev; + + /* we need to keep v4l2dev, so restore it afterwards */ + memset(s, 0, sizeof(*s)); + s->v4l2dev = dev; + + /* initialize ivtv_stream fields */ + s->itv = itv; + s->type = type; + s->name = ivtv_stream_info[type].name; + + if (ivtv_stream_info[type].pio) + s->dma = PCI_DMA_NONE; + else + s->dma = ivtv_stream_info[type].dma; + s->buf_size = itv->stream_buf_size[type]; + if (s->buf_size) + s->buffers = itv->options.megabytes[type] * 1024 * 1024 / s->buf_size; + spin_lock_init(&s->qlock); + init_waitqueue_head(&s->waitq); + s->id = -1; + s->SG_handle = IVTV_DMA_UNMAPPED; + ivtv_queue_init(&s->q_free); + ivtv_queue_init(&s->q_full); + ivtv_queue_init(&s->q_dma); + ivtv_queue_init(&s->q_predma); + ivtv_queue_init(&s->q_io); +} + +static int ivtv_reg_dev(struct ivtv *itv, int type) +{ + struct ivtv_stream *s = &itv->streams[type]; + int vfl_type = ivtv_stream_info[type].vfl_type; + int minor_offset = 
ivtv_stream_info[type].minor_offset; + int minor; + + /* These four fields are always initialized. If v4l2dev == NULL, then + this stream is not in use. In that case no other fields but these + four can be used. */ + s->v4l2dev = NULL; + s->itv = itv; + s->type = type; + s->name = ivtv_stream_info[type].name; + + /* Check whether the radio is supported */ + if (type == IVTV_ENC_STREAM_TYPE_RAD && !(itv->v4l2_cap & V4L2_CAP_RADIO)) + return 0; + if (type >= IVTV_DEC_STREAM_TYPE_MPG && !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return 0; + + if (minor_offset >= 0) + /* card number + user defined offset + device offset */ + minor = itv->num + ivtv_first_minor + minor_offset; + else + minor = -1; + + /* User explicitly selected 0 buffers for these streams, so don't + create them. */ + if (minor >= 0 && ivtv_stream_info[type].dma != PCI_DMA_NONE && + itv->options.megabytes[type] == 0) { + IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name); + return 0; + } + + ivtv_stream_init(itv, type); + + /* allocate and initialize the v4l2 video device structure */ + s->v4l2dev = video_device_alloc(); + if (s->v4l2dev == NULL) { + IVTV_ERR("Couldn't allocate v4l2 video_device for %s\n", s->name); + return -ENOMEM; + } + + s->v4l2dev->type = VID_TYPE_CAPTURE | VID_TYPE_TUNER | VID_TYPE_TELETEXT | + VID_TYPE_CLIPPING | VID_TYPE_SCALES | VID_TYPE_MPEG_ENCODER; + if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { + s->v4l2dev->type |= VID_TYPE_MPEG_DECODER; + } + snprintf(s->v4l2dev->name, sizeof(s->v4l2dev->name), "ivtv%d %s", + itv->num, s->name); + + s->v4l2dev->minor = minor; + s->v4l2dev->dev = &itv->dev->dev; + s->v4l2dev->fops = ivtv_stream_info[type].fops; + s->v4l2dev->release = video_device_release; + + if (minor >= 0) { + /* Register device. First try the desired minor, then any free one. */ + if (video_register_device(s->v4l2dev, vfl_type, minor) && + video_register_device(s->v4l2dev, vfl_type, -1)) { + IVTV_ERR("Couldn't register v4l2 device for %s minor %d\n", + s->name, minor); + video_device_release(s->v4l2dev); + s->v4l2dev = NULL; + return -ENOMEM; + } + } + else { + /* Don't register a 'hidden' stream (OSD) */ + IVTV_INFO("Created framebuffer stream for %s\n", s->name); + return 0; + } + + switch (vfl_type) { + case VFL_TYPE_GRABBER: + IVTV_INFO("Registered device video%d for %s (%d MB)\n", + s->v4l2dev->minor, s->name, itv->options.megabytes[type]); + break; + case VFL_TYPE_RADIO: + IVTV_INFO("Registered device radio%d for %s\n", + s->v4l2dev->minor - MINOR_VFL_TYPE_RADIO_MIN, s->name); + break; + case VFL_TYPE_VBI: + if (itv->options.megabytes[type]) + IVTV_INFO("Registered device vbi%d for %s (%d MB)\n", + s->v4l2dev->minor - MINOR_VFL_TYPE_VBI_MIN, + s->name, itv->options.megabytes[type]); + else + IVTV_INFO("Registered device vbi%d for %s\n", + s->v4l2dev->minor - MINOR_VFL_TYPE_VBI_MIN, s->name); + break; + } + return 0; +} + +/* Initialize v4l2 variables and register v4l2 devices */ +int ivtv_streams_setup(struct ivtv *itv) +{ + int type; + + /* Setup V4L2 Devices */ + for (type = 0; type < IVTV_MAX_STREAMS; type++) { + /* Register Device */ + if (ivtv_reg_dev(itv, type)) + break; + + if (itv->streams[type].v4l2dev == NULL) + continue; + + /* Allocate Stream */ + if (ivtv_stream_alloc(&itv->streams[type])) + break; + } + if (type == IVTV_MAX_STREAMS) { + return 0; + } + + /* One or more streams could not be initialized. Clean 'em all up. 
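+ ivtv_streams_cleanup() skips any stream whose v4l2dev is still NULL,
+ so it is safe to call on a partially initialized stream array.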
*/ + ivtv_streams_cleanup(itv); + return -ENOMEM; +} + +/* Unregister v4l2 devices */ +void ivtv_streams_cleanup(struct ivtv *itv) +{ + int type; + + /* Teardown all streams */ + for (type = 0; type < IVTV_MAX_STREAMS; type++) { + struct video_device *vdev = itv->streams[type].v4l2dev; + + itv->streams[type].v4l2dev = NULL; + if (vdev == NULL) + continue; + + ivtv_stream_free(&itv->streams[type]); + /* Free Device */ + if (vdev->minor == -1) /* 'Hidden' never registered stream (OSD) */ + video_device_release(vdev); + else /* All others, just unregister. */ + video_unregister_device(vdev); + } +} + +static void ivtv_vbi_setup(struct ivtv *itv) +{ + int raw = itv->vbi.sliced_in->service_set == 0; + u32 data[CX2341X_MBOX_MAX_DATA]; + int lines; + int i; + + /* If Embed then streamtype must be Program */ + /* TODO: should we really do this? */ + if (0 && !raw && itv->vbi.insert_mpeg) { + itv->params.stream_type = 0; + + /* assign stream type */ + ivtv_vapi(itv, CX2341X_ENC_SET_STREAM_TYPE, 1, itv->params.stream_type); + } + + /* Reset VBI */ + ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, 0xffff , 0, 0, 0, 0); + + if (itv->is_60hz) { + itv->vbi.count = 12; + itv->vbi.start[0] = 10; + itv->vbi.start[1] = 273; + } else { /* PAL/SECAM */ + itv->vbi.count = 18; + itv->vbi.start[0] = 6; + itv->vbi.start[1] = 318; + } + + /* setup VBI registers */ + itv->video_dec_func(itv, VIDIOC_S_FMT, &itv->vbi.in); + + /* determine number of lines and total number of VBI bytes. + A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1 + The '- 1' byte is probably an unused U or V byte. Or something... + A sliced line takes 51 bytes: 4 byte frame header, 4 byte internal + header, 42 data bytes + checksum (to be confirmed) */ + if (raw) { + lines = itv->vbi.count * 2; + } else { + lines = itv->is_60hz ? 24 : 38; + if (itv->is_60hz && (itv->hw_flags & IVTV_HW_CX25840)) + lines += 2; + } + + itv->vbi.enc_size = lines * (raw ? itv->vbi.raw_size : itv->vbi.sliced_size); + + /* Note: sliced vs raw flag doesn't seem to have any effect + TODO: check mode (0x02) value with older ivtv versions. */ + data[0] = raw | 0x02 | (0xbd << 8); + + /* Every X number of frames a VBI interrupt arrives (frames as in 25 or 30 fps) */ + data[1] = 1; + /* The VBI frames are stored in a ringbuffer with this size (with a VBI frame as unit) */ + data[2] = raw ? 4 : 8; + /* The start/stop codes determine which VBI lines end up in the raw VBI data area. + The codes are from table 24 in the saa7115 datasheet. Each raw/sliced/video line + is framed with codes FF0000XX where XX is the SAV/EAV (Start/End of Active Video) + code. These values for raw VBI are obtained from a driver disassembly. The sliced + start/stop codes was deduced from this, but they do not appear in the driver. + Other code pairs that I found are: 0x250E6249/0x13545454 and 0x25256262/0x38137F54. + However, I have no idea what these values are for. */ + if (itv->hw_flags & IVTV_HW_CX25840) { + /* Setup VBI for the cx25840 digitizer */ + if (raw) { + data[3] = 0x20602060; + data[4] = 0x30703070; + } else { + data[3] = 0xB0F0B0F0; + data[4] = 0xA0E0A0E0; + } + /* Lines per frame */ + data[5] = lines; + /* bytes per line */ + data[6] = (raw ? 
itv->vbi.raw_size : itv->vbi.sliced_size); + } else { + /* Setup VBI for the saa7115 digitizer */ + if (raw) { + data[3] = 0x25256262; + data[4] = 0x387F7F7F; + } else { + data[3] = 0xABABECEC; + data[4] = 0xB6F1F1F1; + } + /* Lines per frame */ + data[5] = lines; + /* bytes per line */ + data[6] = itv->vbi.enc_size / lines; + } + + IVTV_DEBUG_INFO( + "Setup VBI API header 0x%08x pkts %d buffs %d ln %d sz %d\n", + data[0], data[1], data[2], data[5], data[6]); + + ivtv_api(itv, CX2341X_ENC_SET_VBI_CONFIG, 7, data); + + /* returns the VBI encoder memory area. */ + itv->vbi.enc_start = data[2]; + itv->vbi.fpi = data[0]; + if (!itv->vbi.fpi) + itv->vbi.fpi = 1; + + IVTV_DEBUG_INFO("Setup VBI start 0x%08x frames %d fpi %d lines 0x%08x\n", + itv->vbi.enc_start, data[1], itv->vbi.fpi, itv->digitizer); + + /* select VBI lines. + Note that the sliced argument seems to have no effect. */ + for (i = 2; i <= 24; i++) { + int valid; + + if (itv->is_60hz) { + valid = i >= 10 && i < 22; + } else { + valid = i >= 6 && i < 24; + } + ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, i - 1, + valid, 0 , 0, 0); + ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, (i - 1) | 0x80000000, + valid, 0, 0, 0); + } + + /* Remaining VBI questions: + - Is it possible to select particular VBI lines only for inclusion in the MPEG + stream? Currently you can only get the first X lines. + - Is mixed raw and sliced VBI possible? + - What's the meaning of the raw/sliced flag? + - What's the meaning of params 2, 3 & 4 of the Select VBI command? */ +} + +int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + struct ivtv *itv = s->itv; + int captype = 0, subtype = 0; + int enable_passthrough = 0; + + if (s->v4l2dev == NULL) + return -EINVAL; + + IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name); + + switch (s->type) { + case IVTV_ENC_STREAM_TYPE_MPG: + captype = 0; + subtype = 3; + + /* Stop Passthrough */ + if (itv->output_mode == OUT_PASSTHROUGH) { + ivtv_passthrough_mode(itv, 0); + enable_passthrough = 1; + } + itv->mpg_data_received = itv->vbi_data_inserted = 0; + itv->dualwatch_jiffies = jiffies; + itv->dualwatch_stereo_mode = itv->params.audio_properties & 0x0300; + itv->search_pack_header = 0; + break; + + case IVTV_ENC_STREAM_TYPE_YUV: + if (itv->output_mode == OUT_PASSTHROUGH) { + captype = 2; + subtype = 11; /* video+audio+decoder */ + break; + } + captype = 1; + subtype = 1; + break; + case IVTV_ENC_STREAM_TYPE_PCM: + captype = 1; + subtype = 2; + break; + case IVTV_ENC_STREAM_TYPE_VBI: + captype = 1; + subtype = 4; + + itv->vbi.frame = 0; + itv->vbi.inserted_frame = 0; + memset(itv->vbi.sliced_mpeg_size, + 0, sizeof(itv->vbi.sliced_mpeg_size)); + break; + default: + return -EINVAL; + } + s->subtype = subtype; + s->buffers_stolen = 0; + + /* mute/unmute video */ + ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1, test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? 1 : 0); + + /* Clear Streamoff flags in case left from last capture */ + clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags); + + if (atomic_read(&itv->capturing) == 0) { + /* Always use frame based mode. Experiments have demonstrated that byte + stream based mode results in dropped frames and corruption. Not often, + but occasionally. Many thanks go to Leonard Orb who spent a lot of + effort and time trying to trace the cause of the drop outs. 
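+ The SET_DMA_BLOCK_SIZE call below with arguments (1, 1) selects that
+ frame based mode; the commented-out (128, 0) variant is presumably
+ the old byte/block based setting.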
*/ + /* 1 frame per DMA */ + /*ivtv_vapi(itv, CX2341X_ENC_SET_DMA_BLOCK_SIZE, 2, 128, 0); */ + ivtv_vapi(itv, CX2341X_ENC_SET_DMA_BLOCK_SIZE, 2, 1, 1); + + /* Stuff from Windows, we don't know what it is */ + ivtv_vapi(itv, CX2341X_ENC_SET_VERT_CROP_LINE, 1, 0); + /* According to the docs, this should be correct. However, this is + untested. I don't dare enable this without having tested it. + Only very few old cards actually have this hardware combination. + ivtv_vapi(itv, CX2341X_ENC_SET_VERT_CROP_LINE, 1, + ((itv->hw_flags & IVTV_HW_SAA7114) && itv->is_60hz) ? 10001 : 0); + */ + ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 3, !itv->has_cx23415); + ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 8, 0); + ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 4, 1); + ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12); + + /* assign placeholder */ + ivtv_vapi(itv, CX2341X_ENC_SET_PLACEHOLDER, 12, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + + ivtv_vapi(itv, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, itv->digitizer, itv->digitizer); + + /* Setup VBI */ + if (itv->v4l2_cap & V4L2_CAP_VBI_CAPTURE) { + ivtv_vbi_setup(itv); + } + + /* assign program index info. Mask 7: select I/P/B, Num_req: 400 max */ + ivtv_vapi_result(itv, data, CX2341X_ENC_SET_PGM_INDEX_INFO, 2, 7, 400); + itv->pgm_info_offset = data[0]; + itv->pgm_info_num = data[1]; + itv->pgm_info_write_idx = 0; + itv->pgm_info_read_idx = 0; + + IVTV_DEBUG_INFO("PGM Index at 0x%08x with %d elements\n", + itv->pgm_info_offset, itv->pgm_info_num); + + /* Setup API for Stream */ + cx2341x_update(itv, ivtv_api_func, NULL, &itv->params); + } + + /* Vsync Setup */ + if (itv->has_cx23415 && !test_and_set_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) { + /* event notification (on) */ + ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 1, IVTV_IRQ_ENC_VIM_RST, -1); + ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST); + } + + if (atomic_read(&itv->capturing) == 0) { + /* Clear all Pending Interrupts */ + ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE); + + clear_bit(IVTV_F_I_EOS, &itv->i_flags); + + /* Initialize Digitizer for Capture */ + ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0); + + ivtv_sleep_timeout(HZ / 10, 0); + } + + /* begin_capture */ + if (ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, captype, subtype)) + { + IVTV_DEBUG_WARN( "Error starting capture!\n"); + return -EINVAL; + } + + /* Start Passthrough */ + if (enable_passthrough) { + ivtv_passthrough_mode(itv, 1); + } + + if (s->type == IVTV_ENC_STREAM_TYPE_VBI) + ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP); + else + ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE); + + /* you're live! sit back and await interrupts :) */ + atomic_inc(&itv->capturing); + return 0; +} + +static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + struct ivtv *itv = s->itv; + int datatype; + + if (s->v4l2dev == NULL) + return -EINVAL; + + IVTV_DEBUG_INFO("Setting some initial decoder settings\n"); + + /* disable VBI signals, if the MPEG stream contains VBI data, + then that data will be processed automatically for you. */ + ivtv_disable_vbi(itv); + + /* set audio mode to left/stereo for dual/stereo mode. 
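+ Judging by the field names passed below, the first argument controls
+ the bilingual (dual language) mapping and the second the stereo mapping.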
*/ + ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode); + + /* set number of internal decoder buffers */ + ivtv_vapi(itv, CX2341X_DEC_SET_DISPLAY_BUFFERS, 1, 0); + + /* prebuffering */ + ivtv_vapi(itv, CX2341X_DEC_SET_PREBUFFERING, 1, 1); + + /* extract from user packets */ + ivtv_vapi_result(itv, data, CX2341X_DEC_EXTRACT_VBI, 1, 1); + itv->vbi.dec_start = data[0]; + + IVTV_DEBUG_INFO("Decoder VBI RE-Insert start 0x%08x size 0x%08x\n", + itv->vbi.dec_start, data[1]); + + /* set decoder source settings */ + /* Data type: 0 = mpeg from host, + 1 = yuv from encoder, + 2 = yuv_from_host */ + switch (s->type) { + case IVTV_DEC_STREAM_TYPE_YUV: + datatype = itv->output_mode == OUT_PASSTHROUGH ? 1 : 2; + IVTV_DEBUG_INFO("Setup DEC YUV Stream data[0] = %d\n", datatype); + break; + case IVTV_DEC_STREAM_TYPE_MPG: + default: + datatype = 0; + break; + } + if (ivtv_vapi(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, datatype, + itv->params.width, itv->params.height, itv->params.audio_properties)) { + IVTV_DEBUG_WARN("COULDN'T INITIALIZE DECODER SOURCE\n"); + } + return 0; +} + +int ivtv_start_v4l2_decode_stream(struct ivtv_stream *s, int gop_offset) +{ + struct ivtv *itv = s->itv; + + if (s->v4l2dev == NULL) + return -EINVAL; + + if (test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags)) + return 0; /* already started */ + + IVTV_DEBUG_INFO("Starting decode stream %s (gop_offset %d)\n", s->name, gop_offset); + + /* Clear Streamoff */ + if (s->type == IVTV_DEC_STREAM_TYPE_YUV) { + /* Initialize Decoder */ + /* Reprogram Decoder YUV Buffers for YUV */ + write_reg(yuv_offset[0] >> 4, 0x82c); + write_reg((yuv_offset[0] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830); + write_reg(yuv_offset[0] >> 4, 0x834); + write_reg((yuv_offset[0] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838); + + write_reg_sync(0x00000000 | (0x0c << 16) | (0x0b << 8), 0x2d24); + + write_reg_sync(0x00108080, 0x2898); + /* Enable YUV decoder output */ + write_reg_sync(0x01, IVTV_REG_VDM); + } + + ivtv_setup_v4l2_decode_stream(s); + + /* set dma size to 65536 bytes */ + ivtv_vapi(itv, CX2341X_DEC_SET_DMA_BLOCK_SIZE, 1, 65536); + + clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags); + + /* Zero out decoder counters */ + writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_FIELD_DISPLAYED].data[0]); + writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_FIELD_DISPLAYED].data[1]); + writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_FIELD_DISPLAYED].data[2]); + writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_FIELD_DISPLAYED].data[3]); + writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[0]); + writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[1]); + writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[2]); + writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[3]); + + /* turn on notification of dual/stereo mode change */ + ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 1, IVTV_IRQ_DEC_AUD_MODE_CHG, -1); + + /* start playback */ + ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, gop_offset, 0); + + /* Clear the following Interrupt mask bits for decoding */ + ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_DECODE); + IVTV_DEBUG_IRQ("IRQ Mask is now: 0x%08x\n", itv->irqmask); + + /* you're live! 
sit back and await interrupts :) */ + atomic_inc(&itv->decoding); + return 0; +} + +void ivtv_stop_all_captures(struct ivtv *itv) +{ + int i; + + for (i = IVTV_MAX_STREAMS - 1; i >= 0; i--) { + struct ivtv_stream *s = &itv->streams[i]; + + if (s->v4l2dev == NULL) + continue; + if (test_bit(IVTV_F_S_STREAMING, &s->s_flags)) { + ivtv_stop_v4l2_encode_stream(s, 0); + } + } +} + +int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end) +{ + struct ivtv *itv = s->itv; + DECLARE_WAITQUEUE(wait, current); + int cap_type; + unsigned long then; + int stopmode; + u32 data[CX2341X_MBOX_MAX_DATA]; + + if (s->v4l2dev == NULL) + return -EINVAL; + + /* This function assumes that you are allowed to stop the capture + and that we are actually capturing */ + + IVTV_DEBUG_INFO("Stop Capture\n"); + + if (s->type == IVTV_DEC_STREAM_TYPE_VOUT) + return 0; + if (atomic_read(&itv->capturing) == 0) + return 0; + + switch (s->type) { + case IVTV_ENC_STREAM_TYPE_YUV: + cap_type = 1; + break; + case IVTV_ENC_STREAM_TYPE_PCM: + cap_type = 1; + break; + case IVTV_ENC_STREAM_TYPE_VBI: + cap_type = 1; + break; + case IVTV_ENC_STREAM_TYPE_MPG: + default: + cap_type = 0; + break; + } + + /* Stop Capture Mode */ + if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) { + stopmode = 0; + } else { + stopmode = 1; + } + + /* end_capture */ + /* when: 0 = end of GOP 1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */ + ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype); + + /* only run these if we're shutting down the last cap */ + if (atomic_read(&itv->capturing) - 1 == 0) { + /* event notification (off) */ + if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) { + /* type: 0 = refresh */ + /* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */ + ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1); + ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST); + } + } + + then = jiffies; + + if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) { + if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) { + /* only run these if we're shutting down the last cap */ + unsigned long duration; + + then = jiffies; + add_wait_queue(&itv->cap_w, &wait); + + set_current_state(TASK_INTERRUPTIBLE); + + /* wait 2s for EOS interrupt */ + while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) && jiffies < then + 2 * HZ) { + schedule_timeout(HZ / 100); + } + + /* To convert jiffies to ms, we must multiply by 1000 + * and divide by HZ. To avoid runtime division, we + * convert this to multiplication by 1000/HZ. + * Since integer division truncates, we get the best + * accuracy if we do a rounding calculation of the constant. + * Think of the case where HZ is 1024. + */ + duration = ((1000 + HZ / 2) / HZ) * (jiffies - then); + + if (!test_bit(IVTV_F_I_EOS, &itv->i_flags)) { + IVTV_DEBUG_WARN("%s: EOS interrupt not received! 
stopping anyway.\n", s->name); + IVTV_DEBUG_WARN("%s: waited %lu ms.\n", s->name, duration); + } else { + IVTV_DEBUG_INFO("%s: EOS took %lu ms to occur.\n", s->name, duration); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&itv->cap_w, &wait); + } + + then = jiffies; + /* Make sure DMA is complete */ + add_wait_queue(&s->waitq, &wait); + set_current_state(TASK_INTERRUPTIBLE); + do { + /* check if DMA is pending */ + if ((s->type == IVTV_ENC_STREAM_TYPE_MPG) && /* MPG Only */ + (read_reg(IVTV_REG_DMASTATUS) & 0x02)) { + /* Check for last DMA */ + ivtv_vapi_result(itv, data, CX2341X_ENC_GET_SEQ_END, 2, 0, 0); + + if (data[0] == 1) { + IVTV_DEBUG_DMA("%s: Last DMA of size 0x%08x\n", s->name, data[1]); + break; + } + } else if (read_reg(IVTV_REG_DMASTATUS) & 0x02) { + break; + } + + ivtv_sleep_timeout(HZ / 100, 1); + } while (then + HZ * 2 > jiffies); + + set_current_state(TASK_RUNNING); + remove_wait_queue(&s->waitq, &wait); + } + + atomic_dec(&itv->capturing); + + /* Clear capture and no-read bits */ + clear_bit(IVTV_F_S_STREAMING, &s->s_flags); + + if (s->type == IVTV_ENC_STREAM_TYPE_VBI) + ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP); + + if (atomic_read(&itv->capturing) > 0) { + return 0; + } + + /* Set the following Interrupt mask bits for capture */ + ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE); + + wake_up(&s->waitq); + + return 0; +} + +int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts) +{ + struct ivtv *itv = s->itv; + + if (s->v4l2dev == NULL) + return -EINVAL; + + if (s->type != IVTV_DEC_STREAM_TYPE_YUV && s->type != IVTV_DEC_STREAM_TYPE_MPG) + return -EINVAL; + + if (!test_bit(IVTV_F_S_STREAMING, &s->s_flags)) + return 0; + + IVTV_DEBUG_INFO("Stop Decode at %llu, flags: %x\n", pts, flags); + + /* Stop Decoder */ + if (!(flags & VIDEO_CMD_STOP_IMMEDIATELY) || pts) { + u32 tmp = 0; + + /* Wait until the decoder is no longer running */ + if (pts) { + ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3, + 0, (u32)(pts & 0xffffffff), (u32)(pts >> 32)); + } + while (1) { + u32 data[CX2341X_MBOX_MAX_DATA]; + ivtv_vapi_result(itv, data, CX2341X_DEC_GET_XFER_INFO, 0); + if (s->q_full.buffers + s->q_dma.buffers == 0) { + if (tmp == data[3]) + break; + tmp = data[3]; + } + if (ivtv_sleep_timeout(HZ/10, 1)) + break; + } + } + ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3, flags & VIDEO_CMD_STOP_TO_BLACK, 0, 0); + + /* turn off notification of dual/stereo mode change */ + ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_DEC_AUD_MODE_CHG, -1); + + ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_DECODE); + + clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); + clear_bit(IVTV_F_S_STREAMING, &s->s_flags); + ivtv_flush_queues(s); + + if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) { + /* disable VBI on TV-out */ + ivtv_disable_vbi(itv); + } + + /* decrement decoding */ + atomic_dec(&itv->decoding); + + set_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags); + wake_up(&itv->event_waitq); + + /* wake up wait queues */ + wake_up(&s->waitq); + + return 0; +} + +int ivtv_passthrough_mode(struct ivtv *itv, int enable) +{ + struct ivtv_stream *yuv_stream = &itv->streams[IVTV_ENC_STREAM_TYPE_YUV]; + struct ivtv_stream *dec_stream = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; + + if (yuv_stream->v4l2dev == NULL || dec_stream->v4l2dev == NULL) + return -EINVAL; + + IVTV_DEBUG_INFO("ivtv ioctl: Select passthrough mode\n"); + + /* Prevent others from starting/stopping streams while we + initiate/terminate passthrough mode */ + if (enable) { + if (itv->output_mode == OUT_PASSTHROUGH) { 
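+ /* already in passthrough mode, nothing more to do */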
+ return 0; + } + if (ivtv_set_output_mode(itv, OUT_PASSTHROUGH) != OUT_PASSTHROUGH) + return -EBUSY; + + /* Fully initialize stream, and then unflag init */ + set_bit(IVTV_F_S_PASSTHROUGH, &dec_stream->s_flags); + set_bit(IVTV_F_S_STREAMING, &dec_stream->s_flags); + + /* Setup YUV Decoder */ + ivtv_setup_v4l2_decode_stream(dec_stream); + + /* Start Decoder */ + ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 1); + atomic_inc(&itv->decoding); + + /* Setup capture if not already done */ + if (atomic_read(&itv->capturing) == 0) { + cx2341x_update(itv, ivtv_api_func, NULL, &itv->params); + } + + /* Start Passthrough Mode */ + ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, 2, 11); + atomic_inc(&itv->capturing); + return 0; + } + + if (itv->output_mode != OUT_PASSTHROUGH) + return 0; + + /* Stop Passthrough Mode */ + ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 11); + ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3, 1, 0, 0); + + atomic_dec(&itv->capturing); + atomic_dec(&itv->decoding); + clear_bit(IVTV_F_S_PASSTHROUGH, &dec_stream->s_flags); + clear_bit(IVTV_F_S_STREAMING, &dec_stream->s_flags); + itv->output_mode = OUT_NONE; + + return 0; +} diff --git a/drivers/media/video/ivtv/ivtv-streams.h b/drivers/media/video/ivtv/ivtv-streams.h new file mode 100644 index 000000000000..8597b75384a7 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-streams.h @@ -0,0 +1,31 @@ +/* + init/start/stop/exit stream functions + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +int ivtv_streams_setup(struct ivtv *itv); +void ivtv_streams_cleanup(struct ivtv *itv); + +/* Capture related */ +int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s); +int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end); +int ivtv_start_v4l2_decode_stream(struct ivtv_stream *s, int gop_offset); +int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts); + +void ivtv_stop_all_captures(struct ivtv *itv); +int ivtv_passthrough_mode(struct ivtv *itv, int enable); diff --git a/drivers/media/video/ivtv/ivtv-udma.c b/drivers/media/video/ivtv/ivtv-udma.c new file mode 100644 index 000000000000..bd642e1aafc3 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-udma.c @@ -0,0 +1,200 @@ +/* + User DMA + + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-streams.h" +#include "ivtv-udma.h" + +void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size) +{ + dma_page->uaddr = first & PAGE_MASK; + dma_page->offset = first & ~PAGE_MASK; + dma_page->tail = 1 + ((first+size-1) & ~PAGE_MASK); + dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT; + dma_page->last = ((first+size-1) & PAGE_MASK) >> PAGE_SHIFT; + dma_page->page_count = dma_page->last - dma_page->first + 1; + if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset; +} + +int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset) +{ + int i, offset; + + offset = dma_page->offset; + + /* Fill SG Array with new values */ + for (i = 0; i < dma_page->page_count; i++) { + if (i == dma_page->page_count - 1) { + dma->SGlist[map_offset].length = dma_page->tail; + } + else { + dma->SGlist[map_offset].length = PAGE_SIZE - offset; + } + dma->SGlist[map_offset].offset = offset; + dma->SGlist[map_offset].page = dma->map[map_offset]; + offset = 0; + map_offset++; + } + return map_offset; +} + +void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split) { + int i; + struct scatterlist *sg; + + for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg++) { + dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg)); + dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg)); + dma->SGarray[i].dst = cpu_to_le32(buffer_offset); + buffer_offset += sg_dma_len(sg); + + split -= sg_dma_len(sg); + if (split == 0) + buffer_offset = buffer_offset_2; + } +} + +/* User DMA Buffers */ +void ivtv_udma_alloc(struct ivtv *itv) +{ + if (itv->udma.SG_handle == 0) { + /* Map DMA Page Array Buffer */ + itv->udma.SG_handle = pci_map_single(itv->dev, itv->udma.SGarray, + sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE); + ivtv_udma_sync_for_cpu(itv); + } +} + +int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr, + void __user *userbuf, int size_in_bytes) +{ + struct ivtv_dma_page_info user_dma; + struct ivtv_user_dma *dma = &itv->udma; + int err; + + IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr); + + /* Still in USE */ + if (dma->SG_length || dma->page_count) { + IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n", + dma->SG_length, dma->page_count); + return -EBUSY; + } + + ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes); + + if (user_dma.page_count <= 0) { + IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n", + user_dma.page_count, size_in_bytes, user_dma.offset); + return -EINVAL; + } + + /* Get user pages for DMA Xfer */ + down_read(¤t->mm->mmap_sem); + err = get_user_pages(current, current->mm, + user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL); + up_read(¤t->mm->mmap_sem); + + if (user_dma.page_count != err) { + IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n", + err, user_dma.page_count); + return -EINVAL; + } + + dma->page_count = user_dma.page_count; + + /* Fill SG List with new values */ + ivtv_udma_fill_sg_list(dma, &user_dma, 0); + + /* Map SG List */ + dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, 
dma->page_count, PCI_DMA_TODEVICE); + + /* Fill SG Array with new values */ + ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1); + + /* Tag SG Array with Interrupt Bit */ + dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000); + + ivtv_udma_sync_for_device(itv); + return dma->page_count; +} + +void ivtv_udma_unmap(struct ivtv *itv) +{ + struct ivtv_user_dma *dma = &itv->udma; + int i; + + IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n"); + + /* Nothing to free */ + if (dma->page_count == 0) + return; + + /* Unmap Scatterlist */ + if (dma->SG_length) { + pci_unmap_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE); + dma->SG_length = 0; + } + /* sync DMA */ + ivtv_udma_sync_for_cpu(itv); + + /* Release User Pages */ + for (i = 0; i < dma->page_count; i++) { + put_page(dma->map[i]); + } + dma->page_count = 0; +} + +void ivtv_udma_free(struct ivtv *itv) +{ + /* Unmap SG Array */ + if (itv->udma.SG_handle) { + pci_unmap_single(itv->dev, itv->udma.SG_handle, + sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE); + } + + /* Unmap Scatterlist */ + if (itv->udma.SG_length) { + pci_unmap_sg(itv->dev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE); + } +} + +void ivtv_udma_start(struct ivtv *itv) +{ + IVTV_DEBUG_DMA("start UDMA\n"); + write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR); + write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER); + set_bit(IVTV_F_I_DMA, &itv->i_flags); + set_bit(IVTV_F_I_UDMA, &itv->i_flags); +} + +void ivtv_udma_prepare(struct ivtv *itv) +{ + unsigned long flags; + + spin_lock_irqsave(&itv->dma_reg_lock, flags); + if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) + ivtv_udma_start(itv); + else + set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags); + spin_unlock_irqrestore(&itv->dma_reg_lock, flags); +} diff --git a/drivers/media/video/ivtv/ivtv-udma.h b/drivers/media/video/ivtv/ivtv-udma.h new file mode 100644 index 000000000000..e131bccedec0 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-udma.h @@ -0,0 +1,43 @@ +/* + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004 Chris Kennedy + Copyright (C) 2006-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* User DMA functions */ +void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size); +int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset); +void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split); +int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr, + void __user *userbuf, int size_in_bytes); +void ivtv_udma_unmap(struct ivtv *itv); +void ivtv_udma_free(struct ivtv *itv); +void ivtv_udma_alloc(struct ivtv *itv); +void ivtv_udma_prepare(struct ivtv *itv); +void ivtv_udma_start(struct ivtv *itv); + +static inline void ivtv_udma_sync_for_device(struct ivtv *itv) +{ + pci_dma_sync_single_for_device((struct pci_dev *)itv->dev, itv->udma.SG_handle, + sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE); +} + +static inline void ivtv_udma_sync_for_cpu(struct ivtv *itv) +{ + pci_dma_sync_single_for_cpu((struct pci_dev *)itv->dev, itv->udma.SG_handle, + sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE); +} diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c new file mode 100644 index 000000000000..b53ca508dacc --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-vbi.c @@ -0,0 +1,545 @@ +/* + Vertical Blank Interval support functions + Copyright (C) 2004-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-video.h" +#include "ivtv-vbi.h" +#include "ivtv-ioctl.h" +#include "ivtv-queue.h" + +static int odd_parity(u8 c) +{ + c ^= (c >> 4); + c ^= (c >> 2); + c ^= (c >> 1); + + return c & 1; +} + +void vbi_schedule_work(struct ivtv *itv) +{ + queue_work(itv->vbi.work_queues, &itv->vbi.work_queue); +} + +static void passthrough_vbi_data(struct ivtv *itv, int cnt) +{ + int wss = 0; + u8 cc[4] = { 0x80, 0x80, 0x80, 0x80 }; + u8 vps[13]; + int found_cc = 0; + int found_wss = 0; + int found_vps = 0; + int cc_pos = itv->vbi.cc_pos; + int i; + + for (i = 0; i < cnt; i++) { + struct v4l2_sliced_vbi_data *d = itv->vbi.sliced_dec_data + i; + + if (d->id == V4L2_SLICED_CAPTION_525 && d->line == 21) { + found_cc = 1; + if (d->field) { + cc[2] = d->data[0]; + cc[3] = d->data[1]; + } else { + cc[0] = d->data[0]; + cc[1] = d->data[1]; + } + } + else if (d->id == V4L2_SLICED_VPS && d->line == 16 && d->field == 0) { + memcpy(vps, d->data, sizeof(vps)); + found_vps = 1; + } + else if (d->id == V4L2_SLICED_WSS_625 && d->line == 23 && d->field == 0) { + wss = d->data[0] | d->data[1] << 8; + found_wss = 1; + } + } + + if (itv->vbi.wss_found != found_wss || itv->vbi.wss != wss) { + itv->vbi.wss = wss; + itv->vbi.wss_found = found_wss; + set_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags); + } + + if (found_vps || itv->vbi.vps_found) { + itv->vbi.vps[0] = vps[2]; + itv->vbi.vps[1] = vps[8]; + itv->vbi.vps[2] = vps[9]; + itv->vbi.vps[3] = vps[10]; + itv->vbi.vps[4] = vps[11]; + itv->vbi.vps_found = found_vps; + set_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags); + } + + if (found_cc && cc_pos < sizeof(itv->vbi.cc_data_even)) { + itv->vbi.cc_data_odd[cc_pos] = cc[0]; + itv->vbi.cc_data_odd[cc_pos + 1] = cc[1]; + itv->vbi.cc_data_even[cc_pos] = cc[2]; + itv->vbi.cc_data_even[cc_pos + 1] = cc[3]; + itv->vbi.cc_pos = cc_pos + 2; + set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags); + } +} + +static void copy_vbi_data(struct ivtv *itv, int lines, u32 pts_stamp) +{ + int line = 0; + int i; + u32 linemask[2] = { 0, 0 }; + unsigned short size; + static const u8 mpeg_hdr_data[] = { + 0x00, 0x00, 0x01, 0xba, 0x44, 0x00, 0x0c, 0x66, + 0x24, 0x01, 0x01, 0xd1, 0xd3, 0xfa, 0xff, 0xff, + 0x00, 0x00, 0x01, 0xbd, 0x00, 0x1a, 0x84, 0x80, + 0x07, 0x21, 0x00, 0x5d, 0x63, 0xa7, 0xff, 0xff + }; + const int sd = sizeof(mpeg_hdr_data); /* start of vbi data */ + int idx = itv->vbi.frame % IVTV_VBI_FRAMES; + u8 *dst = &itv->vbi.sliced_mpeg_data[idx][0]; + + for (i = 0; i < lines; i++) { + int f, l; + + if (itv->vbi.sliced_data[i].id == 0) + continue; + + l = itv->vbi.sliced_data[i].line - 6; + f = itv->vbi.sliced_data[i].field; + if (f) + l += 18; + if (l < 32) + linemask[0] |= (1 << l); + else + linemask[1] |= (1 << (l - 32)); + dst[sd + 12 + line * 43] = service2vbi(itv->vbi.sliced_data[i].id); + memcpy(dst + sd + 12 + line * 43 + 1, itv->vbi.sliced_data[i].data, 42); + line++; + } + memcpy(dst, mpeg_hdr_data, sizeof(mpeg_hdr_data)); + if (line == 36) { + /* All lines are used, so there is no space for the linemask + (the max size of the VBI data is 36 * 43 + 4 bytes). + So in this case we use the magic number 'ITV0'. 
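+	   'ITV0' tells the decode path (ivtv_convert_ivtv_vbi) to assume an
+	   implicit all-lines mask, whereas the lowercase 'itv0' header written
+	   below carries an explicit 8-byte linemask.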
*/ + memcpy(dst + sd, "ITV0", 4); + memcpy(dst + sd + 4, dst + sd + 12, line * 43); + size = 4 + ((43 * line + 3) & ~3); + } else { + memcpy(dst + sd, "itv0", 4); + memcpy(dst + sd + 4, &linemask[0], 8); + size = 12 + ((43 * line + 3) & ~3); + } + dst[4+16] = (size + 10) >> 8; + dst[5+16] = (size + 10) & 0xff; + dst[9+16] = 0x21 | ((pts_stamp >> 29) & 0x6); + dst[10+16] = (pts_stamp >> 22) & 0xff; + dst[11+16] = 1 | ((pts_stamp >> 14) & 0xff); + dst[12+16] = (pts_stamp >> 7) & 0xff; + dst[13+16] = 1 | ((pts_stamp & 0x7f) << 1); + itv->vbi.sliced_mpeg_size[idx] = sd + size; +} + +static int ivtv_convert_ivtv_vbi(struct ivtv *itv, u8 *p) +{ + u32 linemask[2]; + int i, l, id2; + int line = 0; + + if (!memcmp(p, "itv0", 4)) { + memcpy(linemask, p + 4, 8); + p += 12; + } else if (!memcmp(p, "ITV0", 4)) { + linemask[0] = 0xffffffff; + linemask[1] = 0xf; + p += 4; + } else { + /* unknown VBI data stream */ + return 0; + } + for (i = 0; i < 36; i++) { + int err = 0; + + if (i < 32 && !(linemask[0] & (1 << i))) + continue; + if (i >= 32 && !(linemask[1] & (1 << (i - 32)))) + continue; + id2 = *p & 0xf; + switch (id2) { + case IVTV_SLICED_TYPE_TELETEXT_B: + id2 = V4L2_SLICED_TELETEXT_B; + break; + case IVTV_SLICED_TYPE_CAPTION_525: + id2 = V4L2_SLICED_CAPTION_525; + err = !odd_parity(p[1]) || !odd_parity(p[2]); + break; + case IVTV_SLICED_TYPE_VPS: + id2 = V4L2_SLICED_VPS; + break; + case IVTV_SLICED_TYPE_WSS_625: + id2 = V4L2_SLICED_WSS_625; + break; + default: + id2 = 0; + break; + } + if (err == 0) { + l = (i < 18) ? i + 6 : i - 18 + 6; + itv->vbi.sliced_dec_data[line].line = l; + itv->vbi.sliced_dec_data[line].field = i >= 18; + itv->vbi.sliced_dec_data[line].id = id2; + memcpy(itv->vbi.sliced_dec_data[line].data, p + 1, 42); + line++; + } + p += 43; + } + while (line < 36) { + itv->vbi.sliced_dec_data[line].id = 0; + itv->vbi.sliced_dec_data[line].line = 0; + itv->vbi.sliced_dec_data[line].field = 0; + line++; + } + return line * sizeof(itv->vbi.sliced_dec_data[0]); +} + +ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count) +{ + /* Should be a __user pointer, but sparse doesn't parse this bit correctly. 
*/ + const struct v4l2_sliced_vbi_data *p = (const struct v4l2_sliced_vbi_data *)ubuf; + u8 cc[4] = { 0x80, 0x80, 0x80, 0x80 }; + int found_cc = 0; + int cc_pos = itv->vbi.cc_pos; + + if (itv->vbi.service_set_out == 0) + return -EPERM; + + while (count >= sizeof(struct v4l2_sliced_vbi_data)) { + switch (p->id) { + case V4L2_SLICED_CAPTION_525: + if (p->id == V4L2_SLICED_CAPTION_525 && + p->line == 21 && + (itv->vbi.service_set_out & + V4L2_SLICED_CAPTION_525) == 0) { + break; + } + found_cc = 1; + if (p->field) { + cc[2] = p->data[0]; + cc[3] = p->data[1]; + } else { + cc[0] = p->data[0]; + cc[1] = p->data[1]; + } + break; + + case V4L2_SLICED_VPS: + if (p->line == 16 && p->field == 0 && + (itv->vbi.service_set_out & V4L2_SLICED_VPS)) { + itv->vbi.vps[0] = p->data[2]; + itv->vbi.vps[1] = p->data[8]; + itv->vbi.vps[2] = p->data[9]; + itv->vbi.vps[3] = p->data[10]; + itv->vbi.vps[4] = p->data[11]; + itv->vbi.vps_found = 1; + set_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags); + } + break; + + case V4L2_SLICED_WSS_625: + if (p->line == 23 && p->field == 0 && + (itv->vbi.service_set_out & V4L2_SLICED_WSS_625)) { + /* No lock needed for WSS */ + itv->vbi.wss = p->data[0] | (p->data[1] << 8); + itv->vbi.wss_found = 1; + set_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags); + } + break; + + default: + break; + } + count -= sizeof(*p); + p++; + } + + if (found_cc && cc_pos < sizeof(itv->vbi.cc_data_even)) { + itv->vbi.cc_data_odd[cc_pos] = cc[0]; + itv->vbi.cc_data_odd[cc_pos + 1] = cc[1]; + itv->vbi.cc_data_even[cc_pos] = cc[2]; + itv->vbi.cc_data_even[cc_pos + 1] = cc[3]; + itv->vbi.cc_pos = cc_pos + 2; + set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags); + } + + return (const char __user *)p - ubuf; +} + +/* Compress raw VBI format, removes leading SAV codes and surplus space after the + field. + Returns new compressed size. 
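+   Each line keeps line_size - 4 bytes once its 4-byte SAV code is
+   stripped, so the buffer shrinks to count * (line_size - 4) bytes.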
*/ +static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size) +{ + u32 line_size = itv->vbi.raw_decoder_line_size; + u32 lines = itv->vbi.count; + u8 sav1 = itv->vbi.raw_decoder_sav_odd_field; + u8 sav2 = itv->vbi.raw_decoder_sav_even_field; + u8 *q = buf; + u8 *p; + int i; + + for (i = 0; i < lines; i++) { + p = buf + i * line_size; + + /* Look for SAV code */ + if (p[0] != 0xff || p[1] || p[2] || (p[3] != sav1 && p[3] != sav2)) { + break; + } + memcpy(q, p + 4, line_size - 4); + q += line_size - 4; + } + return lines * (line_size - 4); +} + + +/* Compressed VBI format, all found sliced blocks put next to one another + Returns new compressed size */ +static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav) +{ + u32 line_size = itv->vbi.sliced_decoder_line_size; + struct v4l2_decode_vbi_line vbi; + int i; + + /* find the first valid line */ + for (i = 0; i < size; i++, buf++) { + if (buf[0] == 0xff && !buf[1] && !buf[2] && buf[3] == sav) + break; + } + + size -= i; + if (size < line_size) { + return line; + } + for (i = 0; i < size / line_size; i++) { + u8 *p = buf + i * line_size; + + /* Look for SAV code */ + if (p[0] != 0xff || p[1] || p[2] || p[3] != sav) { + continue; + } + vbi.p = p + 4; + itv->video_dec_func(itv, VIDIOC_INT_DECODE_VBI_LINE, &vbi); + if (vbi.type) { + itv->vbi.sliced_data[line].id = vbi.type; + itv->vbi.sliced_data[line].field = vbi.is_second_field; + itv->vbi.sliced_data[line].line = vbi.line; + memcpy(itv->vbi.sliced_data[line].data, vbi.p, 42); + line++; + } + } + return line; +} + +void ivtv_process_vbi_data(struct ivtv *itv, struct ivtv_buffer *buf, + u64 pts_stamp, int streamtype) +{ + u8 *p = (u8 *) buf->buf; + u32 size = buf->bytesused; + int y; + + /* Raw VBI data */ + if (streamtype == IVTV_ENC_STREAM_TYPE_VBI && itv->vbi.sliced_in->service_set == 0) { + u8 type; + + ivtv_buf_swap(buf); + + type = p[3]; + + size = buf->bytesused = compress_raw_buf(itv, p, size); + + /* second field of the frame? */ + if (type == itv->vbi.raw_decoder_sav_even_field) { + /* Dirty hack needed for backwards + compatibility of old VBI software. */ + p += size - 4; + memcpy(p, &itv->vbi.frame, 4); + itv->vbi.frame++; + } + return; + } + + /* Sliced VBI data with data insertion */ + if (streamtype == IVTV_ENC_STREAM_TYPE_VBI) { + int lines; + + ivtv_buf_swap(buf); + + /* first field */ + lines = compress_sliced_buf(itv, 0, p, size / 2, + itv->vbi.sliced_decoder_sav_odd_field); + /* second field */ + /* experimentation shows that the second half does not always begin + at the exact address. So start a bit earlier (hence 32). */ + lines = compress_sliced_buf(itv, lines, p + size / 2 - 32, size / 2 + 32, + itv->vbi.sliced_decoder_sav_even_field); + /* always return at least one empty line */ + if (lines == 0) { + itv->vbi.sliced_data[0].id = 0; + itv->vbi.sliced_data[0].line = 0; + itv->vbi.sliced_data[0].field = 0; + lines = 1; + } + buf->bytesused = size = lines * sizeof(itv->vbi.sliced_data[0]); + memcpy(p, &itv->vbi.sliced_data[0], size); + + if (itv->vbi.insert_mpeg) { + copy_vbi_data(itv, lines, pts_stamp); + } + itv->vbi.frame++; + return; + } + + /* Sliced VBI re-inserted from an MPEG stream */ + if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) { + /* If the size is not 4-byte aligned, then the starting address + for the swapping is also shifted. After swapping the data the + real start address of the VBI data is exactly 4 bytes after the + original start. It's a bit fiddly but it works like a charm. 
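+	   For example, when size & 3 is 3 the swap starts at p + 1, and the
+	   converter then reads from p + 3, i.e. exactly 4 bytes past the
+	   original start.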
+ Non-4-byte alignment happens when an lseek is done on the input + mpeg file to a non-4-byte aligned position. So on arrival here + the VBI data is also non-4-byte aligned. */ + int offset = size & 3; + int cnt; + + if (offset) { + p += 4 - offset; + } + /* Swap Buffer */ + for (y = 0; y < size; y += 4) { + swab32s((u32 *)(p + y)); + } + + cnt = ivtv_convert_ivtv_vbi(itv, p + offset); + memcpy(buf->buf, itv->vbi.sliced_dec_data, cnt); + buf->bytesused = cnt; + + passthrough_vbi_data(itv, cnt / sizeof(itv->vbi.sliced_dec_data[0])); + return; + } +} + +void ivtv_disable_vbi(struct ivtv *itv) +{ + clear_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags); + clear_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags); + clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags); + ivtv_set_wss(itv, 0, 0); + ivtv_set_cc(itv, 0, 0, 0, 0, 0); + ivtv_set_vps(itv, 0, 0, 0, 0, 0, 0); + itv->vbi.vps_found = itv->vbi.wss_found = 0; + itv->vbi.wss = 0; + itv->vbi.cc_pos = 0; +} + +void vbi_work_handler(struct work_struct *work) +{ + struct vbi_info *info = container_of(work, struct vbi_info, work_queue); + struct ivtv *itv = container_of(info, struct ivtv, vbi); + struct v4l2_sliced_vbi_data data; + DEFINE_WAIT(wait); + + /* Lock */ + if (itv->output_mode == OUT_PASSTHROUGH) { + /* Note: currently only the saa7115 is used in a PVR350, + so these commands are for now saa7115 specific. */ + if (itv->is_50hz) { + data.id = V4L2_SLICED_WSS_625; + data.field = 0; + + if (itv->video_dec_func(itv, VIDIOC_INT_G_VBI_DATA, &data) == 0) { + ivtv_set_wss(itv, 1, data.data[0] & 0xf); + itv->vbi.wss_no_update = 0; + } else if (itv->vbi.wss_no_update == 4) { + ivtv_set_wss(itv, 1, 0x8); /* 4x3 full format */ + } else { + itv->vbi.wss_no_update++; + } + } + else { + u8 c1 = 0, c2 = 0, c3 = 0, c4 = 0; + int mode = 0; + + data.id = V4L2_SLICED_CAPTION_525; + data.field = 0; + if (itv->video_dec_func(itv, VIDIOC_INT_G_VBI_DATA, &data) == 0) { + mode |= 1; + c1 = data.data[0]; + c2 = data.data[1]; + } + data.field = 1; + if (itv->video_dec_func(itv, VIDIOC_INT_G_VBI_DATA, &data) == 0) { + mode |= 2; + c3 = data.data[0]; + c4 = data.data[1]; + } + if (mode) { + itv->vbi.cc_no_update = 0; + ivtv_set_cc(itv, mode, c1, c2, c3, c4); + } else if (itv->vbi.cc_no_update == 4) { + ivtv_set_cc(itv, 0, 0, 0, 0, 0); + } else { + itv->vbi.cc_no_update++; + } + } + return; + } + + if (test_and_clear_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags)) { + /* Lock */ + ivtv_set_wss(itv, itv->vbi.wss_found, itv->vbi.wss & 0xf); + } + + if (test_and_clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags)) { + if (itv->vbi.cc_pos == 0) { + ivtv_set_cc(itv, 3, 0x80, 0x80, 0x80, 0x80); + } + while (itv->vbi.cc_pos) { + u8 cc_odd0 = itv->vbi.cc_data_odd[0]; + u8 cc_odd1 = itv->vbi.cc_data_odd[1]; + u8 cc_even0 = itv->vbi.cc_data_even[0]; + u8 cc_even1 = itv->vbi.cc_data_even[1]; + + memcpy(itv->vbi.cc_data_odd, itv->vbi.cc_data_odd + 2, sizeof(itv->vbi.cc_data_odd) - 2); + memcpy(itv->vbi.cc_data_even, itv->vbi.cc_data_even + 2, sizeof(itv->vbi.cc_data_even) - 2); + itv->vbi.cc_pos -= 2; + if (itv->vbi.cc_pos && cc_odd0 == 0x80 && cc_odd1 == 0x80) + continue; + + /* Send to Saa7127 */ + ivtv_set_cc(itv, 3, cc_odd0, cc_odd1, cc_even0, cc_even1); + if (itv->vbi.cc_pos == 0) + set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags); + break; + } + } + + if (test_and_clear_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags)) { + /* Lock */ + ivtv_set_vps(itv, itv->vbi.vps_found, + itv->vbi.vps[0], itv->vbi.vps[1], + itv->vbi.vps[2], itv->vbi.vps[3], itv->vbi.vps[4]); + } +} diff --git a/drivers/media/video/ivtv/ivtv-vbi.h 
b/drivers/media/video/ivtv/ivtv-vbi.h new file mode 100644 index 000000000000..c897e9bd4f92 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-vbi.h @@ -0,0 +1,27 @@ +/* + Vertical Blank Interval support functions + Copyright (C) 2004-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count); +void ivtv_process_vbi_data(struct ivtv *itv, struct ivtv_buffer *buf, + u64 pts_stamp, int streamtype); +int ivtv_used_line(struct ivtv *itv, int line, int field); +void ivtv_disable_vbi(struct ivtv *itv); +void ivtv_set_vbi(unsigned long arg); +void vbi_work_handler(struct work_struct *work); +void vbi_schedule_work(struct ivtv *itv); diff --git a/drivers/media/video/ivtv/ivtv-version.h b/drivers/media/video/ivtv/ivtv-version.h new file mode 100644 index 000000000000..85530a3cd369 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-version.h @@ -0,0 +1,26 @@ +/* + ivtv driver version information + Copyright (C) 2005-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define IVTV_DRIVER_NAME "ivtv" +#define IVTV_DRIVER_VERSION_MAJOR 1 +#define IVTV_DRIVER_VERSION_MINOR 0 +#define IVTV_DRIVER_VERSION_PATCHLEVEL 0 + +#define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL) +#define IVTV_DRIVER_VERSION KERNEL_VERSION(IVTV_DRIVER_VERSION_MAJOR,IVTV_DRIVER_VERSION_MINOR,IVTV_DRIVER_VERSION_PATCHLEVEL) diff --git a/drivers/media/video/ivtv/ivtv-video.c b/drivers/media/video/ivtv/ivtv-video.c new file mode 100644 index 000000000000..77e42d13cdee --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-video.c @@ -0,0 +1,150 @@ +/* + saa7127 interface functions + Copyright (C) 2004-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-video.h" +#include "ivtv-i2c.h" +#include "ivtv-gpio.h" +#include "ivtv-cards.h" +#include +#include + +void ivtv_set_vps(struct ivtv *itv, int enabled, u8 vps1, u8 vps2, u8 vps3, + u8 vps4, u8 vps5) +{ + struct v4l2_sliced_vbi_data data; + + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return; + data.id = V4L2_SLICED_VPS; + data.field = 0; + data.line = enabled ? 16 : 0; + data.data[4] = vps1; + data.data[10] = vps2; + data.data[11] = vps3; + data.data[12] = vps4; + data.data[13] = vps5; + ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data); +} + +void ivtv_set_cc(struct ivtv *itv, int mode, u8 cc1, u8 cc2, u8 cc3, u8 cc4) +{ + struct v4l2_sliced_vbi_data data; + + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return; + data.id = V4L2_SLICED_CAPTION_525; + data.field = 0; + data.line = (mode & 1) ? 21 : 0; + data.data[0] = cc1; + data.data[1] = cc2; + ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data); + data.field = 1; + data.line = (mode & 2) ? 21 : 0; + data.data[0] = cc3; + data.data[1] = cc4; + ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data); +} + +void ivtv_set_wss(struct ivtv *itv, int enabled, int mode) +{ + struct v4l2_sliced_vbi_data data; + + if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) + return; + /* When using a 50 Hz system, always turn on the + wide screen signal with 4x3 ratio as the default. + Turning this signal on and off can confuse certain + TVs. As far as I can tell there is no reason not to + transmit this signal. */ + if ((itv->std & V4L2_STD_625_50) && !enabled) { + enabled = 1; + mode = 0x08; /* 4x3 full format */ + } + data.id = V4L2_SLICED_WSS_625; + data.field = 0; + data.line = enabled ? 23 : 0; + data.data[0] = mode & 0xff; + data.data[1] = (mode >> 8) & 0xff; + ivtv_saa7127(itv, VIDIOC_INT_S_VBI_DATA, &data); +} + +void ivtv_encoder_enable(struct ivtv *itv, int enabled) +{ + if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { + ivtv_saa7127(itv, enabled ? 
VIDIOC_STREAMON : VIDIOC_STREAMOFF, + &enabled); + } +} + +void ivtv_video_set_io(struct ivtv *itv) +{ + struct v4l2_routing route; + int inp = itv->active_input; + u32 type; + + route.input = itv->card->video_inputs[inp].video_input; + route.output = 0; + itv->video_dec_func(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); + + type = itv->card->video_inputs[inp].video_type; + + if (type == IVTV_CARD_INPUT_VID_TUNER) { + route.input = 0; /* Tuner */ + } else if (type < IVTV_CARD_INPUT_COMPOSITE1) { + route.input = 2; /* S-Video */ + } else { + route.input = 1; /* Composite */ + } + + if (itv->card->hw_video & IVTV_HW_GPIO) + ivtv_gpio(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); + + if (itv->card->hw_video & IVTV_HW_UPD64031A) { + if (type == IVTV_CARD_INPUT_VID_TUNER || + type >= IVTV_CARD_INPUT_COMPOSITE1) { + /* Composite: GR on, connect to 3DYCS */ + route.input = UPD64031A_GR_ON | UPD64031A_3DYCS_COMPOSITE; + } else { + /* S-Video: GR bypassed, turn it off */ + route.input = UPD64031A_GR_OFF | UPD64031A_3DYCS_DISABLE; + } + route.input |= itv->card->gr_config; + + ivtv_upd64031a(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); + } + + if (itv->card->hw_video & IVTV_HW_UPD6408X) { + route.input = UPD64083_YCS_MODE; + if (type > IVTV_CARD_INPUT_VID_TUNER && + type < IVTV_CARD_INPUT_COMPOSITE1) { + /* S-Video uses YCNR mode and internal Y-ADC, the upd64031a + is not used. */ + route.input |= UPD64083_YCNR_MODE; + } + else if (itv->card->hw_video & IVTV_HW_UPD64031A) { + /* Use upd64031a output for tuner and composite(CX23416GYC only) inputs */ + if ((type == IVTV_CARD_INPUT_VID_TUNER)|| + (itv->card->type == IVTV_CARD_CX23416GYC)) { + route.input |= UPD64083_EXT_Y_ADC; + } + } + ivtv_upd64083(itv, VIDIOC_INT_S_VIDEO_ROUTING, &route); + } +} diff --git a/drivers/media/video/ivtv/ivtv-video.h b/drivers/media/video/ivtv/ivtv-video.h new file mode 100644 index 000000000000..5efedebe317b --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-video.h @@ -0,0 +1,25 @@ +/* + saa7127 interface functions + Copyright (C) 2004-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +void ivtv_set_wss(struct ivtv *itv, int enabled, int mode); +void ivtv_set_cc(struct ivtv *itv, int mode, u8 cc1, u8 cc2, u8 cc3, u8 cc4); +void ivtv_set_vps(struct ivtv *itv, int enabled, u8 vps1, u8 vps2, u8 vps3, + u8 vps4, u8 vps5); +void ivtv_encoder_enable(struct ivtv *itv, int enabled); +void ivtv_video_set_io(struct ivtv *itv); diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c new file mode 100644 index 000000000000..e49ecef93046 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-yuv.c @@ -0,0 +1,1129 @@ +/* + yuv support + + Copyright (C) 2007 Ian Armstrong + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ivtv-driver.h" +#include "ivtv-queue.h" +#include "ivtv-udma.h" +#include "ivtv-irq.h" + +static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma, + struct ivtv_dma_frame *args) +{ + struct ivtv_dma_page_info y_dma; + struct ivtv_dma_page_info uv_dma; + + int i; + int y_pages, uv_pages; + + unsigned long y_buffer_offset, uv_buffer_offset; + int y_decode_height, uv_decode_height, y_size; + int frame = atomic_read(&itv->yuv_info.next_fill_frame); + + y_buffer_offset = IVTV_DEC_MEM_START + yuv_offset[frame]; + uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET; + + y_decode_height = uv_decode_height = args->src.height + args->src.top; + + if (y_decode_height < 512-16) + y_buffer_offset += 720 * 16; + + if (y_decode_height & 15) + y_decode_height = (y_decode_height + 16) & ~15; + + if (uv_decode_height & 31) + uv_decode_height = (uv_decode_height + 32) & ~31; + + y_size = 720 * y_decode_height; + + /* Still in USE */ + if (dma->SG_length || dma->page_count) { + IVTV_DEBUG_WARN("prep_user_dma: SG_length %d page_count %d still full?\n", + dma->SG_length, dma->page_count); + return -EBUSY; + } + + ivtv_udma_get_page_info (&y_dma, (unsigned long)args->y_source, 720 * y_decode_height); + ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height); + + /* Get user pages for DMA Xfer */ + down_read(¤t->mm->mmap_sem); + y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], NULL); + uv_pages = get_user_pages(current, current->mm, uv_dma.uaddr, uv_dma.page_count, 0, 1, &dma->map[y_pages], NULL); + up_read(¤t->mm->mmap_sem); + + dma->page_count = y_dma.page_count + uv_dma.page_count; + + if (y_pages + uv_pages != dma->page_count) { + IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n", + y_pages + uv_pages, dma->page_count); + + for (i = 0; i < dma->page_count; i++) { + put_page(dma->map[i]); + } + dma->page_count = 0; + return -EINVAL; + } + + /* Fill & map SG List */ 
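+	/* The inner call builds the Y plane entries from slot 0 and returns
+	   the next free slot, at which the outer call appends the UV plane
+	   entries to the same scatterlist. */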
+ ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0)); + dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE); + + /* Fill SG Array with new values */ + ivtv_udma_fill_sg_array (dma, y_buffer_offset, uv_buffer_offset, y_size); + + /* If we've offset the y plane, ensure top area is blanked */ + if (args->src.height + args->src.top < 512-16) { + if (itv->yuv_info.blanking_dmaptr) { + dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16); + dma->SGarray[dma->SG_length].src = cpu_to_le32(itv->yuv_info.blanking_dmaptr); + dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DEC_MEM_START + yuv_offset[frame]); + dma->SG_length++; + } + } + + /* Tag SG Array with Interrupt Bit */ + dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000); + + ivtv_udma_sync_for_device(itv); + return 0; +} + +/* We rely on a table held in the firmware - Quick check. */ +int ivtv_yuv_filter_check(struct ivtv *itv) +{ + int i, offset_y, offset_uv; + + for (i=0, offset_y = 16, offset_uv = 4; i<16; i++, offset_y += 24, offset_uv += 12) { + if ((read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + offset_y) != i << 16) || + (read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + offset_uv) != i << 16)) { + IVTV_WARN ("YUV filter table not found in firmware.\n"); + return -1; + } + } + return 0; +} + +static void ivtv_yuv_filter(struct ivtv *itv, int h_filter, int v_filter_1, int v_filter_2) +{ + int filter_index, filter_line; + + /* If any filter is -1, then don't update it */ + if (h_filter > -1) { + if (h_filter > 4) h_filter = 4; + filter_index = h_filter * 384; + filter_line = 0; + while (filter_line < 16) { + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02804); + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x0281c); + filter_index += 4; + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02808); + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02820); + filter_index += 4; + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x0280c); + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02824); + filter_index += 4; + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02810); + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02828); + filter_index += 4; + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x02814); + write_reg(read_dec(IVTV_YUV_HORIZONTAL_FILTER_OFFSET + filter_index), 0x0282c); + filter_index += 8; + write_reg(0, 0x02818); + write_reg(0, 0x02830); + filter_line ++; + } + IVTV_DEBUG_YUV("h_filter -> %d\n",h_filter); + } + + if (v_filter_1 > -1) { + if (v_filter_1 > 4) v_filter_1 = 4; + filter_index = v_filter_1 * 192; + filter_line = 0; + while (filter_line < 16) { + write_reg(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + filter_index), 0x02900); + filter_index += 4; + write_reg(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + filter_index), 0x02904); + filter_index += 8; + write_reg(0, 0x02908); + filter_line ++; + } + IVTV_DEBUG_YUV("v_filter_1 -> %d\n",v_filter_1); + } + + if (v_filter_2 > -1) { + if (v_filter_2 > 4) v_filter_2 = 4; + filter_index = v_filter_2 * 192; + filter_line = 0; + while (filter_line < 16) { + write_reg(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + filter_index), 0x0290c); + filter_index += 4; + write_reg(read_dec(IVTV_YUV_VERTICAL_FILTER_OFFSET + filter_index), 0x02910); + filter_index += 8; + write_reg(0, 0x02914); + filter_line ++; + 
} + IVTV_DEBUG_YUV("v_filter_2 -> %d\n",v_filter_2); + } +} + +static void ivtv_yuv_handle_horizontal(struct ivtv *itv, struct yuv_frame_info *window) +{ + u32 reg_2834, reg_2838, reg_283c; + u32 reg_2844, reg_2854, reg_285c; + u32 reg_2864, reg_2874, reg_2890; + u32 reg_2870, reg_2870_base, reg_2870_offset; + int x_cutoff; + int h_filter; + u32 master_width; + + IVTV_DEBUG_WARN( "Need to adjust to width %d src_w %d dst_w %d src_x %d dst_x %d\n", + window->tru_w, window->src_w, window->dst_w,window->src_x, window->dst_x); + + /* How wide is the src image */ + x_cutoff = window->src_w + window->src_x; + + /* Set the display width */ + reg_2834 = window->dst_w; + reg_2838 = reg_2834; + + /* Set the display position */ + reg_2890 = window->dst_x; + + /* Index into the image horizontally */ + reg_2870 = 0; + + /* 2870 is normally fudged to align video coords with osd coords. + If running full screen, it causes an unwanted left shift + Remove the fudge if we almost fill the screen. + Gradually adjust the offset to avoid the video 'snapping' + left/right if it gets dragged through this region. + Only do this if osd is full width. */ + if (window->vis_w == 720) { + if ((window->tru_x - window->pan_x > -1) && (window->tru_x - window->pan_x <= 40) && (window->dst_w >= 680)){ + reg_2870 = 10 - (window->tru_x - window->pan_x) / 4; + } + else if ((window->tru_x - window->pan_x < 0) && (window->tru_x - window->pan_x >= -20) && (window->dst_w >= 660)) { + reg_2870 = (10 + (window->tru_x - window->pan_x) / 2); + } + + if (window->dst_w >= window->src_w) + reg_2870 = reg_2870 << 16 | reg_2870; + else + reg_2870 = ((reg_2870 & ~1) << 15) | (reg_2870 & ~1); + } + + if (window->dst_w < window->src_w) + reg_2870 = 0x000d000e - reg_2870; + else + reg_2870 = 0x0012000e - reg_2870; + + /* We're also using 2870 to shift the image left (src_x & negative dst_x) */ + reg_2870_offset = (window->src_x*((window->dst_w << 21)/window->src_w))>>19; + + if (window->dst_w >= window->src_w) { + x_cutoff &= ~1; + master_width = (window->src_w * 0x00200000) / (window->dst_w); + if (master_width * window->dst_w != window->src_w * 0x00200000) master_width ++; + reg_2834 = (reg_2834 << 16) | x_cutoff; + reg_2838 = (reg_2838 << 16) | x_cutoff; + reg_283c = master_width >> 2; + reg_2844 = master_width >> 2; + reg_2854 = master_width; + reg_285c = master_width >> 1; + reg_2864 = master_width >> 1; + + /* We also need to factor in the scaling + (src_w - dst_w) / (src_w / 4) */ + if (window->dst_w > window->src_w) + reg_2870_base = ((window->dst_w - window->src_w)<<16) / (window->src_w <<14); + else + reg_2870_base = 0; + + reg_2870 += (((reg_2870_offset << 14) & 0xFFFF0000) | reg_2870_offset >> 2) + (reg_2870_base << 17 | reg_2870_base); + reg_2874 = 0; + } + else if (window->dst_w < window->src_w / 2) { + master_width = (window->src_w * 0x00080000) / window->dst_w; + if (master_width * window->dst_w != window->src_w * 0x00080000) master_width ++; + reg_2834 = (reg_2834 << 16) | x_cutoff; + reg_2838 = (reg_2838 << 16) | x_cutoff; + reg_283c = master_width >> 2; + reg_2844 = master_width >> 1; + reg_2854 = master_width; + reg_285c = master_width >> 1; + reg_2864 = master_width >> 1; + reg_2870 += (((reg_2870_offset << 15) & 0xFFFF0000) | reg_2870_offset); + reg_2870 += (5 - (((window->src_w + window->src_w / 2) - 1) / window->dst_w)) << 16; + reg_2874 = 0x00000012; + } + else { + master_width = (window->src_w * 0x00100000) / window->dst_w; + if (master_width * window->dst_w != window->src_w * 0x00100000) master_width ++; + reg_2834 = 
(reg_2834 << 16) | x_cutoff; + reg_2838 = (reg_2838 << 16) | x_cutoff; + reg_283c = master_width >> 2; + reg_2844 = master_width >> 1; + reg_2854 = master_width; + reg_285c = master_width >> 1; + reg_2864 = master_width >> 1; + reg_2870 += (((reg_2870_offset << 14) & 0xFFFF0000) | reg_2870_offset >> 1); + reg_2870 += (5 - (((window->src_w * 3) - 1) / window->dst_w)) << 16; + reg_2874 = 0x00000001; + } + + /* Select the horizontal filter */ + if (window->src_w == window->dst_w) { + /* An exact size match uses filter 0 */ + h_filter = 0; + } + else { + /* Figure out which filter to use */ + h_filter = ((window->src_w << 16) / window->dst_w) >> 15; + h_filter = (h_filter >> 1) + (h_filter & 1); + /* Only an exact size match can use filter 0 */ + if (h_filter == 0) h_filter = 1; + } + + write_reg(reg_2834, 0x02834); + write_reg(reg_2838, 0x02838); + IVTV_DEBUG_YUV("Update reg 0x2834 %08x->%08x 0x2838 %08x->%08x\n",itv->yuv_info.reg_2834, reg_2834, itv->yuv_info.reg_2838, reg_2838); + + write_reg(reg_283c, 0x0283c); + write_reg(reg_2844, 0x02844); + + IVTV_DEBUG_YUV("Update reg 0x283c %08x->%08x 0x2844 %08x->%08x\n",itv->yuv_info.reg_283c, reg_283c, itv->yuv_info.reg_2844, reg_2844); + + write_reg(0x00080514, 0x02840); + write_reg(0x00100514, 0x02848); + IVTV_DEBUG_YUV("Update reg 0x2840 %08x->%08x 0x2848 %08x->%08x\n",itv->yuv_info.reg_2840, 0x00080514, itv->yuv_info.reg_2848, 0x00100514); + + write_reg(reg_2854, 0x02854); + IVTV_DEBUG_YUV("Update reg 0x2854 %08x->%08x \n",itv->yuv_info.reg_2854, reg_2854); + + write_reg(reg_285c, 0x0285c); + write_reg(reg_2864, 0x02864); + IVTV_DEBUG_YUV("Update reg 0x285c %08x->%08x 0x2864 %08x->%08x\n",itv->yuv_info.reg_285c, reg_285c, itv->yuv_info.reg_2864, reg_2864); + + write_reg(reg_2874, 0x02874); + IVTV_DEBUG_YUV("Update reg 0x2874 %08x->%08x\n",itv->yuv_info.reg_2874, reg_2874); + + write_reg(reg_2870, 0x02870); + IVTV_DEBUG_YUV("Update reg 0x2870 %08x->%08x\n",itv->yuv_info.reg_2870, reg_2870); + + write_reg( reg_2890,0x02890); + IVTV_DEBUG_YUV("Update reg 0x2890 %08x->%08x\n",itv->yuv_info.reg_2890, reg_2890); + + /* Only update the filter if we really need to */ + if (h_filter != itv->yuv_info.h_filter) { + ivtv_yuv_filter (itv,h_filter,-1,-1); + itv->yuv_info.h_filter = h_filter; + } +} + +static void ivtv_yuv_handle_vertical(struct ivtv *itv, struct yuv_frame_info *window) +{ + u32 master_height; + u32 reg_2918, reg_291c, reg_2920, reg_2928; + u32 reg_2930, reg_2934, reg_293c; + u32 reg_2940, reg_2944, reg_294c; + u32 reg_2950, reg_2954, reg_2958, reg_295c; + u32 reg_2960, reg_2964, reg_2968, reg_296c; + u32 reg_289c; + u32 src_y_major_y, src_y_minor_y; + u32 src_y_major_uv, src_y_minor_uv; + u32 reg_2964_base, reg_2968_base; + int v_filter_1, v_filter_2; + + IVTV_DEBUG_WARN("Need to adjust to height %d src_h %d dst_h %d src_y %d dst_y %d\n", + window->tru_h, window->src_h, window->dst_h,window->src_y, window->dst_y); + + /* What scaling mode is being used... */ + if (window->interlaced_y) { + IVTV_DEBUG_YUV("Scaling mode Y: Interlaced\n"); + } + else { + IVTV_DEBUG_YUV("Scaling mode Y: Progressive\n"); + } + + if (window->interlaced_uv) { + IVTV_DEBUG_YUV("Scaling mode UV: Interlaced\n"); + } + else { + IVTV_DEBUG_YUV("Scaling mode UV: Progressive\n"); + } + + /* What is the source video being treated as... 
*/ + if (itv->yuv_info.frame_interlaced) { + IVTV_DEBUG_WARN("Source video: Interlaced\n"); + } + else { + IVTV_DEBUG_WARN("Source video: Non-interlaced\n"); + } + + /* We offset into the image using two different index methods, so split + the y source coord into two parts. */ + if (window->src_y < 8) { + src_y_minor_uv = window->src_y; + src_y_major_uv = 0; + } + else { + src_y_minor_uv = 8; + src_y_major_uv = window->src_y - 8; + } + + src_y_minor_y = src_y_minor_uv; + src_y_major_y = src_y_major_uv; + + if (window->offset_y) src_y_minor_y += 16; + + if (window->interlaced_y) + reg_2918 = (window->dst_h << 16) | (window->src_h + src_y_minor_y); + else + reg_2918 = (window->dst_h << 16) | ((window->src_h + src_y_minor_y) << 1); + + if (window->interlaced_uv) + reg_291c = (window->dst_h << 16) | ((window->src_h + src_y_minor_uv) >> 1); + else + reg_291c = (window->dst_h << 16) | (window->src_h + src_y_minor_uv); + + reg_2964_base = (src_y_minor_y * ((window->dst_h << 16)/window->src_h)) >> 14; + reg_2968_base = (src_y_minor_uv * ((window->dst_h << 16)/window->src_h)) >> 14; + + if (window->dst_h / 2 >= window->src_h && !window->interlaced_y) { + master_height = (window->src_h * 0x00400000) / window->dst_h; + if ((window->src_h * 0x00400000) - (master_height * window->dst_h) >= window->dst_h / 2) master_height ++; + reg_2920 = master_height >> 2; + reg_2928 = master_height >> 3; + reg_2930 = master_height; + reg_2940 = master_height >> 1; + reg_2964_base >>= 3; + reg_2968_base >>= 3; + reg_296c = 0x00000000; + } + else if (window->dst_h >= window->src_h) { + master_height = (window->src_h * 0x00400000) / window->dst_h; + master_height = (master_height >> 1) + (master_height & 1); + reg_2920 = master_height >> 2; + reg_2928 = master_height >> 2; + reg_2930 = master_height; + reg_2940 = master_height >> 1; + reg_296c = 0x00000000; + if (window->interlaced_y) { + reg_2964_base >>= 3; + } + else { + reg_296c ++; + reg_2964_base >>= 2; + } + if (window->interlaced_uv) reg_2928 >>= 1; + reg_2968_base >>= 3; + } + else if (window->dst_h >= window->src_h / 2) { + master_height = (window->src_h * 0x00200000) / window->dst_h; + master_height = (master_height >> 1) + (master_height & 1); + reg_2920 = master_height >> 2; + reg_2928 = master_height >> 2; + reg_2930 = master_height; + reg_2940 = master_height; + reg_296c = 0x00000101; + if (window->interlaced_y) { + reg_2964_base >>= 2; + } + else { + reg_296c ++; + reg_2964_base >>= 1; + } + if (window->interlaced_uv) reg_2928 >>= 1; + reg_2968_base >>= 2; + } + else { + master_height = (window->src_h * 0x00100000) / window->dst_h; + master_height = (master_height >> 1) + (master_height & 1); + reg_2920 = master_height >> 2; + reg_2928 = master_height >> 2; + reg_2930 = master_height; + reg_2940 = master_height; + reg_2964_base >>= 1; + reg_2968_base >>= 2; + reg_296c = 0x00000102; + } + + /* FIXME These registers change depending on scaled / unscaled output + We really need to work out what they should be */ + if (window->src_h == window->dst_h){ + reg_2934 = 0x00020000; + reg_293c = 0x00100000; + reg_2944 = 0x00040000; + reg_294c = 0x000b0000; + } + else { + reg_2934 = 0x00000FF0; + reg_293c = 0x00000FF0; + reg_2944 = 0x00000FF0; + reg_294c = 0x00000FF0; + } + + /* The first line to be displayed */ + reg_2950 = 0x00010000 + src_y_major_y; + if (window->interlaced_y) reg_2950 += 0x00010000; + reg_2954 = reg_2950 + 1; + + reg_2958 = 0x00010000 + (src_y_major_y >> 1); + if (window->interlaced_uv) reg_2958 += 0x00010000; + reg_295c = reg_2958 + 1; + + if 
(itv->yuv_info.decode_height == 480) + reg_289c = 0x011e0017; + else + reg_289c = 0x01500017; + + if (window->dst_y < 0) + reg_289c = (reg_289c - ((window->dst_y & ~1)<<15))-(window->dst_y >>1); + else + reg_289c = (reg_289c + ((window->dst_y & ~1)<<15))+(window->dst_y >>1); + + /* How much of the source to decode. + Take into account the source offset */ + reg_2960 = ((src_y_minor_y + window->src_h + src_y_major_y) - 1 ) | + ((((src_y_minor_uv + window->src_h + src_y_major_uv) - 1) & ~1) << 15); + + /* Calculate correct value for register 2964 */ + if (window->src_h == window->dst_h) + reg_2964 = 1; + else { + reg_2964 = 2 + ((window->dst_h << 1) / window->src_h); + reg_2964 = (reg_2964 >> 1) + (reg_2964 & 1); + } + reg_2968 = (reg_2964 << 16) + reg_2964 + (reg_2964 >> 1); + reg_2964 = (reg_2964 << 16) + reg_2964 + (reg_2964 * 46 / 94); + + /* Okay, we've wasted time working out the correct value, + but if we use it, it fouls the the window alignment. + Fudge it to what we want... */ + reg_2964 = 0x00010001 + ((reg_2964 & 0x0000FFFF) - (reg_2964 >> 16)); + reg_2968 = 0x00010001 + ((reg_2968 & 0x0000FFFF) - (reg_2968 >> 16)); + + /* Deviate further from what it should be. I find the flicker headache + inducing so try to reduce it slightly. Leave 2968 as-is otherwise + colours foul. */ + if ((reg_2964 != 0x00010001) && (window->dst_h / 2 <= window->src_h)) + reg_2964 = (reg_2964 & 0xFFFF0000) + ((reg_2964 & 0x0000FFFF)/2); + + if (!window->interlaced_y) reg_2964 -= 0x00010001; + if (!window->interlaced_uv) reg_2968 -= 0x00010001; + + reg_2964 += ((reg_2964_base << 16) | reg_2964_base); + reg_2968 += ((reg_2968_base << 16) | reg_2968_base); + + /* Select the vertical filter */ + if (window->src_h == window->dst_h) { + /* An exact size match uses filter 0/1 */ + v_filter_1 = 0; + v_filter_2 = 1; + } + else { + /* Figure out which filter to use */ + v_filter_1 = ((window->src_h << 16) / window->dst_h) >> 15; + v_filter_1 = (v_filter_1 >> 1) + (v_filter_1 & 1); + /* Only an exact size match can use filter 0 */ + if (v_filter_1 == 0) v_filter_1 = 1; + v_filter_2 = v_filter_1; + } + + write_reg(reg_2934, 0x02934); + write_reg(reg_293c, 0x0293c); + IVTV_DEBUG_YUV("Update reg 0x2934 %08x->%08x 0x293c %08x->%08x\n",itv->yuv_info.reg_2934, reg_2934, itv->yuv_info.reg_293c, reg_293c); + write_reg(reg_2944, 0x02944); + write_reg(reg_294c, 0x0294c); + IVTV_DEBUG_YUV("Update reg 0x2944 %08x->%08x 0x294c %08x->%08x\n",itv->yuv_info.reg_2944, reg_2944, itv->yuv_info.reg_294c, reg_294c); + + /* Ensure 2970 is 0 (does it ever change ?) 
*/ +/* write_reg(0,0x02970); */ +/* IVTV_DEBUG_YUV("Update reg 0x2970 %08x->%08x\n",itv->yuv_info.reg_2970, 0); */ + + write_reg(reg_2930, 0x02938); + write_reg(reg_2930, 0x02930); + IVTV_DEBUG_YUV("Update reg 0x2930 %08x->%08x 0x2938 %08x->%08x\n",itv->yuv_info.reg_2930, reg_2930, itv->yuv_info.reg_2938, reg_2930); + + write_reg(reg_2928, 0x02928); + write_reg(reg_2928+0x514, 0x0292C); + IVTV_DEBUG_YUV("Update reg 0x2928 %08x->%08x 0x292c %08x->%08x\n",itv->yuv_info.reg_2928, reg_2928, itv->yuv_info.reg_292c, reg_2928+0x514); + + write_reg(reg_2920, 0x02920); + write_reg(reg_2920+0x514, 0x02924); + IVTV_DEBUG_YUV("Update reg 0x2920 %08x->%08x 0x2924 %08x->%08x\n",itv->yuv_info.reg_2920, reg_2920, itv->yuv_info.reg_2924, 0x514+reg_2920); + + write_reg (reg_2918,0x02918); + write_reg (reg_291c,0x0291C); + IVTV_DEBUG_YUV("Update reg 0x2918 %08x->%08x 0x291C %08x->%08x\n",itv->yuv_info.reg_2918,reg_2918,itv->yuv_info.reg_291c,reg_291c); + + write_reg(reg_296c, 0x0296c); + IVTV_DEBUG_YUV("Update reg 0x296c %08x->%08x\n",itv->yuv_info.reg_296c, reg_296c); + + write_reg(reg_2940, 0x02948); + write_reg(reg_2940, 0x02940); + IVTV_DEBUG_YUV("Update reg 0x2940 %08x->%08x 0x2948 %08x->%08x\n",itv->yuv_info.reg_2940, reg_2940, itv->yuv_info.reg_2948, reg_2940); + + write_reg(reg_2950, 0x02950); + write_reg(reg_2954, 0x02954); + IVTV_DEBUG_YUV("Update reg 0x2950 %08x->%08x 0x2954 %08x->%08x\n",itv->yuv_info.reg_2950, reg_2950, itv->yuv_info.reg_2954, reg_2954); + + write_reg(reg_2958, 0x02958); + write_reg(reg_295c, 0x0295C); + IVTV_DEBUG_YUV("Update reg 0x2958 %08x->%08x 0x295C %08x->%08x\n",itv->yuv_info.reg_2958, reg_2958, itv->yuv_info.reg_295c, reg_295c); + + write_reg(reg_2960, 0x02960); + IVTV_DEBUG_YUV("Update reg 0x2960 %08x->%08x \n",itv->yuv_info.reg_2960, reg_2960); + + write_reg(reg_2964, 0x02964); + write_reg(reg_2968, 0x02968); + IVTV_DEBUG_YUV("Update reg 0x2964 %08x->%08x 0x2968 %08x->%08x\n",itv->yuv_info.reg_2964, reg_2964, itv->yuv_info.reg_2968, reg_2968); + + write_reg( reg_289c,0x0289c); + IVTV_DEBUG_YUV("Update reg 0x289c %08x->%08x\n",itv->yuv_info.reg_289c, reg_289c); + + /* Only update filter 1 if we really need to */ + if (v_filter_1 != itv->yuv_info.v_filter_1) { + ivtv_yuv_filter (itv,-1,v_filter_1,-1); + itv->yuv_info.v_filter_1 = v_filter_1; + } + + /* Only update filter 2 if we really need to */ + if (v_filter_2 != itv->yuv_info.v_filter_2) { + ivtv_yuv_filter (itv,-1,-1,v_filter_2); + itv->yuv_info.v_filter_2 = v_filter_2; + } + + itv->yuv_info.frame_interlaced_last = itv->yuv_info.frame_interlaced; + itv->yuv_info.lace_threshold_last = itv->yuv_info.lace_threshold; +} + +/* Modify the supplied coordinate information to fit the visible osd area */ +static u32 ivtv_yuv_window_setup (struct ivtv *itv, struct yuv_frame_info *window) +{ + int osd_crop; + u32 osd_scale; + u32 yuv_update = 0; + + /* Work out the lace settings */ + switch (itv->yuv_info.lace_mode) { + case IVTV_YUV_MODE_PROGRESSIVE: /* Progressive mode */ + itv->yuv_info.frame_interlaced = 0; + if (window->tru_h < 512 || (window->tru_h > 576 && window->tru_h < 1021)) + window->interlaced_y = 0; + else + window->interlaced_y = 1; + + if (window->tru_h < 1021 && (window->dst_h >= window->src_h /2)) + window->interlaced_uv = 0; + else + window->interlaced_uv = 1; + break; + + case IVTV_YUV_MODE_AUTO: + if (window->tru_h <= itv->yuv_info.lace_threshold || window->tru_h > 576 || window->tru_w > 720){ + itv->yuv_info.frame_interlaced = 0; + if ((window->tru_h < 512) || + (window->tru_h > 576 && window->tru_h < 
1021) || + (window->tru_w > 720 && window->tru_h < 1021)) + window->interlaced_y = 0; + else + window->interlaced_y = 1; + + if (window->tru_h < 1021 && (window->dst_h >= window->src_h /2)) + window->interlaced_uv = 0; + else + window->interlaced_uv = 1; + } + else { + itv->yuv_info.frame_interlaced = 1; + window->interlaced_y = 1; + window->interlaced_uv = 1; + } + break; + + case IVTV_YUV_MODE_INTERLACED: /* Interlace mode */ + default: + itv->yuv_info.frame_interlaced = 1; + window->interlaced_y = 1; + window->interlaced_uv = 1; + break; + } + + /* Sorry, but no negative coords for src */ + if (window->src_x < 0) window->src_x = 0; + if (window->src_y < 0) window->src_y = 0; + + /* Can only reduce width down to 1/4 original size */ + if ((osd_crop = window->src_w - ( 4 * window->dst_w )) > 0) { + window->src_x += osd_crop / 2; + window->src_w = (window->src_w - osd_crop) & ~3; + window->dst_w = window->src_w / 4; + window->dst_w += window->dst_w & 1; + } + + /* Can only reduce height down to 1/4 original size */ + if (window->src_h / window->dst_h >= 2) { + /* Overflow may be because we're running progressive, so force mode switch */ + window->interlaced_y = 1; + /* Make sure we're still within limits for interlace */ + if ((osd_crop = window->src_h - ( 4 * window->dst_h )) > 0) { + /* If we reach here we'll have to force the height. */ + window->src_y += osd_crop / 2; + window->src_h = (window->src_h - osd_crop) & ~3; + window->dst_h = window->src_h / 4; + window->dst_h += window->dst_h & 1; + } + } + + /* If there's nothing to safe to display, we may as well stop now */ + if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) { + return 0; + } + + /* Ensure video remains inside OSD area */ + osd_scale = (window->src_h << 16) / window->dst_h; + + if ((osd_crop = window->pan_y - window->dst_y) > 0) { + /* Falls off the upper edge - crop */ + window->src_y += (osd_scale * osd_crop) >> 16; + window->src_h -= (osd_scale * osd_crop) >> 16; + window->dst_h -= osd_crop; + window->dst_y = 0; + } + else { + window->dst_y -= window->pan_y; + } + + if ((osd_crop = window->dst_h + window->dst_y - window->vis_h) > 0) { + /* Falls off the lower edge - crop */ + window->dst_h -= osd_crop; + window->src_h -= (osd_scale * osd_crop) >> 16; + } + + osd_scale = (window->src_w << 16) / window->dst_w; + + if ((osd_crop = window->pan_x - window->dst_x) > 0) { + /* Fall off the left edge - crop */ + window->src_x += (osd_scale * osd_crop) >> 16; + window->src_w -= (osd_scale * osd_crop) >> 16; + window->dst_w -= osd_crop; + window->dst_x = 0; + } + else { + window->dst_x -= window->pan_x; + } + + if ((osd_crop = window->dst_w + window->dst_x - window->vis_w) > 0) { + /* Falls off the right edge - crop */ + window->dst_w -= osd_crop; + window->src_w -= (osd_scale * osd_crop) >> 16; + } + + /* The OSD can be moved. Track to it */ + window->dst_x += itv->yuv_info.osd_x_offset; + window->dst_y += itv->yuv_info.osd_y_offset; + + /* Width & height for both src & dst must be even. + Same for coordinates. */ + window->dst_w &= ~1; + window->dst_x &= ~1; + + window->src_w += window->src_x & 1; + window->src_x &= ~1; + + window->src_w &= ~1; + window->dst_w &= ~1; + + window->dst_h &= ~1; + window->dst_y &= ~1; + + window->src_h += window->src_y & 1; + window->src_y &= ~1; + + window->src_h &= ~1; + window->dst_h &= ~1; + + /* Due to rounding, we may have reduced the output size to <1/4 of the source + Check again, but this time just resize. 
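+	   (The fixed-point crop maths and the even-alignment masks above can
+	   each shave a pixel and push the ratio just past 4:1.)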
Don't change source coordinates */ + if (window->dst_w < window->src_w / 4) { + window->src_w &= ~3; + window->dst_w = window->src_w / 4; + window->dst_w += window->dst_w & 1; + } + if (window->dst_h < window->src_h / 4) { + window->src_h &= ~3; + window->dst_h = window->src_h / 4; + window->dst_h += window->dst_h & 1; + } + + /* Check again. If there's nothing to safe to display, stop now */ + if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) { + return 0; + } + + /* Both x offset & width are linked, so they have to be done together */ + if ((itv->yuv_info.old_frame_info.dst_w != window->dst_w) || + (itv->yuv_info.old_frame_info.src_w != window->src_w) || + (itv->yuv_info.old_frame_info.dst_x != window->dst_x) || + (itv->yuv_info.old_frame_info.src_x != window->src_x) || + (itv->yuv_info.old_frame_info.pan_x != window->pan_x) || + (itv->yuv_info.old_frame_info.vis_w != window->vis_w)) { + yuv_update |= IVTV_YUV_UPDATE_HORIZONTAL; + } + + if ((itv->yuv_info.old_frame_info.src_h != window->src_h) || + (itv->yuv_info.old_frame_info.dst_h != window->dst_h) || + (itv->yuv_info.old_frame_info.dst_y != window->dst_y) || + (itv->yuv_info.old_frame_info.src_y != window->src_y) || + (itv->yuv_info.old_frame_info.pan_y != window->pan_y) || + (itv->yuv_info.old_frame_info.vis_h != window->vis_h) || + (itv->yuv_info.old_frame_info.interlaced_y != window->interlaced_y) || + (itv->yuv_info.old_frame_info.interlaced_uv != window->interlaced_uv)) { + yuv_update |= IVTV_YUV_UPDATE_VERTICAL; + } + + return yuv_update; +} + +/* Update the scaling register to the requested value */ +void ivtv_yuv_work_handler (struct work_struct *work) +{ + struct yuv_playback_info *info = container_of(work, struct yuv_playback_info, work_queue); + struct ivtv *itv = container_of(info, struct ivtv, yuv_info); + DEFINE_WAIT(wait); + + struct yuv_frame_info window; + u32 yuv_update; + + int frame = itv->yuv_info.update_frame; + +/* IVTV_DEBUG_YUV("Update yuv registers for frame %d\n",frame); */ + memcpy(&window, &itv->yuv_info.new_frame_info[frame], sizeof (window)); + + /* Update the osd pan info */ + window.pan_x = itv->yuv_info.osd_x_pan; + window.pan_y = itv->yuv_info.osd_y_pan; + window.vis_w = itv->yuv_info.osd_vis_w; + window.vis_h = itv->yuv_info.osd_vis_h; + + /* Calculate the display window coordinates. 
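+	   ivtv_yuv_window_setup() returns a mask of IVTV_YUV_UPDATE_* bits,
+	   or 0 when nothing remains visible.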
Exit if nothing left */ + if (!(yuv_update = ivtv_yuv_window_setup (itv, &window))) + return; + + /* Update horizontal settings */ + if (yuv_update & IVTV_YUV_UPDATE_HORIZONTAL) + ivtv_yuv_handle_horizontal(itv, &window); + + if (yuv_update & IVTV_YUV_UPDATE_VERTICAL) + ivtv_yuv_handle_vertical(itv, &window); + + memcpy(&itv->yuv_info.old_frame_info, &window, sizeof (itv->yuv_info.old_frame_info)); +} + +static void ivtv_yuv_init (struct ivtv *itv) +{ + IVTV_DEBUG_YUV("ivtv_yuv_init\n"); + + /* Take a snapshot of the current register settings */ + itv->yuv_info.reg_2834 = read_reg(0x02834); + itv->yuv_info.reg_2838 = read_reg(0x02838); + itv->yuv_info.reg_283c = read_reg(0x0283c); + itv->yuv_info.reg_2840 = read_reg(0x02840); + itv->yuv_info.reg_2844 = read_reg(0x02844); + itv->yuv_info.reg_2848 = read_reg(0x02848); + itv->yuv_info.reg_2854 = read_reg(0x02854); + itv->yuv_info.reg_285c = read_reg(0x0285c); + itv->yuv_info.reg_2864 = read_reg(0x02864); + itv->yuv_info.reg_2870 = read_reg(0x02870); + itv->yuv_info.reg_2874 = read_reg(0x02874); + itv->yuv_info.reg_2898 = read_reg(0x02898); + itv->yuv_info.reg_2890 = read_reg(0x02890); + + itv->yuv_info.reg_289c = read_reg(0x0289c); + itv->yuv_info.reg_2918 = read_reg(0x02918); + itv->yuv_info.reg_291c = read_reg(0x0291c); + itv->yuv_info.reg_2920 = read_reg(0x02920); + itv->yuv_info.reg_2924 = read_reg(0x02924); + itv->yuv_info.reg_2928 = read_reg(0x02928); + itv->yuv_info.reg_292c = read_reg(0x0292c); + itv->yuv_info.reg_2930 = read_reg(0x02930); + itv->yuv_info.reg_2934 = read_reg(0x02934); + itv->yuv_info.reg_2938 = read_reg(0x02938); + itv->yuv_info.reg_293c = read_reg(0x0293c); + itv->yuv_info.reg_2940 = read_reg(0x02940); + itv->yuv_info.reg_2944 = read_reg(0x02944); + itv->yuv_info.reg_2948 = read_reg(0x02948); + itv->yuv_info.reg_294c = read_reg(0x0294c); + itv->yuv_info.reg_2950 = read_reg(0x02950); + itv->yuv_info.reg_2954 = read_reg(0x02954); + itv->yuv_info.reg_2958 = read_reg(0x02958); + itv->yuv_info.reg_295c = read_reg(0x0295c); + itv->yuv_info.reg_2960 = read_reg(0x02960); + itv->yuv_info.reg_2964 = read_reg(0x02964); + itv->yuv_info.reg_2968 = read_reg(0x02968); + itv->yuv_info.reg_296c = read_reg(0x0296c); + itv->yuv_info.reg_2970 = read_reg(0x02970); + + itv->yuv_info.v_filter_1 = -1; + itv->yuv_info.v_filter_2 = -1; + itv->yuv_info.h_filter = -1; + + /* Set some valid size info */ + itv->yuv_info.osd_x_offset = read_reg(0x02a04) & 0x00000FFF; + itv->yuv_info.osd_y_offset = (read_reg(0x02a04) >> 16) & 0x00000FFF; + + /* Bit 2 of reg 2878 indicates current decoder output format + 0 : NTSC 1 : PAL */ + if (read_reg(0x2878) & 4) + itv->yuv_info.decode_height = 576; + else + itv->yuv_info.decode_height = 480; + + /* If no visible size set, assume full size */ + if (!itv->yuv_info.osd_vis_w) itv->yuv_info.osd_vis_w = 720 - itv->yuv_info.osd_x_offset; + if (!itv->yuv_info.osd_vis_h) itv->yuv_info.osd_vis_h = itv->yuv_info.decode_height - itv->yuv_info.osd_y_offset; + + /* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */ + itv->yuv_info.blanking_ptr = kzalloc(720*16,GFP_KERNEL); + if (itv->yuv_info.blanking_ptr) { + itv->yuv_info.blanking_dmaptr = pci_map_single(itv->dev, itv->yuv_info.blanking_ptr, 720*16, PCI_DMA_TODEVICE); + } + else { + itv->yuv_info.blanking_dmaptr = 0; + IVTV_DEBUG_WARN ("Failed to allocate yuv blanking buffer\n"); + } + + IVTV_DEBUG_WARN("Enable video output\n"); + write_reg_sync(0x00108080, 0x2898); + + /* Enable YUV decoder output */ + write_reg_sync(0x01, 
IVTV_REG_VDM); + + set_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags); + atomic_set(&itv->yuv_info.next_dma_frame,0); +} + +int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args) +{ + DEFINE_WAIT(wait); + int rc = 0; + int got_sig = 0; + int frame, next_fill_frame, last_fill_frame; + + IVTV_DEBUG_INFO("yuv_prep_frame\n"); + + if (atomic_read(&itv->yuv_info.next_dma_frame) == -1) ivtv_yuv_init(itv); + + frame = atomic_read(&itv->yuv_info.next_fill_frame); + next_fill_frame = (frame + 1) & 0x3; + last_fill_frame = (atomic_read(&itv->yuv_info.next_dma_frame)+1) & 0x3; + + if (next_fill_frame != last_fill_frame && last_fill_frame != frame) { + /* Buffers are full - Overwrite the last frame */ + next_fill_frame = frame; + frame = (frame - 1) & 3; + } + + /* Take a snapshot of the yuv coordinate information */ + itv->yuv_info.new_frame_info[frame].src_x = args->src.left; + itv->yuv_info.new_frame_info[frame].src_y = args->src.top; + itv->yuv_info.new_frame_info[frame].src_w = args->src.width; + itv->yuv_info.new_frame_info[frame].src_h = args->src.height; + itv->yuv_info.new_frame_info[frame].dst_x = args->dst.left; + itv->yuv_info.new_frame_info[frame].dst_y = args->dst.top; + itv->yuv_info.new_frame_info[frame].dst_w = args->dst.width; + itv->yuv_info.new_frame_info[frame].dst_h = args->dst.height; + itv->yuv_info.new_frame_info[frame].tru_x = args->dst.left; + itv->yuv_info.new_frame_info[frame].tru_w = args->src_width; + itv->yuv_info.new_frame_info[frame].tru_h = args->src_height; + + /* Are we going to offset the Y plane */ + if (args->src.height + args->src.top < 512-16) + itv->yuv_info.new_frame_info[frame].offset_y = 1; + else + itv->yuv_info.new_frame_info[frame].offset_y = 0; + + /* Snapshot the osd pan info */ + itv->yuv_info.new_frame_info[frame].pan_x = itv->yuv_info.osd_x_pan; + itv->yuv_info.new_frame_info[frame].pan_y = itv->yuv_info.osd_y_pan; + itv->yuv_info.new_frame_info[frame].vis_w = itv->yuv_info.osd_vis_w; + itv->yuv_info.new_frame_info[frame].vis_h = itv->yuv_info.osd_vis_h; + + itv->yuv_info.new_frame_info[frame].update = 0; + itv->yuv_info.new_frame_info[frame].interlaced_y = 0; + itv->yuv_info.new_frame_info[frame].interlaced_uv = 0; + + if (memcmp (&itv->yuv_info.old_frame_info_args, &itv->yuv_info.new_frame_info[frame], + sizeof (itv->yuv_info.new_frame_info[frame]))) { + memcpy(&itv->yuv_info.old_frame_info_args, &itv->yuv_info.new_frame_info[frame], sizeof (itv->yuv_info.old_frame_info_args)); + itv->yuv_info.new_frame_info[frame].update = 1; +/* IVTV_DEBUG_YUV ("Requesting register update for frame %d\n",frame); */ + } + + /* DMA the frame */ + mutex_lock(&itv->udma.lock); + + if ((rc = ivtv_yuv_prep_user_dma(itv, &itv->udma, args)) != 0) { + mutex_unlock(&itv->udma.lock); + return rc; + } + + ivtv_udma_prepare(itv); + prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); + /* if no UDMA is pending and no UDMA is in progress, then the DMA + is finished */ + while (itv->i_flags & (IVTV_F_I_UDMA_PENDING | IVTV_F_I_UDMA)) { + /* don't interrupt if the DMA is in progress but break off + a still pending DMA. 
*/ + got_sig = signal_pending(current); + if (got_sig && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) + break; + got_sig = 0; + schedule(); + } + finish_wait(&itv->dma_waitq, &wait); + + /* Unmap Last DMA Xfer */ + ivtv_udma_unmap(itv); + + if (got_sig) { + IVTV_DEBUG_INFO("User stopped YUV UDMA\n"); + mutex_unlock(&itv->udma.lock); + return -EINTR; + } + + atomic_set(&itv->yuv_info.next_fill_frame, next_fill_frame); + + mutex_unlock(&itv->udma.lock); + return rc; +} + +void ivtv_yuv_close(struct ivtv *itv) +{ + int h_filter, v_filter_1, v_filter_2; + + IVTV_DEBUG_YUV("ivtv_yuv_close\n"); + ivtv_waitq(&itv->vsync_waitq); + + atomic_set(&itv->yuv_info.next_dma_frame, -1); + atomic_set(&itv->yuv_info.next_fill_frame, 0); + + /* Reset registers we have changed so mpeg playback works */ + + /* If we fully restore this register, the display may remain active. + Restore, but set one bit to blank the video. Firmware will always + clear this bit when needed, so not a problem. */ + write_reg(itv->yuv_info.reg_2898 | 0x01000000, 0x2898); + + write_reg(itv->yuv_info.reg_2834, 0x02834); + write_reg(itv->yuv_info.reg_2838, 0x02838); + write_reg(itv->yuv_info.reg_283c, 0x0283c); + write_reg(itv->yuv_info.reg_2840, 0x02840); + write_reg(itv->yuv_info.reg_2844, 0x02844); + write_reg(itv->yuv_info.reg_2848, 0x02848); + write_reg(itv->yuv_info.reg_2854, 0x02854); + write_reg(itv->yuv_info.reg_285c, 0x0285c); + write_reg(itv->yuv_info.reg_2864, 0x02864); + write_reg(itv->yuv_info.reg_2870, 0x02870); + write_reg(itv->yuv_info.reg_2874, 0x02874); + write_reg(itv->yuv_info.reg_2890, 0x02890); + write_reg(itv->yuv_info.reg_289c, 0x0289c); + + write_reg(itv->yuv_info.reg_2918, 0x02918); + write_reg(itv->yuv_info.reg_291c, 0x0291c); + write_reg(itv->yuv_info.reg_2920, 0x02920); + write_reg(itv->yuv_info.reg_2924, 0x02924); + write_reg(itv->yuv_info.reg_2928, 0x02928); + write_reg(itv->yuv_info.reg_292c, 0x0292c); + write_reg(itv->yuv_info.reg_2930, 0x02930); + write_reg(itv->yuv_info.reg_2934, 0x02934); + write_reg(itv->yuv_info.reg_2938, 0x02938); + write_reg(itv->yuv_info.reg_293c, 0x0293c); + write_reg(itv->yuv_info.reg_2940, 0x02940); + write_reg(itv->yuv_info.reg_2944, 0x02944); + write_reg(itv->yuv_info.reg_2948, 0x02948); + write_reg(itv->yuv_info.reg_294c, 0x0294c); + write_reg(itv->yuv_info.reg_2950, 0x02950); + write_reg(itv->yuv_info.reg_2954, 0x02954); + write_reg(itv->yuv_info.reg_2958, 0x02958); + write_reg(itv->yuv_info.reg_295c, 0x0295c); + write_reg(itv->yuv_info.reg_2960, 0x02960); + write_reg(itv->yuv_info.reg_2964, 0x02964); + write_reg(itv->yuv_info.reg_2968, 0x02968); + write_reg(itv->yuv_info.reg_296c, 0x0296c); + write_reg(itv->yuv_info.reg_2970, 0x02970); + + /* Prepare to restore filters */ + + /* First the horizontal filter */ + if ((itv->yuv_info.reg_2834 & 0x0000FFFF) == (itv->yuv_info.reg_2834 >> 16)) { + /* An exact size match uses filter 0 */ + h_filter = 0; + } + else { + /* Figure out which filter to use */ + h_filter = ((itv->yuv_info.reg_2834 << 16) / (itv->yuv_info.reg_2834 >> 16)) >> 15; + h_filter = (h_filter >> 1) + (h_filter & 1); + /* Only an exact size match can use filter 0. 
*/ + if (h_filter < 1) h_filter = 1; + } + + /* Now the vertical filter */ + if ((itv->yuv_info.reg_2918 & 0x0000FFFF) == (itv->yuv_info.reg_2918 >> 16)) { + /* An exact size match uses filter 0/1 */ + v_filter_1 = 0; + v_filter_2 = 1; + } + else { + /* Figure out which filter to use */ + v_filter_1 = ((itv->yuv_info.reg_2918 << 16) / (itv->yuv_info.reg_2918 >> 16)) >> 15; + v_filter_1 = (v_filter_1 >> 1) + (v_filter_1 & 1); + /* Only an exact size match can use filter 0 */ + if (v_filter_1 == 0) v_filter_1 = 1; + v_filter_2 = v_filter_1; + } + + /* Now restore the filters */ + ivtv_yuv_filter (itv,h_filter,v_filter_1,v_filter_2); + + /* and clear a few registers */ + write_reg(0, 0x02814); + write_reg(0, 0x0282c); + write_reg(0, 0x02904); + write_reg(0, 0x02910); + + /* Release the blanking buffer */ + if (itv->yuv_info.blanking_ptr) { + kfree (itv->yuv_info.blanking_ptr); + itv->yuv_info.blanking_ptr = NULL; + pci_unmap_single(itv->dev, itv->yuv_info.blanking_dmaptr, 720*16, PCI_DMA_TODEVICE); + } + + /* Invalidate the old dimension information */ + itv->yuv_info.old_frame_info.src_w = 0; + itv->yuv_info.old_frame_info.src_h = 0; + itv->yuv_info.old_frame_info_args.src_w = 0; + itv->yuv_info.old_frame_info_args.src_h = 0; + + /* All done. */ + clear_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags); +} + diff --git a/drivers/media/video/ivtv/ivtv-yuv.h b/drivers/media/video/ivtv/ivtv-yuv.h new file mode 100644 index 000000000000..31128733e784 --- /dev/null +++ b/drivers/media/video/ivtv/ivtv-yuv.h @@ -0,0 +1,24 @@ +/* + yuv support + + Copyright (C) 2007 Ian Armstrong + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +int ivtv_yuv_filter_check(struct ivtv *itv); +int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args); +void ivtv_yuv_close(struct ivtv *itv); +void ivtv_yuv_work_handler (struct work_struct *work); diff --git a/include/media/ivtv.h b/include/media/ivtv.h new file mode 100644 index 000000000000..412b48ea8eda --- /dev/null +++ b/include/media/ivtv.h @@ -0,0 +1,65 @@ +/* + Public ivtv API header + Copyright (C) 2003-2004 Kevin Thayer + Copyright (C) 2004-2007 Hans Verkuil + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_IVTV_H +#define _LINUX_IVTV_H + +/* ivtv knows several distinct output modes: MPEG streaming, + YUV streaming, YUV updates through user DMA and the passthrough + mode. + + In order to clearly tell the driver that we are in user DMA + YUV mode you need to call IVTV_IOC_DMA_FRAME with y_source == NULL + first (althrough if you don't then the first time + DMA_FRAME is called the mode switch is done automatically). + + When you close the file handle the user DMA mode is exited again. + + While in one mode, you cannot use another mode (EBUSY is returned). + + All this means that if you want to change the YUV interlacing + for the user DMA YUV mode you first need to do call IVTV_IOC_DMA_FRAME + with y_source == NULL before you can set the correct format using + VIDIOC_S_FMT. + + Eventually all this should be replaced with a proper V4L2 API, + but for now we have to do it this way. */ + +struct ivtv_dma_frame { + enum v4l2_buf_type type; /* V4L2_BUF_TYPE_VIDEO_OUTPUT */ + __u32 pixelformat; /* 0 == same as destination */ + void __user *y_source; /* if NULL and type == V4L2_BUF_TYPE_VIDEO_OUTPUT, + then just switch to user DMA YUV output mode */ + void __user *uv_source; /* Unused for RGB pixelformats */ + struct v4l2_rect src; + struct v4l2_rect dst; + __u32 src_width; + __u32 src_height; +}; + +#define IVTV_IOC_DMA_FRAME _IOW ('V', BASE_VIDIOC_PRIVATE+0, struct ivtv_dma_frame) + +/* These are the VBI types as they appear in the embedded VBI private packets. */ +#define IVTV_SLICED_TYPE_TELETEXT_B (1) +#define IVTV_SLICED_TYPE_CAPTION_525 (4) +#define IVTV_SLICED_TYPE_WSS_625 (5) +#define IVTV_SLICED_TYPE_VPS (7) + +#endif /* _LINUX_IVTV_H */ -- cgit v1.2.3-59-g8ed1b From 74cab31c413c8615efe818d44ff4ac83e2a138be Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 27 Apr 2007 12:31:26 -0300 Subject: V4L/DVB (5355): Add VIDIOC_G_CHIP_IDENT to various i2c modules Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/cs53l32a.c | 4 ++ drivers/media/video/msp3400-driver.c | 5 ++ drivers/media/video/msp3400-driver.h | 1 + drivers/media/video/tvaudio.c | 4 ++ drivers/media/video/upd64031a.c | 4 ++ drivers/media/video/upd64083.c | 5 ++ drivers/media/video/wm8739.c | 4 ++ drivers/media/video/wm8775.c | 4 ++ include/media/v4l2-chip-ident.h | 91 ++++++++++++++++++++++++++++++++++-- 9 files changed, 118 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c index de87247c74ee..a73e285af730 100644 --- a/drivers/media/video/cs53l32a.c +++ b/drivers/media/video/cs53l32a.c @@ -28,6 +28,7 @@ #include #include #include +#include MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC"); MODULE_AUTHOR("Martin Vaughan"); @@ -103,6 +104,9 @@ static int cs53l32a_command(struct i2c_client *client, unsigned int cmd, cs53l32a_write(client, 0x05, (u8) ctrl->value); break; + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_CS53l32A, 0); + case VIDIOC_LOG_STATUS: { u8 v = cs53l32a_read(client, 0x01); diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c index ba1af3c8525e..3bb7d6634862 100644 --- a/drivers/media/video/msp3400-driver.c +++ b/drivers/media/video/msp3400-driver.c @@ -773,6 +773,9 @@ static int 
msp_command(struct i2c_client *client, unsigned int cmd, void *arg) break; } + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, state->ident, (state->rev1 << 16) | state->rev2); + default: /* unknown */ return -EINVAL; @@ -872,6 +875,8 @@ static int msp_attach(struct i2c_adapter *adapter, int address, int kind) snprintf(client->name, sizeof(client->name), "MSP%d4%02d%c-%c%d", msp_family, msp_product, msp_revision, msp_hard, msp_rom); + /* Rev B=2, C=3, D=4, G=7 */ + state->ident = msp_family * 10000 + 4000 + msp_product * 10 + msp_revision - '@'; /* Has NICAM support: all mspx41x and mspx45x products have NICAM */ state->has_nicam = msp_prod_hi == 1 || msp_prod_hi == 5; diff --git a/drivers/media/video/msp3400-driver.h b/drivers/media/video/msp3400-driver.h index 7531efa1615e..ab69a290e5dc 100644 --- a/drivers/media/video/msp3400-driver.h +++ b/drivers/media/video/msp3400-driver.h @@ -50,6 +50,7 @@ extern int msp_stereo_thresh; struct msp_state { int rev1, rev2; + int ident; u8 has_nicam; u8 has_radio; u8 has_headphones; diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c index d506dfaa45a9..426083c08986 100644 --- a/drivers/media/video/tvaudio.c +++ b/drivers/media/video/tvaudio.c @@ -33,6 +33,7 @@ #include #include +#include #include @@ -1775,6 +1776,9 @@ static int chip_command(struct i2c_client *client, /* the thread will call checkmode() later */ } break; + + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_TVAUDIO, 0); } return 0; } diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c index 28d1133a3b7a..0b2a961efd22 100644 --- a/drivers/media/video/upd64031a.c +++ b/drivers/media/video/upd64031a.c @@ -27,6 +27,7 @@ #include #include #include +#include #include // --------------------- read registers functions define ----------------------- @@ -179,6 +180,9 @@ static int upd64031a_command(struct i2c_client *client, unsigned int cmd, void * } #endif + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_UPD64031A, 0); + default: break; } diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c index fe38224150d8..401bd21f46eb 100644 --- a/drivers/media/video/upd64083.c +++ b/drivers/media/video/upd64083.c @@ -26,6 +26,7 @@ #include #include #include +#include #include MODULE_DESCRIPTION("uPD64083 driver"); @@ -155,6 +156,10 @@ static int upd64083_command(struct i2c_client *client, unsigned int cmd, void *a break; } #endif + + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_UPD64083, 0); + default: break; } diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c index a9b59c35cd67..8f6741a28a47 100644 --- a/drivers/media/video/wm8739.c +++ b/drivers/media/video/wm8739.c @@ -29,6 +29,7 @@ #include #include #include +#include MODULE_DESCRIPTION("wm8739 driver"); MODULE_AUTHOR("T. 
Adachi, Hans Verkuil"); @@ -236,6 +237,9 @@ static int wm8739_command(struct i2c_client *client, unsigned int cmd, void *arg return -EINVAL; } + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_WM8739, 0); + case VIDIOC_LOG_STATUS: v4l_info(client, "Frequency: %u Hz\n", state->clock_freq); v4l_info(client, "Volume L: %02x%s\n", state->vol_l & 0x1f, diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c index d81a88bbe43d..4df5d30d4d09 100644 --- a/drivers/media/video/wm8775.c +++ b/drivers/media/video/wm8775.c @@ -33,6 +33,7 @@ #include #include #include +#include MODULE_DESCRIPTION("wm8775 driver"); MODULE_AUTHOR("Ulf Eklund, Hans Verkuil"); @@ -124,6 +125,9 @@ static int wm8775_command(struct i2c_client *client, unsigned int cmd, wm8775_write(client, R21, 0x100 + state->input); break; + case VIDIOC_G_CHIP_IDENT: + return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_WM8775, 0); + case VIDIOC_LOG_STATUS: v4l_info(client, "Input: %d%s\n", state->input, state->muted ? " (muted)" : ""); diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h index 7d0c65413981..09d16c4f00f7 100644 --- a/include/media/v4l2-chip-ident.h +++ b/include/media/v4l2-chip-ident.h @@ -31,10 +31,13 @@ enum { V4L2_IDENT_AMBIGUOUS = 1, /* Match too general, multiple chips matched */ V4L2_IDENT_UNKNOWN = 2, /* Chip found, but cannot identify */ - /* module saa7110: just ident= 100 */ + /* module tvaudio: reserved range 50-99 */ + V4L2_IDENT_TVAUDIO = 50, /* A tvaudio chip, unknown which it is exactly */ + + /* module saa7110: just ident 100 */ V4L2_IDENT_SAA7110 = 100, - /* module saa7111: just ident= 101 */ + /* module saa7111: just ident 101 */ V4L2_IDENT_SAA7111 = 101, /* module saa7115: reserved range 102-149 */ @@ -55,12 +58,92 @@ enum { V4L2_IDENT_CX25842 = 242, V4L2_IDENT_CX25843 = 243, - /* OmniVision sensors - range 250-299 */ + /* OmniVision sensors: reserved range 250-299 */ V4L2_IDENT_OV7670 = 250, - /* Conexant MPEG encoder/decoders: range 410-420 */ + /* Conexant MPEG encoder/decoders: reserved range 410-420 */ V4L2_IDENT_CX23415 = 415, V4L2_IDENT_CX23416 = 416, + + /* module wm8739: just ident 8739 */ + V4L2_IDENT_WM8739 = 8739, + + /* module wm8775: just ident 8775 */ + V4L2_IDENT_WM8775 = 8775, + + /* module cs53132a: just ident 53132 */ + V4L2_IDENT_CS53l32A = 53132, + + /* module upd64031a: just ident 64031 */ + V4L2_IDENT_UPD64031A = 64031, + + /* module upd64083: just ident 64083 */ + V4L2_IDENT_UPD64083 = 64083, + + /* module msp34xx: reserved range 34000-34999 */ + V4L2_IDENT_MSP3400B = 34002, + V4L2_IDENT_MSP3410B = 34102, + + V4L2_IDENT_MSP3400C = 34003, + V4L2_IDENT_MSP3410C = 34103, + + V4L2_IDENT_MSP3400D = 34004, + V4L2_IDENT_MSP3410D = 34104, + V4L2_IDENT_MSP3405D = 34054, + V4L2_IDENT_MSP3415D = 34154, + V4L2_IDENT_MSP3407D = 34074, + V4L2_IDENT_MSP3417D = 34174, + + V4L2_IDENT_MSP3400G = 34007, + V4L2_IDENT_MSP3410G = 34107, + V4L2_IDENT_MSP3420G = 34207, + V4L2_IDENT_MSP3430G = 34307, + V4L2_IDENT_MSP3440G = 34407, + V4L2_IDENT_MSP3450G = 34507, + V4L2_IDENT_MSP3460G = 34607, + + V4L2_IDENT_MSP3401G = 34017, + V4L2_IDENT_MSP3411G = 34117, + V4L2_IDENT_MSP3421G = 34217, + V4L2_IDENT_MSP3431G = 34317, + V4L2_IDENT_MSP3441G = 34417, + V4L2_IDENT_MSP3451G = 34517, + V4L2_IDENT_MSP3461G = 34617, + + V4L2_IDENT_MSP3402G = 34027, + V4L2_IDENT_MSP3412G = 34127, + V4L2_IDENT_MSP3422G = 34227, + V4L2_IDENT_MSP3442G = 34427, + V4L2_IDENT_MSP3452G = 34527, + + V4L2_IDENT_MSP3405G = 34057, + V4L2_IDENT_MSP3415G = 34157, 
+ V4L2_IDENT_MSP3425G = 34257, + V4L2_IDENT_MSP3435G = 34357, + V4L2_IDENT_MSP3445G = 34457, + V4L2_IDENT_MSP3455G = 34557, + V4L2_IDENT_MSP3465G = 34657, + + V4L2_IDENT_MSP3407G = 34077, + V4L2_IDENT_MSP3417G = 34177, + V4L2_IDENT_MSP3427G = 34277, + V4L2_IDENT_MSP3437G = 34377, + V4L2_IDENT_MSP3447G = 34477, + V4L2_IDENT_MSP3457G = 34577, + V4L2_IDENT_MSP3467G = 34677, + + /* module msp44xx: reserved range 44000-44999 */ + V4L2_IDENT_MSP4400G = 44007, + V4L2_IDENT_MSP4410G = 44107, + V4L2_IDENT_MSP4420G = 44207, + V4L2_IDENT_MSP4440G = 44407, + V4L2_IDENT_MSP4450G = 44507, + + V4L2_IDENT_MSP4408G = 44087, + V4L2_IDENT_MSP4418G = 44187, + V4L2_IDENT_MSP4428G = 44287, + V4L2_IDENT_MSP4448G = 44487, + V4L2_IDENT_MSP4458G = 44587, }; #endif -- cgit v1.2.3-59-g8ed1b From 43d0dfcfc654fa18b6dd91b9483273b44112997f Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sat, 10 Mar 2007 06:24:30 -0300 Subject: V4L/DVB (5402): Add vsync_field to the union in video_event for VIDEO_EVENT_VSYNC VIDEO_EVENT_VSYNC needs to tell the application which field it was that received a VSYNC (odd/even/progressive). The vsync_field was added to the union in video_event for this purpose. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/linux/dvb/video.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include') diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h index a96da40c50f5..0c2a1c7c50a6 100644 --- a/include/linux/dvb/video.h +++ b/include/linux/dvb/video.h @@ -120,6 +120,13 @@ struct video_command { }; }; +/* FIELD_UNKNOWN can be used if the hardware does not know whether + the Vsync is for an odd, even or progressive (i.e. non-interlaced) + field. */ +#define VIDEO_VSYNC_FIELD_UNKNOWN (0) +#define VIDEO_VSYNC_FIELD_ODD (1) +#define VIDEO_VSYNC_FIELD_EVEN (2) +#define VIDEO_VSYNC_FIELD_PROGRESSIVE (3) struct video_event { int32_t type; @@ -131,6 +138,7 @@ struct video_event { union { video_size_t size; unsigned int frame_rate; /* in frames per 1000sec */ + unsigned char vsync_field; /* unknown/odd/even/progressive */ } u; }; -- cgit v1.2.3-59-g8ed1b From 3700a90f05f316948328e8d0e6a9955338a96565 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sun, 11 Mar 2007 10:50:03 -0300 Subject: V4L/DVB (5418): Speed is a signed 32-bit integer, not unsigned. Negative speed values have to be allowed for reverse playback. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/linux/dvb/video.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h index 0c2a1c7c50a6..4c314914339f 100644 --- a/include/linux/dvb/video.h +++ b/include/linux/dvb/video.h @@ -110,7 +110,7 @@ struct video_command { } stop; struct { - __u32 speed; + __s32 speed; __u32 format; } play; -- cgit v1.2.3-59-g8ed1b From 6816b1991fd4bcb457c9534e8136476e45bfee0a Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sun, 11 Mar 2007 10:54:11 -0300 Subject: V4L/DVB (5419): Add comment how the speed field is interpreted. 
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/linux/dvb/video.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h index 4c314914339f..93e4c3a6d190 100644 --- a/include/linux/dvb/video.h +++ b/include/linux/dvb/video.h @@ -110,6 +110,11 @@ struct video_command { } stop; struct { + /* 0 or 1000 specifies normal speed, + 1 specifies forward single stepping, + -1 specifies backward single stepping, + >1: playback at speed/1000 of the normal speed, + <-1: reverse playback at (-speed/1000) of the normal speed. */ __s32 speed; __u32 format; } play; -- cgit v1.2.3-59-g8ed1b From bb74782e621e988555354abba03812982236a3af Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Tue, 27 Feb 2007 11:30:24 +0100 Subject: USB: additional structure from cdc spec this adds another structure for CDC devices to cdc.h. Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/cdc.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index 956edf3bbecb..2204ae22c381 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h @@ -91,6 +91,17 @@ struct usb_cdc_union_desc { /* ... and there could be other slave interfaces */ } __attribute__ ((packed)); +/* "Country Selection Functional Descriptor" from CDC spec 5.2.3.9 */ +struct usb_cdc_country_functional_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 iCountryCodeRelDate; + __le16 wCountyCode0; + /* ... and there can be a lot of country codes */ +} __attribute__ ((packed)); + /* "Network Channel Terminal Functional Descriptor" from CDC spec 5.2.3.11 */ struct usb_cdc_network_terminal_desc { __u8 bLength; -- cgit v1.2.3-59-g8ed1b From eaafbc3a8adab16babe2c20e54ad3ba40d1fbbc9 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 13 Mar 2007 16:39:15 -0400 Subject: USB: Allow autosuspend delay to equal 0 This patch (as867) adds an entry for the new power/autosuspend attribute in Documentation/ABI/testing, and it changes the behavior of the delay value. Now a delay of 0 means to autosuspend as soon as possible, and negative values will prevent autosuspend. Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-bus-usb | 15 +++++++++++++++ Documentation/kernel-parameters.txt | 2 +- drivers/usb/core/driver.c | 2 +- drivers/usb/core/sysfs.c | 16 ++++++++++------ drivers/usb/core/usb.c | 2 +- include/linux/usb.h | 2 +- 6 files changed, 29 insertions(+), 10 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-bus-usb (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb new file mode 100644 index 000000000000..00a84326325f --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-usb @@ -0,0 +1,15 @@ +What: /sys/bus/usb/devices/.../power/autosuspend +Date: March 2007 +KernelVersion: 2.6.21 +Contact: Alan Stern +Description: + Each USB device directory will contain a file named + power/autosuspend. This file holds the time (in seconds) + the device must be idle before it will be autosuspended. + 0 means the device will be autosuspended as soon as + possible. Negative values will prevent the device from + being autosuspended at all, and writing a negative value + will resume the device if it is already suspended. 
+ + The autosuspend delay for newly-created devices is set to + the value of the usbcore.autosuspend module parameter. diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 12533a958c51..2017942e0966 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1792,7 +1792,7 @@ and is between 256 and 4096 characters. It is defined in the file for newly-detected USB devices (default 2). This is the time required before an idle device will be autosuspended. Devices for which the delay is set - to 0 won't be autosuspended at all. + to a negative value won't be autosuspended at all. usbhid.mousepoll= [USBHID] The interval which mice are to be polled at. diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index abea48de8766..884179f1e163 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -970,7 +970,7 @@ static int autosuspend_check(struct usb_device *udev) udev->do_remote_wakeup = device_may_wakeup(&udev->dev); if (udev->pm_usage_cnt > 0) return -EBUSY; - if (!udev->autosuspend_delay) + if (udev->autosuspend_delay < 0) return -EPERM; if (udev->actconfig) { diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 311d5df80386..731001f7d2c1 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -165,7 +165,7 @@ show_autosuspend(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev = to_usb_device(dev); - return sprintf(buf, "%u\n", udev->autosuspend_delay / HZ); + return sprintf(buf, "%d\n", udev->autosuspend_delay / HZ); } static ssize_t @@ -173,17 +173,21 @@ set_autosuspend(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_device *udev = to_usb_device(dev); - unsigned value, old; + int value; - if (sscanf(buf, "%u", &value) != 1 || value >= INT_MAX/HZ) + if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ || + value <= - INT_MAX/HZ) return -EINVAL; value *= HZ; - old = udev->autosuspend_delay; udev->autosuspend_delay = value; - if (value > 0 && old == 0) + if (value >= 0) usb_try_autosuspend_device(udev); - + else { + usb_lock_device(udev); + usb_external_resume_device(udev); + usb_unlock_device(udev); + } return count; } diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 138252e0a1cf..6f35dce8a95d 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -55,7 +55,7 @@ struct workqueue_struct *ksuspend_usb_wq; #ifdef CONFIG_USB_SUSPEND static int usb_autosuspend_delay = 2; /* Default delay value, * in seconds */ -module_param_named(autosuspend, usb_autosuspend_delay, uint, 0644); +module_param_named(autosuspend, usb_autosuspend_delay, int, 0644); MODULE_PARM_DESC(autosuspend, "default autosuspend delay"); #else diff --git a/include/linux/usb.h b/include/linux/usb.h index 87dc75a6cee1..cc24d089faa0 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -394,7 +394,7 @@ struct usb_device { struct delayed_work autosuspend; /* for delayed autosuspends */ struct mutex pm_mutex; /* protects PM operations */ - unsigned autosuspend_delay; /* in jiffies */ + int autosuspend_delay; /* in jiffies */ unsigned auto_pm:1; /* autosuspend/resume in progress */ unsigned do_remote_wakeup:1; /* remote wakeup should be enabled */ -- cgit v1.2.3-59-g8ed1b From 2add5229d77a3de08015feef437653e02372162f Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 20 Mar 2007 14:59:39 -0400 Subject: USB: add power/level sysfs attribute This patch (as874) adds another 
piece to the user-visible part of the USB autosuspend interface. The new power/level sysfs attribute allows users to force the device on (with autosuspend off), force the device to sleep (with autoresume off), or return to normal automatic operation. Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-bus-usb | 26 +++++++++++ drivers/usb/core/driver.c | 15 ++++-- drivers/usb/core/quirks.c | 2 +- drivers/usb/core/sysfs.c | 81 +++++++++++++++++++++++++++++++-- include/linux/usb.h | 2 + 5 files changed, 118 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb index 00a84326325f..f9937add033d 100644 --- a/Documentation/ABI/testing/sysfs-bus-usb +++ b/Documentation/ABI/testing/sysfs-bus-usb @@ -13,3 +13,29 @@ Description: The autosuspend delay for newly-created devices is set to the value of the usbcore.autosuspend module parameter. + +What: /sys/bus/usb/devices/.../power/level +Date: March 2007 +KernelVersion: 2.6.21 +Contact: Alan Stern +Description: + Each USB device directory will contain a file named + power/level. This file holds a power-level setting for + the device, one of "on", "auto", or "suspend". + + "on" means that the device is not allowed to autosuspend, + although normal suspends for system sleep will still + be honored. "auto" means the device will autosuspend + and autoresume in the usual manner, according to the + capabilities of its driver. "suspend" means the device + is forced into a suspended state and it will not autoresume + in response to I/O requests. However remote-wakeup requests + from the device may still be enabled (the remote-wakeup + setting is controlled separately by the power/wakeup + attribute). + + During normal use, devices should be left in the "auto" + level. The other levels are meant for administrative uses. + If you want to suspend a device immediately but leave it + free to wake up in response to I/O requests, you should + write "0" to power/autosuspend. 
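Both attributes documented above are ordinary sysfs files, so they can be exercised with plain file I/O. The following is only a minimal userspace sketch, not part of this patch; the device path "2-1" is an assumed example and has to be replaced with the device actually being controlled.

/* Sketch: drive the power/level and power/autosuspend attributes
 * described above. Error handling is kept to a minimum, and the
 * sysfs path (device "2-1") is an assumed placeholder.
 */
#include <stdio.h>

static int write_attr(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(value, f);
	return fclose(f);
}

int main(void)
{
	/* keep the device fully powered: no autosuspend at all */
	write_attr("/sys/bus/usb/devices/2-1/power/level", "on");

	/* back to normal operation, autosuspend as soon as it is idle */
	write_attr("/sys/bus/usb/devices/2-1/power/level", "auto");
	write_attr("/sys/bus/usb/devices/2-1/power/autosuspend", "0");
	return 0;
}

The same effect is available from a shell, e.g. echo auto > .../power/level; writing "suspend" instead forces the device down and blocks autoresume until the level is changed again.
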
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 884179f1e163..9b6a60fafddb 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -872,8 +872,10 @@ static int usb_resume_device(struct usb_device *udev) done: // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status); - if (status == 0) + if (status == 0) { + udev->autoresume_disabled = 0; udev->dev.power.power_state.event = PM_EVENT_ON; + } return status; } @@ -970,7 +972,7 @@ static int autosuspend_check(struct usb_device *udev) udev->do_remote_wakeup = device_may_wakeup(&udev->dev); if (udev->pm_usage_cnt > 0) return -EBUSY; - if (udev->autosuspend_delay < 0) + if (udev->autosuspend_delay < 0 || udev->autosuspend_disabled) return -EPERM; if (udev->actconfig) { @@ -1116,6 +1118,8 @@ static int usb_resume_both(struct usb_device *udev) struct usb_interface *intf; struct usb_device *parent = udev->parent; + if (udev->auto_pm && udev->autoresume_disabled) + return -EPERM; cancel_delayed_work(&udev->autosuspend); if (udev->state == USB_STATE_NOTATTACHED) return -ENODEV; @@ -1486,9 +1490,14 @@ static int usb_suspend(struct device *dev, pm_message_t message) static int usb_resume(struct device *dev) { + struct usb_device *udev; + if (!is_usb_device(dev)) /* Ignore PM for interfaces */ return 0; - return usb_external_resume_device(to_usb_device(dev)); + udev = to_usb_device(dev); + if (udev->autoresume_disabled) + return -EPERM; + return usb_external_resume_device(udev); } #else diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f08ec85a6d64..739f520908aa 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -42,7 +42,7 @@ static void usb_autosuspend_quirk(struct usb_device *udev) { #ifdef CONFIG_USB_SUSPEND /* disable autosuspend, but allow the user to re-enable it via sysfs */ - udev->autosuspend_delay = 0; + udev->autosuspend_disabled = 1; #endif } diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 731001f7d2c1..2ea47a38aefa 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -11,6 +11,7 @@ #include +#include #include #include "usb.h" @@ -184,9 +185,8 @@ set_autosuspend(struct device *dev, struct device_attribute *attr, if (value >= 0) usb_try_autosuspend_device(udev); else { - usb_lock_device(udev); - usb_external_resume_device(udev); - usb_unlock_device(udev); + if (usb_autoresume_device(udev) == 0) + usb_autosuspend_device(udev); } return count; } @@ -194,21 +194,94 @@ set_autosuspend(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR, show_autosuspend, set_autosuspend); +static const char on_string[] = "on"; +static const char auto_string[] = "auto"; +static const char suspend_string[] = "suspend"; + +static ssize_t +show_level(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct usb_device *udev = to_usb_device(dev); + const char *p = auto_string; + + if (udev->state == USB_STATE_SUSPENDED) { + if (udev->autoresume_disabled) + p = suspend_string; + } else { + if (udev->autosuspend_disabled) + p = on_string; + } + return sprintf(buf, "%s\n", p); +} + +static ssize_t +set_level(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct usb_device *udev = to_usb_device(dev); + int len = count; + char *cp; + int rc = 0; + + cp = memchr(buf, '\n', count); + if (cp) + len = cp - buf; + + usb_lock_device(udev); + + /* Setting the flags without calling usb_pm_lock is a subject to + * races, but who cares... 
+ */ + if (len == sizeof on_string - 1 && + strncmp(buf, on_string, len) == 0) { + udev->autosuspend_disabled = 1; + udev->autoresume_disabled = 0; + rc = usb_external_resume_device(udev); + + } else if (len == sizeof auto_string - 1 && + strncmp(buf, auto_string, len) == 0) { + udev->autosuspend_disabled = 0; + udev->autoresume_disabled = 0; + rc = usb_external_resume_device(udev); + + } else if (len == sizeof suspend_string - 1 && + strncmp(buf, suspend_string, len) == 0) { + udev->autosuspend_disabled = 0; + udev->autoresume_disabled = 1; + rc = usb_external_suspend_device(udev, PMSG_SUSPEND); + + } else + rc = -EINVAL; + + usb_unlock_device(udev); + return (rc < 0 ? rc : count); +} + +static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level); + static char power_group[] = "power"; static int add_power_attributes(struct device *dev) { int rc = 0; - if (is_usb_device(dev)) + if (is_usb_device(dev)) { rc = sysfs_add_file_to_group(&dev->kobj, &dev_attr_autosuspend.attr, power_group); + if (rc == 0) + rc = sysfs_add_file_to_group(&dev->kobj, + &dev_attr_level.attr, + power_group); + } return rc; } static void remove_power_attributes(struct device *dev) { + sysfs_remove_file_from_group(&dev->kobj, + &dev_attr_level.attr, + power_group); sysfs_remove_file_from_group(&dev->kobj, &dev_attr_autosuspend.attr, power_group); diff --git a/include/linux/usb.h b/include/linux/usb.h index cc24d089faa0..5e8e144afbae 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -398,6 +398,8 @@ struct usb_device { unsigned auto_pm:1; /* autosuspend/resume in progress */ unsigned do_remote_wakeup:1; /* remote wakeup should be enabled */ + unsigned autosuspend_disabled:1; /* autosuspend and autoresume */ + unsigned autoresume_disabled:1; /* disabled by the user */ #endif }; #define to_usb_device(d) container_of(d, struct usb_device, dev) -- cgit v1.2.3-59-g8ed1b From 9f8b17e643fe6aa505629658445849397bda4e4f Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Tue, 13 Mar 2007 15:59:31 +0100 Subject: USB: make usbdevices export their device nodes instead of using a separate class o The "real" usb-devices export now a device node which can populate /dev/bus/usb. o The usb_device class is optional now and can be disabled in the kernel config. Major/minor of the "real" devices and class devices are the same. o The environment of the usb-device event contains DEVNUM and BUSNUM to help udev and get rid of the ugly udev rule we need for the class devices. o The usb-devices and usb-interfaces share the same bus, so I used the new "struct device_type" to let these devices identify themselves. This also removes the current logic of using a magic platform-pointer. The name of the device_type is also added to the environment which makes it easier to distinguish the different kinds of devices on the same subsystem. It looks like this: add@/devices/pci0000:00/0000:00:1d.1/usb2/2-1 ACTION=add DEVPATH=/devices/pci0000:00/0000:00:1d.1/usb2/2-1 SUBSYSTEM=usb SEQNUM=1533 MAJOR=189 MINOR=131 DEVTYPE=usb_device PRODUCT=46d/c03e/2000 TYPE=0/0/0 BUSNUM=002 DEVNUM=004 This udev rule works as a replacement for usb_device class devices: SUBSYSTEM=="usb", ACTION=="add", ENV{DEVTYPE}=="usb_device", \ NAME="bus/usb/$env{BUSNUM}/$env{DEVNUM}", MODE="0644" Updated patch, which needs the device_type patches in Greg's tree. I also got a bugzilla assigned for this. 
:) https://bugzilla.novell.com/show_bug.cgi?id=250659 Signed-off-by: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/Kconfig | 25 +++++++++++- drivers/usb/core/devio.c | 94 +++++++++++++++++++++++++--------------------- drivers/usb/core/driver.c | 58 +++++++--------------------- drivers/usb/core/hub.c | 10 +++-- drivers/usb/core/inode.c | 2 +- drivers/usb/core/message.c | 65 +++++++++++++++++++++++++++++++- drivers/usb/core/usb.c | 20 +++++----- drivers/usb/core/usb.h | 14 +++---- include/linux/usb.h | 10 +++-- 9 files changed, 183 insertions(+), 115 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 2fc0f88a3d86..f493fb1eaa27 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -31,7 +31,30 @@ config USB_DEVICEFS For the format of the various /proc/bus/usb/ files, please read . - Most users want to say Y here. + Usbfs files can't handle Access Control Lists (ACL), which are the + default way to grant access to USB devices for untrusted users of a + desktop system. The usbfs functionality is replaced by real + device-nodes managed by udev. These nodes live in /dev/bus/usb and + are used by libusb. + +config USB_DEVICE_CLASS + bool "USB device class-devices (DEPRECATED)" + depends on USB + default n + ---help--- + Userspace access to USB devices is granted by device-nodes exported + directly from the usbdev in sysfs. Old versions of the driver + core and udev needed additional class devices to export device nodes. + + These additional devices are difficult to handle in userspace, if + information about USB interfaces must be available. One device contains + the device node, the other device contains the interface data. Both + devices are at the same level in sysfs (siblings) and one can't access + the other. The device node created directly by the usbdev is the parent + device of the interface and therefore easily accessible from the interface + event. + + This option provides backward compatibility if needed. 
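
For reference, the node naming and numbers in the uevent example above follow from the minor calculation used by this patch, ((busnum-1) * 128) + (devnum-1), together with character major 189. The standalone sketch below is not part of the patch; it merely reproduces the MAJOR=189, MINOR=131 pair shown for bus 002, device 004.

#include <stdio.h>

#define USB_DEVICE_MAJOR	189	/* matches MAJOR= in the uevent example */

static unsigned int usbdev_minor(unsigned int busnum, unsigned int devnum)
{
	/* same formula the driver uses when it exports the usbdev node */
	return (busnum - 1) * 128 + (devnum - 1);
}

int main(void)
{
	unsigned int busnum = 2, devnum = 4;	/* the example device above */

	printf("/dev/bus/usb/%03u/%03u is char %u:%u\n",
	       busnum, devnum, USB_DEVICE_MAJOR,
	       usbdev_minor(busnum, devnum));
	return 0;
}

This is also why the quoted udev rule only needs BUSNUM and DEVNUM from the event to place the node under /dev/bus/usb.
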
config USB_DYNAMIC_MINORS bool "Dynamic USB minor allocation (EXPERIMENTAL)" diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index fc3545ddb06e..e023f3d56248 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -57,7 +57,6 @@ #define USB_MAXBUS 64 #define USB_DEVICE_MAX USB_MAXBUS * 128 -static struct class *usb_device_class; /* Mutual exclusion for removal, open, and release */ DEFINE_MUTEX(usbfs_mutex); @@ -514,22 +513,25 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, unsig return ret; } -static struct usb_device *usbdev_lookup_minor(int minor) +static int __match_minor(struct device *dev, void *data) { - struct device *device; - struct usb_device *udev = NULL; + int minor = *((int *)data); - down(&usb_device_class->sem); - list_for_each_entry(device, &usb_device_class->devices, node) { - if (device->devt == MKDEV(USB_DEVICE_MAJOR, minor)) { - udev = device->platform_data; - break; - } - } - up(&usb_device_class->sem); + if (dev->devt == MKDEV(USB_DEVICE_MAJOR, minor)) + return 1; + return 0; +} - return udev; -}; +static struct usb_device *usbdev_lookup_by_minor(int minor) +{ + struct device *dev; + + dev = bus_find_device(&usb_bus_type, NULL, &minor, __match_minor); + if (!dev) + return NULL; + put_device(dev); + return container_of(dev, struct usb_device, dev); +} /* * file operations @@ -548,11 +550,14 @@ static int usbdev_open(struct inode *inode, struct file *file) goto out; ret = -ENOENT; - /* check if we are called from a real node or usbfs */ + /* usbdev device-node */ if (imajor(inode) == USB_DEVICE_MAJOR) - dev = usbdev_lookup_minor(iminor(inode)); + dev = usbdev_lookup_by_minor(iminor(inode)); +#ifdef CONFIG_USB_DEVICEFS + /* procfs file */ if (!dev) dev = inode->i_private; +#endif if (!dev) goto out; ret = usb_autoresume_device(dev); @@ -1570,7 +1575,7 @@ static unsigned int usbdev_poll(struct file *file, struct poll_table_struct *wai return mask; } -const struct file_operations usbfs_device_file_operations = { +const struct file_operations usbdev_file_operations = { .llseek = usbdev_lseek, .read = usbdev_read, .poll = usbdev_poll, @@ -1579,50 +1584,53 @@ const struct file_operations usbfs_device_file_operations = { .release = usbdev_release, }; -static int usbdev_add(struct usb_device *dev) +#ifdef CONFIG_USB_DEVICE_CLASS +static struct class *usb_classdev_class; + +static int usb_classdev_add(struct usb_device *dev) { int minor = ((dev->bus->busnum-1) * 128) + (dev->devnum-1); - dev->usbfs_dev = device_create(usb_device_class, &dev->dev, + dev->usb_classdev = device_create(usb_classdev_class, &dev->dev, MKDEV(USB_DEVICE_MAJOR, minor), "usbdev%d.%d", dev->bus->busnum, dev->devnum); - if (IS_ERR(dev->usbfs_dev)) - return PTR_ERR(dev->usbfs_dev); + if (IS_ERR(dev->usb_classdev)) + return PTR_ERR(dev->usb_classdev); - dev->usbfs_dev->platform_data = dev; return 0; } -static void usbdev_remove(struct usb_device *dev) +static void usb_classdev_remove(struct usb_device *dev) { - device_unregister(dev->usbfs_dev); + device_unregister(dev->usb_classdev); } -static int usbdev_notify(struct notifier_block *self, unsigned long action, - void *dev) +static int usb_classdev_notify(struct notifier_block *self, + unsigned long action, void *dev) { switch (action) { case USB_DEVICE_ADD: - if (usbdev_add(dev)) + if (usb_classdev_add(dev)) return NOTIFY_BAD; break; case USB_DEVICE_REMOVE: - usbdev_remove(dev); + usb_classdev_remove(dev); break; } return NOTIFY_OK; } static struct notifier_block usbdev_nb = { - .notifier_call 
= usbdev_notify, + .notifier_call = usb_classdev_notify, }; +#endif static struct cdev usb_device_cdev = { .kobj = {.name = "usb_device", }, .owner = THIS_MODULE, }; -int __init usbdev_init(void) +int __init usb_devio_init(void) { int retval; @@ -1632,38 +1640,38 @@ int __init usbdev_init(void) err("unable to register minors for usb_device"); goto out; } - cdev_init(&usb_device_cdev, &usbfs_device_file_operations); + cdev_init(&usb_device_cdev, &usbdev_file_operations); retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX); if (retval) { err("unable to get usb_device major %d", USB_DEVICE_MAJOR); goto error_cdev; } - usb_device_class = class_create(THIS_MODULE, "usb_device"); - if (IS_ERR(usb_device_class)) { +#ifdef CONFIG_USB_DEVICE_CLASS + usb_classdev_class = class_create(THIS_MODULE, "usb_device"); + if (IS_ERR(usb_classdev_class)) { err("unable to register usb_device class"); - retval = PTR_ERR(usb_device_class); - goto error_class; + retval = PTR_ERR(usb_classdev_class); + cdev_del(&usb_device_cdev); + usb_classdev_class = NULL; + goto out; } usb_register_notify(&usbdev_nb); - +#endif out: return retval; -error_class: - usb_device_class = NULL; - cdev_del(&usb_device_cdev); - error_cdev: unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); goto out; } -void usbdev_cleanup(void) +void usb_devio_cleanup(void) { +#ifdef CONFIG_USB_DEVICE_CLASS usb_unregister_notify(&usbdev_nb); - class_destroy(usb_device_class); + class_destroy(usb_classdev_class); +#endif cdev_del(&usb_device_cdev); unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); } - diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 9b6a60fafddb..593386eb974d 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -574,23 +574,10 @@ static int usb_device_match(struct device *dev, struct device_driver *drv) } #ifdef CONFIG_HOTPLUG - -/* - * This sends an uevent to userspace, typically helping to load driver - * or other modules, configure the device, and more. Drivers can provide - * a MODULE_DEVICE_TABLE to help with module loading subtasks. - * - * We're called either from khubd (the typical case) or from root hub - * (init, kapmd, modprobe, rmmod, etc), but the agents need to handle - * delays in event delivery. Use sysfs (and DEVPATH) to make sure the - * device (and this configuration!) are still present. - */ static int usb_uevent(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { - struct usb_interface *intf; struct usb_device *usb_dev; - struct usb_host_interface *alt; int i = 0; int length = 0; @@ -600,13 +587,11 @@ static int usb_uevent(struct device *dev, char **envp, int num_envp, /* driver is often null here; dev_dbg() would oops */ pr_debug ("usb %s: uevent\n", dev->bus_id); - if (is_usb_device(dev)) { + if (is_usb_device(dev)) usb_dev = to_usb_device(dev); - alt = NULL; - } else { - intf = to_usb_interface(dev); + else { + struct usb_interface *intf = to_usb_interface(dev); usb_dev = interface_to_usbdev(intf); - alt = intf->cur_altsetting; } if (usb_dev->devnum < 0) { @@ -621,9 +606,7 @@ static int usb_uevent(struct device *dev, char **envp, int num_envp, #ifdef CONFIG_USB_DEVICEFS /* If this is available, userspace programs can directly read * all the device descriptors we don't tell them about. Or - * even act as usermode drivers. - * - * FIXME reduce hardwired intelligence here + * act as usermode drivers. 
*/ if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, @@ -650,44 +633,29 @@ static int usb_uevent(struct device *dev, char **envp, int num_envp, usb_dev->descriptor.bDeviceProtocol)) return -ENOMEM; - if (!is_usb_device(dev)) { - - if (add_uevent_var(envp, num_envp, &i, + if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, - "INTERFACE=%d/%d/%d", - alt->desc.bInterfaceClass, - alt->desc.bInterfaceSubClass, - alt->desc.bInterfaceProtocol)) - return -ENOMEM; + "BUSNUM=%03d", + usb_dev->bus->busnum)) + return -ENOMEM; - if (add_uevent_var(envp, num_envp, &i, + if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, - "MODALIAS=usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02X", - le16_to_cpu(usb_dev->descriptor.idVendor), - le16_to_cpu(usb_dev->descriptor.idProduct), - le16_to_cpu(usb_dev->descriptor.bcdDevice), - usb_dev->descriptor.bDeviceClass, - usb_dev->descriptor.bDeviceSubClass, - usb_dev->descriptor.bDeviceProtocol, - alt->desc.bInterfaceClass, - alt->desc.bInterfaceSubClass, - alt->desc.bInterfaceProtocol)) - return -ENOMEM; - } + "DEVNUM=%03d", + usb_dev->devnum)) + return -ENOMEM; envp[i] = NULL; - return 0; } #else static int usb_uevent(struct device *dev, char **envp, - int num_envp, char *buffer, int buffer_size) + int num_envp, char *buffer, int buffer_size) { return -ENODEV; } - #endif /* CONFIG_HOTPLUG */ /** diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 19abe81babd5..2a0b15e42bc7 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1367,11 +1367,15 @@ int usb_new_device(struct usb_device *udev) } #endif + /* export the usbdev device-node for libusb */ + udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, + (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); + /* Register the device. The device driver is responsible - * for adding the device files to usbfs and sysfs and for - * configuring the device. + * for adding the device files to sysfs and for configuring + * the device. 
*/ - err = device_add (&udev->dev); + err = device_add(&udev->dev); if (err) { dev_err(&udev->dev, "can't device_add, error %d\n", err); goto fail; diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 11dad22da41c..cddfc62c4611 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -662,7 +662,7 @@ static void usbfs_add_device(struct usb_device *dev) sprintf (name, "%03d", dev->devnum); dev->usbfs_dentry = fs_create_file (name, devmode | S_IFREG, dev->bus->usbfs_dentry, dev, - &usbfs_device_file_operations, + &usbdev_file_operations, devuid, devgid); if (dev->usbfs_dentry == NULL) { err ("error creating usbfs device entry"); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index c359ccb32998..da4ee07e0094 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1305,7 +1305,7 @@ int usb_reset_configuration(struct usb_device *dev) return 0; } -static void release_interface(struct device *dev) +void usb_release_interface(struct device *dev) { struct usb_interface *intf = to_usb_interface(dev); struct usb_interface_cache *intfc = @@ -1315,6 +1315,67 @@ static void release_interface(struct device *dev) kfree(intf); } +#ifdef CONFIG_HOTPLUG +static int usb_if_uevent(struct device *dev, char **envp, int num_envp, + char *buffer, int buffer_size) +{ + struct usb_device *usb_dev; + struct usb_interface *intf; + struct usb_host_interface *alt; + int i = 0; + int length = 0; + + if (!dev) + return -ENODEV; + + /* driver is often null here; dev_dbg() would oops */ + pr_debug ("usb %s: uevent\n", dev->bus_id); + + intf = to_usb_interface(dev); + usb_dev = interface_to_usbdev(intf); + alt = intf->cur_altsetting; + + if (add_uevent_var(envp, num_envp, &i, + buffer, buffer_size, &length, + "INTERFACE=%d/%d/%d", + alt->desc.bInterfaceClass, + alt->desc.bInterfaceSubClass, + alt->desc.bInterfaceProtocol)) + return -ENOMEM; + + if (add_uevent_var(envp, num_envp, &i, + buffer, buffer_size, &length, + "MODALIAS=usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02X", + le16_to_cpu(usb_dev->descriptor.idVendor), + le16_to_cpu(usb_dev->descriptor.idProduct), + le16_to_cpu(usb_dev->descriptor.bcdDevice), + usb_dev->descriptor.bDeviceClass, + usb_dev->descriptor.bDeviceSubClass, + usb_dev->descriptor.bDeviceProtocol, + alt->desc.bInterfaceClass, + alt->desc.bInterfaceSubClass, + alt->desc.bInterfaceProtocol)) + return -ENOMEM; + + envp[i] = NULL; + return 0; +} + +#else + +static int usb_if_uevent(struct device *dev, char **envp, + int num_envp, char *buffer, int buffer_size) +{ + return -ENODEV; +} +#endif /* CONFIG_HOTPLUG */ + +struct device_type usb_if_device_type = { + .name = "usb_interface", + .release = usb_release_interface, + .uevent = usb_if_uevent, +}; + /* * usb_set_configuration - Makes a particular device setting be current * @dev: the device whose configuration is being updated @@ -1478,8 +1539,8 @@ free_interfaces: intf->dev.parent = &dev->dev; intf->dev.driver = NULL; intf->dev.bus = &usb_bus_type; + intf->dev.type = &usb_if_device_type; intf->dev.dma_mask = dev->dev.dma_mask; - intf->dev.release = release_interface; device_initialize (&intf->dev); mark_quiesced(intf); sprintf (&intf->dev.bus_id[0], "%d-%s:%d.%d", diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 6f35dce8a95d..dfd1b5c87ca3 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -197,6 +197,11 @@ static void usb_release_dev(struct device *dev) kfree(udev); } +struct device_type usb_device_type = { + .name = "usb_device", + 
.release = usb_release_dev, +}; + #ifdef CONFIG_PM static int ksuspend_usb_init(void) @@ -247,13 +252,10 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1) device_initialize(&dev->dev); dev->dev.bus = &usb_bus_type; + dev->dev.type = &usb_device_type; dev->dev.dma_mask = bus->controller->dma_mask; - dev->dev.release = usb_release_dev; dev->state = USB_STATE_ATTACHED; - /* This magic assignment distinguishes devices from interfaces */ - dev->dev.platform_data = &usb_generic_driver; - INIT_LIST_HEAD(&dev->ep0.urb_list); dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT; @@ -882,9 +884,9 @@ static int __init usb_init(void) retval = usb_register(&usbfs_driver); if (retval) goto driver_register_failed; - retval = usbdev_init(); + retval = usb_devio_init(); if (retval) - goto usbdevice_init_failed; + goto usb_devio_init_failed; retval = usbfs_init(); if (retval) goto fs_init_failed; @@ -899,8 +901,8 @@ static int __init usb_init(void) hub_init_failed: usbfs_cleanup(); fs_init_failed: - usbdev_cleanup(); -usbdevice_init_failed: + usb_devio_cleanup(); +usb_devio_init_failed: usb_deregister(&usbfs_driver); driver_register_failed: usb_major_cleanup(); @@ -927,7 +929,7 @@ static void __exit usb_exit(void) usb_major_cleanup(); usbfs_cleanup(); usb_deregister(&usbfs_driver); - usbdev_cleanup(); + usb_devio_cleanup(); usb_hub_cleanup(); usb_host_cleanup(); bus_unregister(&usb_bus_type); diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index c94379e55f2d..bf2eb0dae2ec 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -78,15 +78,13 @@ static inline int usb_autoresume_device(struct usb_device *udev) extern struct workqueue_struct *ksuspend_usb_wq; extern struct bus_type usb_bus_type; +extern struct device_type usb_device_type; +extern struct device_type usb_if_device_type; extern struct usb_device_driver usb_generic_driver; -/* Here's how we tell apart devices and interfaces. Luckily there's - * no such thing as a platform USB device, so we can steal the use - * of the platform_data field. */ - static inline int is_usb_device(const struct device *dev) { - return dev->platform_data == &usb_generic_driver; + return dev->type == &usb_device_type; } /* Do the same for device drivers and interface drivers. */ @@ -122,11 +120,11 @@ extern const char *usbcore_name; extern struct mutex usbfs_mutex; extern struct usb_driver usbfs_driver; extern const struct file_operations usbfs_devices_fops; -extern const struct file_operations usbfs_device_file_operations; +extern const struct file_operations usbdev_file_operations; extern void usbfs_conn_disc_event(void); -extern int usbdev_init(void); -extern void usbdev_cleanup(void); +extern int usb_devio_init(void); +extern void usb_devio_cleanup(void); struct dev_state { struct list_head list; /* state list */ diff --git a/include/linux/usb.h b/include/linux/usb.h index 5e8e144afbae..f9e4445d5b53 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -299,8 +299,9 @@ struct usb_bus { int bandwidth_int_reqs; /* number of Interrupt requests */ int bandwidth_isoc_reqs; /* number of Isoc. 
requests */ +#ifdef CONFIG_USB_DEVICEFS struct dentry *usbfs_dentry; /* usbfs dentry entry for the bus */ - +#endif struct class_device *class_dev; /* class device for this bus */ #if defined(CONFIG_USB_MON) @@ -373,9 +374,12 @@ struct usb_device { char *serial; /* iSerialNumber string, if present */ struct list_head filelist; - struct device *usbfs_dev; +#ifdef CONFIG_USB_DEVICE_CLASS + struct device *usb_classdev; +#endif +#ifdef CONFIG_USB_DEVICEFS struct dentry *usbfs_dentry; /* usbfs dentry entry for the device */ - +#endif /* * Child devices - these can be either new devices * (if this is a hub device), or different instances -- cgit v1.2.3-59-g8ed1b From 1941044aa9632aa8debbb94a3c8a5ed0ebddade8 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 27 Mar 2007 13:33:59 -0400 Subject: USB: add "last_busy" field for use in autosuspend This patch (as877) adds a "last_busy" field to struct usb_device, for use by the autosuspend framework. Now if an autosuspend call comes at a time when the device isn't busy but hasn't yet been idle for long enough, the timer can be set to exactly the desired value. And we will be ready to handle things like HID drivers, which can't maintain a useful usage count and must rely on the time-of-last-use to decide when to autosuspend. The patch also makes some related minor improvements: Move the calls to the autosuspend condition-checking routine into usb_suspend_both(), which is the only place where it really matters. If the autosuspend timer is already running, don't stop and restart it. Replace immediate returns with gotos so that the optional debugging output won't be bypassed. If autoresume is disabled but the device is already awake, don't return an error for an autoresume call. Don't try to autoresume a device if it isn't suspended. (Yes, this undercuts the previous change -- so sue me.) Don't duplicate existing code in the autosuspend work routine. Fix the kerneldoc in usb_autopm_put_interface(): If an autoresume call fails, the usage counter is left unchanged. Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/driver.c | 102 +++++++++++++++++++++++++++++----------------- drivers/usb/core/hcd.c | 1 + drivers/usb/core/hub.c | 1 + include/linux/usb.h | 8 ++++ 4 files changed, 75 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 593386eb974d..631f30582481 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -932,6 +932,7 @@ static int autosuspend_check(struct usb_device *udev) { int i; struct usb_interface *intf; + long suspend_time; /* For autosuspend, fail fast if anything is in use or autosuspend * is disabled. Also fail if any interfaces require remote wakeup @@ -943,6 +944,7 @@ static int autosuspend_check(struct usb_device *udev) if (udev->autosuspend_delay < 0 || udev->autosuspend_disabled) return -EPERM; + suspend_time = udev->last_busy + udev->autosuspend_delay; if (udev->actconfig) { for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { intf = udev->actconfig->interface[i]; @@ -958,6 +960,17 @@ static int autosuspend_check(struct usb_device *udev) } } } + + /* If everything is okay but the device hasn't been idle for long + * enough, queue a delayed autosuspend request.
+ */ + suspend_time -= jiffies; + if (suspend_time > 0) { + if (!timer_pending(&udev->autosuspend.timer)) + queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, + suspend_time); + return -EAGAIN; + } return 0; } @@ -1010,19 +1023,18 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) struct usb_interface *intf; struct usb_device *parent = udev->parent; - cancel_delayed_work(&udev->autosuspend); - if (udev->state == USB_STATE_NOTATTACHED) - return 0; - if (udev->state == USB_STATE_SUSPENDED) - return 0; + if (udev->state == USB_STATE_NOTATTACHED || + udev->state == USB_STATE_SUSPENDED) + goto done; udev->do_remote_wakeup = device_may_wakeup(&udev->dev); if (udev->auto_pm) { status = autosuspend_check(udev); if (status < 0) - return status; + goto done; } + cancel_delayed_work(&udev->autosuspend); /* Suspend all the interfaces and then udev itself */ if (udev->actconfig) { @@ -1047,6 +1059,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) } else if (parent) usb_autosuspend_device(parent); + done: // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status); return status; } @@ -1086,14 +1099,18 @@ static int usb_resume_both(struct usb_device *udev) struct usb_interface *intf; struct usb_device *parent = udev->parent; - if (udev->auto_pm && udev->autoresume_disabled) - return -EPERM; cancel_delayed_work(&udev->autosuspend); - if (udev->state == USB_STATE_NOTATTACHED) - return -ENODEV; + if (udev->state == USB_STATE_NOTATTACHED) { + status = -ENODEV; + goto done; + } /* Propagate the resume up the tree, if necessary */ if (udev->state == USB_STATE_SUSPENDED) { + if (udev->auto_pm && udev->autoresume_disabled) { + status = -EPERM; + goto done; + } if (parent) { status = usb_autoresume_device(parent); if (status == 0) { @@ -1139,24 +1156,13 @@ static int usb_resume_both(struct usb_device *udev) } } + done: // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status); return status; } #ifdef CONFIG_USB_SUSPEND -/* usb_autosuspend_work - callback routine to autosuspend a USB device */ -void usb_autosuspend_work(struct work_struct *work) -{ - struct usb_device *udev = - container_of(work, struct usb_device, autosuspend.work); - - usb_pm_lock(udev); - udev->auto_pm = 1; - usb_suspend_both(udev, PMSG_SUSPEND); - usb_pm_unlock(udev); -} - /* Internal routine to adjust a device's usage counter and change * its autosuspend state. 
*/ @@ -1165,20 +1171,34 @@ static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt) int status = 0; usb_pm_lock(udev); + udev->auto_pm = 1; udev->pm_usage_cnt += inc_usage_cnt; WARN_ON(udev->pm_usage_cnt < 0); if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) { - udev->auto_pm = 1; - status = usb_resume_both(udev); + if (udev->state == USB_STATE_SUSPENDED) + status = usb_resume_both(udev); if (status != 0) udev->pm_usage_cnt -= inc_usage_cnt; - } else if (inc_usage_cnt <= 0 && autosuspend_check(udev) == 0) - queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, - udev->autosuspend_delay); + else if (inc_usage_cnt) + udev->last_busy = jiffies; + } else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) { + if (inc_usage_cnt) + udev->last_busy = jiffies; + status = usb_suspend_both(udev, PMSG_SUSPEND); + } usb_pm_unlock(udev); return status; } +/* usb_autosuspend_work - callback routine to autosuspend a USB device */ +void usb_autosuspend_work(struct work_struct *work) +{ + struct usb_device *udev = + container_of(work, struct usb_device, autosuspend.work); + + usb_autopm_do_device(udev, 0); +} + /** * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces * @udev: the usb_device to autosuspend @@ -1270,15 +1290,20 @@ static int usb_autopm_do_interface(struct usb_interface *intf, if (intf->condition == USB_INTERFACE_UNBOUND) status = -ENODEV; else { + udev->auto_pm = 1; intf->pm_usage_cnt += inc_usage_cnt; if (inc_usage_cnt >= 0 && intf->pm_usage_cnt > 0) { - udev->auto_pm = 1; - status = usb_resume_both(udev); + if (udev->state == USB_STATE_SUSPENDED) + status = usb_resume_both(udev); if (status != 0) intf->pm_usage_cnt -= inc_usage_cnt; - } else if (inc_usage_cnt <= 0 && autosuspend_check(udev) == 0) - queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, - udev->autosuspend_delay); + else if (inc_usage_cnt) + udev->last_busy = jiffies; + } else if (inc_usage_cnt <= 0 && intf->pm_usage_cnt <= 0) { + if (inc_usage_cnt) + udev->last_busy = jiffies; + status = usb_suspend_both(udev, PMSG_SUSPEND); + } } usb_pm_unlock(udev); return status; @@ -1337,11 +1362,14 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface); * or @intf is unbound. A typical example would be a character-device * driver when its device file is opened. * - * The routine increments @intf's usage counter. So long as the counter - * is greater than 0, autosuspend will not be allowed for @intf or its - * usb_device. When the driver is finished using @intf it should call - * usb_autopm_put_interface() to decrement the usage counter and queue - * a delayed autosuspend request (if the counter is <= 0). + * + * The routine increments @intf's usage counter. (However if the + * autoresume fails then the counter is re-decremented.) So long as the + * counter is greater than 0, autosuspend will not be allowed for @intf + * or its usb_device. When the driver is finished using @intf it should + * call usb_autopm_put_interface() to decrement the usage counter and + * queue a delayed autosuspend request (if the counter is <= 0). + * * * Note that @intf->pm_usage_cnt is owned by the interface driver. 
The * core will not change its value other than the increment and decrement diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 8bc3ce6d9666..40cf882293e6 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1306,6 +1306,7 @@ static void hcd_resume_work(struct work_struct *work) struct usb_device *udev = hcd->self.root_hub; usb_lock_device(udev); + usb_mark_last_busy(udev); usb_external_resume_device(udev); usb_unlock_device(udev); } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 2a0b15e42bc7..bde29ab2b504 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1859,6 +1859,7 @@ static int remote_wakeup(struct usb_device *udev) usb_lock_device(udev); if (udev->state == USB_STATE_SUSPENDED) { dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-"); + usb_mark_last_busy(udev); status = usb_external_resume_device(udev); } usb_unlock_device(udev); diff --git a/include/linux/usb.h b/include/linux/usb.h index f9e4445d5b53..cfbd2bb8fa2c 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -398,6 +398,7 @@ struct usb_device { struct delayed_work autosuspend; /* for delayed autosuspends */ struct mutex pm_mutex; /* protects PM operations */ + unsigned long last_busy; /* time of last use */ int autosuspend_delay; /* in jiffies */ unsigned auto_pm:1; /* autosuspend/resume in progress */ @@ -443,6 +444,11 @@ static inline void usb_autopm_disable(struct usb_interface *intf) usb_autopm_set_interface(intf); } +static inline void usb_mark_last_busy(struct usb_device *udev) +{ + udev->last_busy = jiffies; +} + #else static inline int usb_autopm_set_interface(struct usb_interface *intf) @@ -457,6 +463,8 @@ static inline void usb_autopm_enable(struct usb_interface *intf) { } static inline void usb_autopm_disable(struct usb_interface *intf) { } +static inline void usb_mark_last_busy(struct usb_device *udev) +{ } #endif /*-------------------------------------------------------------------------*/ -- cgit v1.2.3-59-g8ed1b From aa2ce5ca6be480cb139e21258671c2c27826f8ff Mon Sep 17 00:00:00 2001 From: David Brownell Date: Tue, 17 Apr 2007 17:51:38 -0700 Subject: USB: minor doc update Minor doc update to ... say where USB_DT_CS_* came from and update the definitions to match how they're derived there. Signed-off-by: David Brownell Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/ch9.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 1122a6c2c1a3..6169438ec5a2 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -181,12 +181,15 @@ struct usb_ctrlrequest { #define USB_DT_WIRE_ADAPTER 0x21 #define USB_DT_RPIPE 0x22 -/* conventional codes for class-specific descriptors */ -#define USB_DT_CS_DEVICE 0x21 -#define USB_DT_CS_CONFIG 0x22 -#define USB_DT_CS_STRING 0x23 -#define USB_DT_CS_INTERFACE 0x24 -#define USB_DT_CS_ENDPOINT 0x25 +/* Conventional codes for class-specific descriptors. The convention is + * defined in the USB "Common Class" Spec (3.11). Individual class specs + * are authoritative for their usage, not the "common class" writeup. 
+ */ +#define USB_DT_CS_DEVICE (USB_TYPE_CLASS | USB_DT_DEVICE) +#define USB_DT_CS_CONFIG (USB_TYPE_CLASS | USB_DT_CONFIG) +#define USB_DT_CS_STRING (USB_TYPE_CLASS | USB_DT_STRING) +#define USB_DT_CS_INTERFACE (USB_TYPE_CLASS | USB_DT_INTERFACE) +#define USB_DT_CS_ENDPOINT (USB_TYPE_CLASS | USB_DT_ENDPOINT) /* All standard descriptors have these 2 fields at the beginning */ struct usb_descriptor_header { -- cgit v1.2.3-59-g8ed1b From b8b8fd2dc23725fba77f66b3fef11b11f983fc08 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 27 Apr 2007 15:31:24 -0700 Subject: [NET]: Fix networking compilation errors Fix miscellaneous networking compilation errors. (*) Export ktime_add_ns() for modules. (*) wext_proc_init() should have an ANSI declaration. Signed-off-by: David Howells Signed-off-by: David S. Miller --- include/net/wext.h | 2 +- kernel/hrtimer.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/wext.h b/include/net/wext.h index 55741836a675..c02b8decf3af 100644 --- a/include/net/wext.h +++ b/include/net/wext.h @@ -10,7 +10,7 @@ extern int wext_proc_init(void); extern int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd, void __user *arg); #else -static inline int wext_proc_init() +static inline int wext_proc_init(void) { return 0; } diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index f5cfde8c9025..1b3033105b40 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -279,6 +279,8 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) return ktime_add(kt, tmp); } + +EXPORT_SYMBOL_GPL(ktime_add_ns); # endif /* !CONFIG_KTIME_SCALAR */ /* -- cgit v1.2.3-59-g8ed1b
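The wext_proc_init() change in the last patch turns on a plain C rule: an empty parameter list, as in "int f()", is an old-style declaration whose arguments are left unspecified, while "int f(void)" is a true prototype taking no arguments; kernel builds enable -Wstrict-prototypes, which warns about the old form (and fails the build under -Werror). The sketch below is illustrative only, not part of any patch in this series, and the demo_* function names are hypothetical stand-ins rather than real kernel symbols.

/* Old-style stub: draws "function declaration isn't a prototype"
 * under -Wstrict-prototypes because the parameter list is unspecified. */
static inline int demo_proc_init_old()
{
	return 0;
}

/* ANSI-style stub: a real prototype, the form the patch gives the
 * wext_proc_init() stub. */
static inline int demo_proc_init(void)
{
	return 0;
}

/* Caller so both stubs are actually used; both compile, but only the
 * second is warning-clean in a strict-prototypes build. */
int demo_net_setup(void)
{
	return demo_proc_init_old() + demo_proc_init();
}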